diff --git a/.travis.yml b/.travis.yml index 489a813b9f1..1ea0a30844e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + language: java +dist: trusty jdk: -- openjdk6 -- oraclejdk7 +- openjdk7 - oraclejdk8 sudo: false before_install: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a4358fe89ee..703c5fa05b5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,44 @@ + + # Contributing guidelines +## Code formatting + +We follow the [Google Java Style Guide](https://google.github.io/styleguide/javaguide.html). See +https://github.com/google/google-java-format for IDE plugins. The rules are not configurable. + +The build will fail if the code is not formatted. To format all files from the command line, run: + +``` +mvn fmt:format +``` + +Some aspects are not covered by the formatter: + +* braces must be used with `if`, `else`, `for`, `do` and `while` statements, even when the body is + empty or contains only a single statement. + +Also, if your IDE sorts import statements automatically, make sure it follows the same order as the +formatter: all static imports in ASCII sort order, followed by a blank line, followed by all regular +imports in ASCII sort order. In addition, please avoid using wildcard imports. + ## Working on an issue Before starting to work on something, please comment in JIRA or ask on the mailing list diff --git a/Jenkinsfile-asf b/Jenkinsfile-asf new file mode 100644 index 00000000000..2bc42bea379 --- /dev/null +++ b/Jenkinsfile-asf @@ -0,0 +1,78 @@ +#!groovy + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +pipeline { + agent { + label 'cassandra-small' + } + + triggers { + // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) + cron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? 
"@weekly" : "") + } + + stages { + stage('Matrix') { + matrix { + axes { + axis { + name 'TEST_JAVA_VERSION' + values 'openjdk@1.8.0-292', 'openjdk@1.11.0-9' + } + axis { + name 'SERVER_VERSION' + values '2.1', '3.0', '3.11', '4.0' + } + } + stages { + stage('Tests') { + agent { + label 'cassandra-medium' + } + steps { + script { + executeTests() + junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + } + } + } + } +} + +def executeTests() { + sh """ + container_id=\$(docker run -td -e TEST_JAVA_VERSION=${TEST_JAVA_VERSION} -e SERVER_VERSION=${SERVER_VERSION} -v \$(pwd):/home/docker/cassandra-java-driver apache.jfrog.io/cassan-docker/apache/cassandra-java-driver-testing-ubuntu2204 'sleep 2h') + docker exec --user root \$container_id bash -c \"sudo bash /home/docker/cassandra-java-driver/ci/create-user.sh docker \$(id -u) \$(id -g) /home/docker/cassandra-java-driver\" + docker exec --user docker \$container_id './cassandra-java-driver/ci/run-tests.sh' + ( nohup docker stop \$container_id >/dev/null 2>/dev/null & ) + """ +} + +// branch pattern for cron +// should match 3.x, 4.x, 4.5.x, etc +def branchPatternCron() { + ~"((\\d+(\\.[\\dx]+)+))" +} + diff --git a/Jenkinsfile-datastax b/Jenkinsfile-datastax new file mode 100644 index 00000000000..21bd9721f7f --- /dev/null +++ b/Jenkinsfile-datastax @@ -0,0 +1,569 @@ +#!groovy +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +def initializeEnvironment() { + env.DRIVER_DISPLAY_NAME = 'CassandraⓇ Java Driver 3.x' + env.DRIVER_METRIC_TYPE = 'oss' + if (env.GIT_URL.contains('riptano/java-driver')) { + env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME + env.DRIVER_METRIC_TYPE = 'oss-private' + } else if (env.GIT_URL.contains('java-dse-driver')) { + env.DRIVER_DISPLAY_NAME = 'DSE Java Driver 1.x' + env.DRIVER_METRIC_TYPE = 'dse' + } + + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" + + env.MAVEN_HOME = "${env.HOME}/.mvn/apache-maven-3.2.5" + env.PATH = "${env.MAVEN_HOME}/bin:${env.PATH}" + env.JAVA_HOME = sh(label: 'Get JAVA_HOME',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba which ${JABBA_VERSION}''', returnStdout: true).trim() + env.JAVA8_HOME = sh(label: 'Get JAVA8_HOME',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba which 1.8''', returnStdout: true).trim() + + sh label: 'Download Apache CassandraⓇ',script: '''#!/bin/bash -le + . 
${JABBA_SHELL} + jabba use ${JABBA_VERSION} + . ${CCM_ENVIRONMENT_SHELL} ${SERVER_VERSION} + ''' + + sh label: 'Display Java and environment information',script: '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use ${JABBA_VERSION} + + java -version + mvn -v + printenv | sort + ''' +} + +def buildDriver(jabbaVersion) { + def buildDriverScript = '''#!/bin/bash -le + + . ${JABBA_SHELL} + jabba use '''+jabbaVersion+''' + + echo "Building with Java version '''+jabbaVersion+'''" + + mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true + ''' + sh label: 'Build driver', script: buildDriverScript +} + +def executeTests() { + sh label: 'Execute tests', script: '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use ${JABBA_VERSION} + + printenv | sort + + mvn -B -V verify \ + -fail-never -P${TEST_PROFILE} \ + -Dcom.datastax.driver.TEST_BASE_NODE_WAIT=120 \ + -Dcom.datastax.driver.NEW_NODE_DELAY_SECONDS=100 \ + -Dcassandra.version=${CCM_CASSANDRA_VERSION} \ + -Ddse=${CCM_IS_DSE} \ + -Dccm.java.home=${CCM_JAVA_HOME} \ + -Dccm.path=${CCM_JAVA_HOME}/bin \ + -Dccm.maxNumberOfNodes=3 \ + -DfailIfNoTests=false \ + -Dmaven.test.failure.ignore=true \ + -Dmaven.javadoc.skip=true \ + -Dproxy.path=${HOME}/proxy + + # run isolated tests + mvn -B -V verify \ + -fail-never -Pisolated \ + -Dcom.datastax.driver.TEST_BASE_NODE_WAIT=120 \ + -Dcom.datastax.driver.NEW_NODE_DELAY_SECONDS=100 \ + -Dcassandra.version=${CCM_CASSANDRA_VERSION} \ + -Ddse=${CCM_IS_DSE} \ + -Dccm.java.home=${CCM_JAVA_HOME} \ + -Dccm.path=${CCM_JAVA_HOME}/bin \ + -Dccm.maxNumberOfNodes=3 \ + -DfailIfNoTests=false \ + -Dmaven.test.failure.ignore=true \ + -Dmaven.javadoc.skip=true + ''' +} + +def executeCodeCoverage() { + jacoco( + execPattern: '**/target/jacoco.exec', + classPattern: '**/classes', + sourcePattern: '**/src/main/java' + ) +} + +def notifySlack(status = 'started') { + // Notify Slack channel for every build except adhoc executions + if (params.ADHOC_BUILD_TYPE != 'BUILD-AND-EXECUTE-TESTS') { + // Set the global pipeline scoped environment (this is above each matrix) + env.BUILD_STATED_SLACK_NOTIFIED = 'true' + + def buildType = 'Commit' + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" + } + + def color = 'good' // Green + if (status.equalsIgnoreCase('aborted')) { + color = '808080' // Grey + } else if (status.equalsIgnoreCase('unstable')) { + color = 'warning' // Orange + } else if (status.equalsIgnoreCase('failed')) { + color = 'danger' // Red + } + + def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] +<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" + if (!status.equalsIgnoreCase('Started')) { + message += """ +${status} after ${currentBuild.durationString - ' and counting'}""" + } + + slackSend color: "${color}", + channel: "#java-driver-dev-bots", + message: "${message}" + } +} + +def describePerCommitStage() { + script { + currentBuild.displayName = "Per-Commit build" + currentBuild.description = 'Per-Commit build and testing of development Apache CassandraⓇ against Oracle JDK 8' + } +} + +def describeAdhocAndScheduledTestingStage() { + script { + if (params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION') { + // Ad-hoc build + 
currentBuild.displayName = "Adhoc testing" + currentBuild.description = "Testing ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} against JDK version ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION}" + } else { + // Scheduled build + currentBuild.displayName = "${params.CI_SCHEDULE.toLowerCase().replaceAll('_', ' ').capitalize()} schedule" + currentBuild.description = "Testing server versions [${params.CI_SCHEDULE_SERVER_VERSIONS}] against JDK version ${params.CI_SCHEDULE_JABBA_VERSION}" + } + } +} + +// branch pattern for cron +// should match 3.x, 4.x, 4.5.x, etc +def branchPatternCron() { + ~"((\\d+(\\.[\\dx]+)+))" +} + +pipeline { + agent none + + // Global pipeline timeout + options { + timeout(time: 10, unit: 'HOURS') + buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts + numToKeepStr: '50')) // Keep only the last 50 build records + } + + parameters { + choice( + name: 'ADHOC_BUILD_TYPE', + choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], + description: '''

Perform an adhoc build operation

Choice | Description
BUILD | Performs a Per-Commit build
BUILD-AND-EXECUTE-TESTS | Performs a build and executes the integration and unit tests
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', + choices: ['2.1', // Legacy Apache CassandraⓇ + '2.2', // Legacy Apache CassandraⓇ + '3.0', // Previous Apache CassandraⓇ + '3.11', // Current Apache CassandraⓇ + '4.0', // Development Apache CassandraⓇ + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Previous DataStax Enterprise + 'dse-6.8.0', // Current DataStax Enterprise + 'ALL'], + description: '''Apache Cassandra® or DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS builds + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Choice | Description
2.1 | Apache Cassandra® v2.1.x
2.2 | Apache Cassandra® v2.2.x
3.0 | Apache Cassandra® v3.0.x
3.11 | Apache Cassandra® v3.11.x
4.0 | Apache Cassandra® v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.1 | DataStax Enterprise v5.1.x
dse-6.0 | DataStax Enterprise v6.0.x
dse-6.7 | DataStax Enterprise v6.7.x
dse-6.8.0 | DataStax Enterprise v6.8.0
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION', + choices: ['zulu@1.6', // Zulu JDK 1.6 + 'zulu@1.7', // Zulu JDK 1.7 + '1.8', // Oracle JDK version 1.8 (current default) + 'openjdk@1.11'], // OpenJDK version 11 + description: '''JDK version to use for TESTING when running adhoc BUILD-AND-EXECUTE-TESTS builds. All builds will use JDK8 for building the driver + + + + + + + + + + + + + + + + + + + + + + + +
Choice | Description
zulu@1.6 | Zulu JDK version 1.6
zulu@1.7 | Zulu JDK version 1.7
1.8 | Oracle JDK version 1.8 (Used for compiling regardless of choice)
openjdk@1.11 | OpenJDK version 11
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_TEST_PROFILE', + choices: ['short', 'long'], + description: 'Test profile to execute during test phase of the build') + choice( + name: 'CI_SCHEDULE', + choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS', 'MONTHLY'], + description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_SERVER_VERSIONS', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing server version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_JABBA_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing JDK version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_TEST_PROFILE', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing profile to execute (DO NOT CHANGE THIS SELECTION)') + } + + triggers { + // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) + parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ + # Every weeknight (Monday - Friday) around 3:00 AM + ### JDK8 tests against 2.1, 3.0, 3.11 and 4.0 + H 3 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=2.1 3.0 3.11 4.0;CI_SCHEDULE_JABBA_VERSION=1.8;CI_SCHEDULE_TEST_PROFILE=long + # Every weekend (Sunday) around 2:00 PM + ### JDK11 tests against 2.1, 3.0, 3.11 and 4.0 + H 14 * * 0 %CI_SCHEDULE=WEEKENDS;CI_SCHEDULE_SERVER_VERSIONS=2.1 3.0 3.11 4.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.11;CI_SCHEDULE_TEST_PROFILE=long + """ : "") + } + + environment { + OS_VERSION = 'ubuntu/bionic64/java-driver' + JABBA_SHELL = '/usr/lib/jabba/jabba.sh' + CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' + } + + stages { + stage ('Per-Commit') { + options { + timeout(time: 2, unit: 'HOURS') + } + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_TEST_PROFILE == 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + + matrix { + axes { + axis { + name 'SERVER_VERSION' + values '3.11', // Current Apache CassandraⓇ + '4.0' // Development Apache CassandraⓇ + } + } + + agent { + label "${OS_VERSION}" + } + environment { + // Per-commit builds are only going to run against JDK8 + JABBA_VERSION = '1.8' + TEST_PROFILE = 'short' + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describePerCommitStage() + } + } + stage('Build-Driver') { + steps { + buildDriver('1.8') + } + } + stage('Execute-Tests') { + steps { + catchError { + // Use the matrix JDK for testing + executeTests() + } + } + post { + always { + /* + * Empty results are possible + * + * - Build failures during mvn verify may exist so report may not be available + */ + junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + stage('Execute-Code-Coverage') { + // Ensure the code 
coverage is run only once per-commit + when { environment name: 'SERVER_VERSION', value: '4.0' } + steps { + executeCodeCoverage() + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + stage('Adhoc-And-Scheduled-Testing') { + when { + beforeAgent true + allOf { + expression { (params.ADHOC_BUILD_TYPE == 'BUILD' && params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') || + params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + not { buildingTag() } + anyOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_SERVER_VERSIONS != 'DO-NOT-CHANGE-THIS-SELECTION' } + } + } + } + } + + environment { + SERVER_VERSIONS = "${params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION : params.CI_SCHEDULE_SERVER_VERSIONS}" + JABBA_VERSION = "${params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION : params.CI_SCHEDULE_JABBA_VERSION}" + TEST_PROFILE = "${params.CI_SCHEDULE_TEST_PROFILE == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_TEST_PROFILE : params.CI_SCHEDULE_TEST_PROFILE}" + } + + matrix { + axes { + axis { + name 'SERVER_VERSION' + values '2.1', // Legacy Apache CassandraⓇ + '3.0', // Previous Apache CassandraⓇ + '3.11', // Current Apache CassandraⓇ + '4.0', // Development Apache CassandraⓇ + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Previous DataStax Enterprise + 'dse-6.8.0' // Current DataStax Enterprise + } + } + when { + beforeAgent true + allOf { + expression { return env.SERVER_VERSIONS.split(' ').any { it =~ /(ALL|${env.SERVER_VERSION})/ } } + } + } + agent { + label "${env.OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describeAdhocAndScheduledTestingStage() + } + } + stage('Build-Driver') { + steps { + // Jabba default should be a JDK8 for now + buildDriver('1.8') + } + } + stage('Execute-Tests') { + steps { + catchError { + // Use the matrix JDK for testing + executeTests() + } + } + post { + always { + /* + * Empty results are possible + * + * - Build failures during mvn verify may exist so report may not be available + * - With boolean parameters to skip tests a failsafe report may not be available + */ + junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + stage('Execute-Code-Coverage') { + // Ensure the code coverage is run only once per-commit + when { + allOf { + environment name: 'SERVER_VERSION', value: '4.0' + environment name: 'JABBA_VERSION', value: '1.8' + } + } + steps { + executeCodeCoverage() + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + } +} diff --git a/LICENSE b/LICENSE index d6456956733..38f06999ef6 100644 --- a/LICENSE +++ b/LICENSE @@ -200,3 +200,10 @@ 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +Apache Cassandra Java Driver bundles code and files from the following projects: + +Protocol Buffers +Copyright 2008 Google Inc. +This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). +see driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java diff --git a/LICENSE_binary b/LICENSE_binary new file mode 100644 index 00000000000..5232779c907 --- /dev/null +++ b/LICENSE_binary @@ -0,0 +1,229 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Apache Cassandra Java Driver bundles code and files from the following projects: + +Protocol Buffers +Copyright 2008 Google Inc. +This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). +see driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java + +This product bundles Java Native Runtime - POSIX 3.1.15, +which is available under the Eclipse Public License version 2.0. +see licenses/jnr-posix.txt + +This product bundles jnr-x86asm 1.0.2, +which is available under the MIT License. +see licenses/jnr-x86asm.txt + +This product bundles ASM 9.2: a very small and fast Java bytecode manipulation framework, +which is available under the 3-Clause BSD License. +see licenses/asm.txt + +This product bundles HdrHistogram 2.1.12: A High Dynamic Range (HDR) Histogram, +which is available under the 2-Clause BSD License. +see licenses/HdrHistogram.txt + +This product bundles The Simple Logging Facade for Java (SLF4J) API 1.7.26, +which is available under the MIT License. +see licenses/slf4j-api.txt diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 00000000000..8e27ae3e52f --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,5 @@ +Apache Cassandra Java Driver +Copyright 2012- The Apache Software Foundation + +This product includes software developed at The Apache Software +Foundation (http://www.apache.org/). diff --git a/NOTICE_binary.txt b/NOTICE_binary.txt new file mode 100644 index 00000000000..c60d8ceb245 --- /dev/null +++ b/NOTICE_binary.txt @@ -0,0 +1,249 @@ +Apache Cassandra Java Driver +Copyright 2012- The Apache Software Foundation + +This product includes software developed at The Apache Software +Foundation (http://www.apache.org/). + +This compiled product also includes Apache-licensed dependencies +that contain the following NOTICE information: + +================================================================== +io.netty:netty-handler NOTICE.txt +================================================================== +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified portion of 'Webbit', an event based +WebSocket and HTTP server, which can be obtained at: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product contains a modified portion of 'SLF4J', a simple logging +facade for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * https://www.slf4j.org/ + +This product contains a modified portion of 'Apache Harmony', an open source +Java SE, which can be obtained at: + + * NOTICE: + * license/NOTICE.harmony.txt + * LICENSE: + * license/LICENSE.harmony.txt (Apache License 2.0) + * HOMEPAGE: + * https://archive.apache.org/dist/harmony/ + +This product contains a modified portion of 'jbzip2', a Java bzip2 compression +and decompression library written by Matthew J. Francis. It can be obtained at: + + * LICENSE: + * license/LICENSE.jbzip2.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jbzip2/ + +This product contains a modified portion of 'libdivsufsort', a C API library to construct +the suffix array and the Burrows-Wheeler transformed string for any input string of +a constant-size alphabet written by Yuta Mori. It can be obtained at: + + * LICENSE: + * license/LICENSE.libdivsufsort.txt (MIT License) + * HOMEPAGE: + * https://github.com/y-256/libdivsufsort + +This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, + which can be obtained at: + + * LICENSE: + * license/LICENSE.jctools.txt (ASL2 License) + * HOMEPAGE: + * https://github.com/JCTools/JCTools + +This product optionally depends on 'JZlib', a re-implementation of zlib in +pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product optionally depends on 'Compress-LZF', a Java library for encoding and +decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: + + * LICENSE: + * license/LICENSE.compress-lzf.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/ning/compress + +This product optionally depends on 'lz4', a LZ4 Java compression +and decompression library written by Adrien Grand. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.lz4.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jpountz/lz4-java + +This product optionally depends on 'lzma-java', a LZMA Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.lzma-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jponge/lzma-java + +This product optionally depends on 'zstd-jni', a zstd-jni Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.zstd-jni.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/luben/zstd-jni + +This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression +and decompression library written by William Kinney. It can be obtained at: + + * LICENSE: + * license/LICENSE.jfastlz.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jfastlz/ + +This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/protobuf + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * https://www.bouncycastle.org/ + +This product optionally depends on 'Snappy', a compression library produced +by Google Inc, which can be obtained at: + + * LICENSE: + * license/LICENSE.snappy.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/snappy + +This product optionally depends on 'JBoss Marshalling', an alternative Java +serialization API, which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jboss-remoting/jboss-marshalling + +This product optionally depends on 'Caliper', Google's micro- +benchmarking framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.caliper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/google/caliper + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, which +can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * https://logging.apache.org/log4j/ + +This product optionally depends on 'Aalto XML', an ultra-high performance +non-blocking XML processor, which can be obtained at: + + * LICENSE: + * license/LICENSE.aalto-xml.txt (Apache License 2.0) + * HOMEPAGE: + * https://wiki.fasterxml.com/AaltoHome + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: + + * LICENSE: + * license/LICENSE.hpack.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/twitter/hpack + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Cory Benfield. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.hyper-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/python-hyper/hpack/ + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: + + * LICENSE: + * license/LICENSE.nghttp2-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/nghttp2/nghttp2/ + +This product contains a modified portion of 'Apache Commons Lang', a Java library +provides utilities for the java.lang API, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-lang.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/proper/commons-lang/ + + +This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. + + * LICENSE: + * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/takari/maven-wrapper + +This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. +This private header is also used by Apple's open source + mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). + + * LICENSE: + * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) + * HOMEPAGE: + * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h + +This product optionally depends on 'Brotli4j', Brotli compression and +decompression for Java., which can be obtained at: + + * LICENSE: + * license/LICENSE.brotli4j.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/hyperxpro/Brotli4j diff --git a/README.md b/README.md index 25b0d4754d7..a56816e0767 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,21 @@ -# Datastax Java Driver for Apache Cassandra +# Java Driver for Apache Cassandra® -[![Build Status](https://travis-ci.org/datastax/java-driver.svg?branch=3.x)](https://travis-ci.org/datastax/java-driver) +:warning: The java-driver has recently been donated by Datastax to The Apache Software Foundation and the Apache Cassandra project. Bear with us as we move assets and coordinates. + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/cassandra-driver-core/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/cassandra-driver-core) *If you're reading this on github.com, please note that this is the readme for the development version and that some features described here might -not yet have been released. You can find the documentation for latest -version through [Java driver -docs](http://datastax.github.io/java-driver/) or via the release tags, -[e.g. -3.3.0](https://github.com/datastax/java-driver/tree/3.3.0).* +not yet have been released. You can find the documentation for the latest +version through the [Java Driver +docs](http://docs.datastax.com/en/developer/java-driver/3.11/index.html) or via the release tags, +[e.g. 3.12.1](https://github.com/apache/cassandra-java-driver/tree/3.12.1).* A modern, [feature-rich](manual/) and highly tunable Java client -library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using -exclusively Cassandra's binary protocol and Cassandra Query Language v3. +library for Apache Cassandra (2.1+) and using exclusively Cassandra's binary protocol +and Cassandra Query Language v3. 
_Use the [DataStax Enterprise Java Driver][dse-driver] +for better compatibility and support for DataStax Enterprise._ **Features:** @@ -37,38 +40,32 @@ The driver contains the following modules: - driver-core: the core layer. - driver-mapping: the object mapper. -- driver-extras: optional features for the Java driver. +- driver-extras: optional features for the Java Driver. - driver-examples: example applications using the other modules which are only meant for demonstration purposes. - driver-tests: tests for the java-driver. **Useful links:** -- JIRA (bug tracking): https://datastax-oss.atlassian.net/browse/JAVA -- MAILING LIST: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user -- IRC: #datastax-drivers on [irc.freenode.net](http://freenode.net) -- TWITTER: [@dsJavaDriver](https://twitter.com/dsJavaDriver) tweets Java - driver releases and important announcements (low frequency). - [@DataStaxEng](https://twitter.com/datastaxeng) has more news including - other drivers, Cassandra, and DSE. -- DOCS: the [manual](http://docs.datastax.com/en/developer/java-driver/3.2/manual/) has quick +- JIRA (bug tracking): https://issues.apache.org/jira/projects/CASSJAVA +- MAILING LIST: https://cassandra.apache.org/_/community.html#discussions +- DOCS: the [manual](http://docs.datastax.com/en/developer/java-driver/3.11/manual/) has quick start material and technical details about the driver and its features. -- API: http://www.datastax.com/drivers/java/3.2 +- API: https://docs.datastax.com/en/drivers/java/3.11 +- GITHUB REPOSITORY: https://github.com/apache/cassandra-java-driver - [changelog](changelog/) -- [binary tarball](http://downloads.datastax.com/java-driver/cassandra-java-driver-3.3.0.tar.gz) - -**Feeback requested:** help us focus our efforts, provide your input on the [Platform and Runtime Survey](http://goo.gl/forms/qwUE6qnL7U) (we kept it short). ## Getting the driver The last release of the driver is available on Maven Central. You can install -it in your application using the following Maven dependency: +it in your application using the following Maven dependency (_if +using DataStax Enterprise, install the [DataStax Enterprise Java Driver][dse-driver] instead_): ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core - 3.3.0 + 3.12.1 ``` @@ -76,9 +73,9 @@ Note that the object mapper is published as a separate artifact: ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-mapping - 3.3.0 + 3.12.1 ``` @@ -86,9 +83,9 @@ The 'extras' module is also published as a separate artifact: ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-extras - 3.3.0 + 3.12.1 ``` @@ -97,25 +94,25 @@ We also provide a [shaded JAR](manual/shaded_jar/) to avoid the explicit dependency to Netty. If you can't use a dependency management tool, a -[binary tarball](http://downloads.datastax.com/java-driver/cassandra-java-driver-3.3.0.tar.gz) +[binary tarball](https://cassandra.apache.org/_/download.html) is available for download. 
## Compatibility -The Java client driver 3.3.0 ([branch 3.x](https://github.com/datastax/java-driver/tree/3.x)) is compatible with Apache -Cassandra 1.2, 2.0, 2.1, 2.2 and 3.0 (see [this page](http://datastax.github.io/java-driver/manual/native_protocol) for +The Java client driver 3.12.1 ([branch 3.x](https://github.com/apache/cassandra-java-driver/tree/3.x)) is compatible with Apache +Cassandra 2.1, 2.2 and 3.0+ (see [this page](http://docs.datastax.com/en/developer/java-driver/3.11/manual/native_protocol/) for the most up-to-date compatibility information). UDT and tuple support is available only when using Apache Cassandra 2.1 or higher (see [CQL improvements in Cassandra 2.1](http://www.datastax.com/dev/blog/cql-in-2-1)). Other features are available only when using Apache Cassandra 2.0 or higher (e.g. result set paging, -[BatchStatement](https://github.com/datastax/java-driver/blob/3.x/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java), +[BatchStatement](https://github.com/apache/cassandra-java-driver/blob/3.x/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java), [lightweight transactions](http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_ltweight_transaction_t.html) -- see [What's new in Cassandra 2.0](http://www.datastax.com/documentation/cassandra/2.0/cassandra/features/features_key_c.html)). Trying to use these with a cluster running Cassandra 1.2 will result in -an [UnsupportedFeatureException](https://github.com/datastax/java-driver/blob/3.x/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java) being thrown. +an [UnsupportedFeatureException](https://github.com/apache/cassandra-java-driver/blob/3.x/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java) being thrown. -__Note__: DataStax products do not support big-endian systems. +The java driver supports Java JDK versions 6 and above. ## Upgrading from previous versions @@ -123,23 +120,11 @@ If you are upgrading from a previous version of the driver, be sure to have a lo the [upgrade guide](/upgrade_guide/). -### Troubleshooting - -If you are having issues connecting to the cluster (seeing `NoHostAvailableConnection` exceptions) please check the -[connection requirements](https://github.com/datastax/java-driver/wiki/Connection-requirements). - - -## License -Copyright 2012-2015, DataStax +---- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Apache Cassandra, Apache, Tomcat, Lucene, Solr, Hadoop, Spark, TinkerPop, and Cassandra are +trademarks of the [Apache Software Foundation](http://www.apache.org/) or its subsidiaries in +Canada, the United States and/or other countries. -http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +Binary artifacts of this product bundle Java Native Runtime libraries, which is available under the Eclipse Public License version 2.0. 
diff --git a/build.yaml b/build.yaml deleted file mode 100644 index 956c97fe3b2..00000000000 --- a/build.yaml +++ /dev/null @@ -1,84 +0,0 @@ -schedules: - commit: - # Run short suite on commit with enough C* versions to get full protocol version coverage. - schedule: per_commit - matrix: - exclude: - # Exclude all java 7 builds - - java: oraclejdk7 - # Exclude java6 with all versions except latest - - java: openjdk6 - cassandra: ['1.2', '2.0', '2.1', '2.2', '3.0'] - env_vars: | - TEST_GROUP="short" - disable_commit_status: true - nightly: - # Run full suite nightly on change for all primary branches if they have changes. - schedule: nightly - branches: - # regex matches primary branch format (2.1, 3.x, 3.0.x, 3.1.x, etc). - include: ["/\\d+(\\.[\\dx]+)+/"] - env_vars: | - TEST_GROUP="long" - disable_commit_status: true - adhoc: - # Adhoc job for non-primary braches that doesn't have a schedule but may be used to run all configs. - schedule: adhoc - branches: - # regex matches primary branch format (2.1, 3.x, 3.0.x, 3.1.x, etc). - exclude: ["/\\d+(\\.[\\dx]+)+/"] - env_vars: | - TEST_GROUP="long" - disable_commit_status: true -java: - - openjdk6 - - oraclejdk7 - - oraclejdk8 -os: - - ubuntu/trusty64/m3.large -cassandra: - - '1.2' - - '2.0' - - '2.1' - - '2.2' - - '3.0' - - '3.10' -build: - - script: | - . /usr/local/bin/jdk_switcher.sh - jdk_switcher use oraclejdk8 - export MAVEN_HOME=/home/jenkins/.mvn/apache-maven-3.2.5 - export PATH=$MAVEN_HOME/bin:$PATH - mvn -B -V install -DskipTests - - script: | - . /usr/local/bin/jdk_switcher.sh - jdk_switcher use $JAVA_VERSION - - type: maven - version: 3.2.5 - goals: verify --fail-never -P$TEST_GROUP - properties: | - com.datastax.driver.TEST_BASE_NODE_WAIT=120 - com.datastax.driver.NEW_NODE_DELAY_SECONDS=100 - cassandra.version=$CCM_CASSANDRA_VERSION - ccm.java.home=$CCM_JAVA_HOME - ccm.path=$CCM_JAVA_HOME/bin - ccm.maxNumberOfNodes=3 - failIfNoTests=false - maven.test.failure.ignore=true - maven.javadoc.skip=true - - type: maven - version: 3.2.5 - goals: verify --fail-never -Pisolated - properties: | - com.datastax.driver.TEST_BASE_NODE_WAIT=120 - com.datastax.driver.NEW_NODE_DELAY_SECONDS=100 - cassandra.version=$CCM_CASSANDRA_VERSION - ccm.java.home=$CCM_JAVA_HOME - ccm.path=$CCM_JAVA_HOME/bin - ccm.maxNumberOfNodes=3 - failIfNoTests=false - maven.test.failure.ignore=true - maven.javadoc.skip=true - - xunit: - - "**/target/surefire-reports/TEST-*.xml" - - "**/target/failsafe-reports/TEST-*.xml" diff --git a/changelog/README.md b/changelog/README.md index 3320ae0f026..c8c12cffdbf 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,5 +1,257 @@ + + ## Changelog + + +## 3.12.1 +- [improvement] CASSJAVA-55: Remove setting "Host" header for metadata requests. 
+- [bug] JAVA-3125: Match broadcast RPC for control connection and Astra events + +## 3.12.0 +- [improvement] CASSANDRA-18971: Switch all archs to netty-tcnative-boringssl-static +- [improvement] CASSJAVA-58: Update 3.x DRIVER_NAME to match 4.x Java driver + +## 3.11.5 +- [improvement] JAVA-3114: Shade io.dropwizard.metrics:metrics-core in shaded driver +- [improvement] JAVA-3115: SchemaChangeListener#onKeyspaceChanged can fire when keyspace has not changed if using SimpleStrategy replication + +## 3.11.4 +- [improvement] JAVA-3079: Upgrade Netty to 4.1.94, 3.x edition +- [improvement] JAVA-3082: Fix maven build for Apple-silicon +- [improvement] PR 1671: Fix LatencyAwarePolicy scale docstring + +## 3.11.3 +- [improvement] JAVA-3023: Upgrade Netty to 4.1.77, 3.x edition + + +## 3.11.2 +- [improvement] JAVA-3008: Upgrade Netty to 4.1.75, 3.x edition +- [improvement] JAVA-2984: Upgrade Jackson to resolve high-priority CVEs + + +## 3.11.1 +- [bug] JAVA-2967: Support native transport peer information for DSE 6.8. +- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE. + + +## 3.11.0 + +- [improvement] JAVA-2705: Remove protocol v5 beta status, add v6-beta. +- [bug] JAVA-2923: Detect and use Guava's new HostAndPort.getHost method. +- [bug] JAVA-2922: Switch to modern framing format inside a channel handler. +- [bug] JAVA-2924: Consider protocol version unsupported when server requires USE_BETA flag for it. + + +## 3.10.2 + +- [bug] JAVA-2860: Avoid NPE if channel initialization crashes. + + +## 3.10.1 + +- [bug] JAVA-2857: Fix NPE when built statements without parameters are logged at TRACE level. +- [bug] JAVA-2843: Successfully parse DSE table schema in OSS driver. + + +## 3.10.0 + +- [improvement] JAVA-2676: Don't reschedule flusher after empty runs. +- [new feature] JAVA-2772: Support new protocol v5 message format. + + +## 3.9.0 + +- [bug] JAVA-2627: Avoid logging error message including stack trace in request handler. +- [new feature] JAVA-2706: Add now_in_seconds to protocol v5 query messages. +- [improvement] JAVA-2730: Add support for Cassandra® 4.0 table options +- [improvement] JAVA-2702: Transient Replication Support for Cassandra® 4.0 + + +## 3.8.0 + +- [new feature] JAVA-2356: Support for DataStax Cloud API. +- [improvement] JAVA-2483: Allow to provide secure bundle via URL. +- [improvement] JAVA-2499: Allow to read the secure bundle from an InputStream. +- [improvement] JAVA-2457: Detect CaaS and change default consistency. +- [improvement] JAVA-2485: Add errors for Cloud misconfiguration. +- [documentation] JAVA-2504: Migrate Cloud "getting started" page to driver manual. +- [improvement] JAVA-2516: Enable hostname validation with Cloud +- [bug] JAVA-2515: NEW_NODE and REMOVED_NODE events should trigger ADDED and REMOVED. + + +### 3.7.2 + +- [bug] JAVA-2249: Stop stripping trailing zeros in ByteOrderedTokens. +- [bug] JAVA-1492: Don't immediately reuse busy connections for another request. +- [bug] JAVA-2198: Handle UDTs with names that clash with collection types. +- [bug] JAVA-2204: Avoid memory leak when client holds onto a stale TableMetadata instance. + + +### 3.7.1 + +- [bug] JAVA-2174: Metadata.needsQuote should accept empty strings. +- [bug] JAVA-2193: Fix flaky tests in WarningsTest. + + +### 3.7.0 + +- [improvement] JAVA-2025: Include exception message in Abstract\*Codec.accepts(null). +- [improvement] JAVA-1980: Use covariant return types in RemoteEndpointAwareJdkSSLOptions.Builder methods. 
+- [documentation] JAVA-2062: Document frozen collection preference with Mapper. +- [bug] JAVA-2071: Fix NPE in ArrayBackedRow.toString(). +- [bug] JAVA-2070: Call onRemove instead of onDown when rack and/or DC information changes for a host. +- [improvement] JAVA-1256: Log parameters of BuiltStatement in QueryLogger. +- [documentation] JAVA-2074: Document preference for LZ4 over Snappy. +- [bug] JAVA-1612: Include netty-common jar in binary tarball. +- [improvement] JAVA-2003: Simplify CBUtil internal API to improve performance. +- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance. +- [documentation] JAVA-2041: Deprecate cross-DC failover in DCAwareRoundRobinPolicy. +- [documentation] JAVA-1159: Document workaround for using tuple with udt field in Mapper. +- [documentation] JAVA-1964: Complete remaining "Coming Soon" sections in docs. +- [improvement] JAVA-1950: Log server side warnings returned from a query. +- [improvement] JAVA-2123: Allow to use QueryBuilder for building queries against Materialized Views. +- [bug] JAVA-2082: Avoid race condition during cluster close and schema refresh. + + +### 3.6.0 + +- [improvement] JAVA-1394: Add request-queue-depth metric. +- [improvement] JAVA-1857: Add Statement.setHost. +- [bug] JAVA-1920: Use nanosecond precision in LocalTimeCodec#format(). +- [bug] JAVA-1794: Driver tries to create a connection array of size -1. +- [new feature] JAVA-1899: Support virtual tables. +- [bug] JAVA-1908: TableMetadata.asCQLQuery does not add table option 'memtable_flush_period_in_ms' in the generated query. +- [bug] JAVA-1924: StatementWrapper setters should return the wrapping statement. +- [new feature] JAVA-1532: Add Codec support for Java 8's LocalDateTime and ZoneId. +- [improvement] JAVA-1786: Use Google code formatter. +- [bug] JAVA-1871: Change LOCAL\_SERIAL.isDCLocal() to return true. +- [documentation] JAVA-1902: Clarify unavailable & request error in DefaultRetryPolicy javadoc. +- [new feature] JAVA-1903: Add WhiteListPolicy.ofHosts. +- [bug] JAVA-1928: Fix GuavaCompatibility for Guava 26. +- [bug] JAVA-1935: Add null check in QueryConsistencyException.getHost. +- [improvement] JAVA-1771: Send driver name and version in STARTUP message. +- [improvement] JAVA-1388: Add dynamic port discovery for system.peers\_v2. +- [documentation] JAVA-1810: Note which setters are not propagated to PreparedStatement. +- [bug] JAVA-1944: Surface Read and WriteFailureException to RetryPolicy. +- [bug] JAVA-1211: Fix NPE in cluster close when cluster init fails. +- [bug] JAVA-1220: Fail fast on cluster init if previous init failed. +- [bug] JAVA-1929: Preempt session execute queries if session was closed. + +Merged from 3.5.x: + +- [bug] JAVA-1872: Retain table's views when processing table update. + + +### 3.5.0 + +- [improvement] JAVA-1448: TokenAwarePolicy should respect child policy ordering. +- [bug] JAVA-1751: Include defaultTimestamp length in encodedSize for protocol version >= 3. +- [bug] JAVA-1770: Fix message size when using Custom Payload. +- [documentation] JAVA-1760: Add metrics documentation. +- [improvement] JAVA-1765: Update dependencies to latest patch versions. +- [improvement] JAVA-1752: Deprecate DowngradingConsistencyRetryPolicy. +- [improvement] JAVA-1735: Log driver version on first use. +- [documentation] JAVA-1380: Add FAQ entry for errors arising from incompatibilities. +- [improvement] JAVA-1748: Support IS NOT NULL and != in query builder. 
+- [documentation] JAVA-1740: Mention C*2.2/3.0 incompatibilities in paging state manual. +- [improvement] JAVA-1725: Add a getNodeCount method to CCMAccess for easier automation. +- [new feature] JAVA-708: Add means to measure request sizes. +- [documentation] JAVA-1788: Add example for enabling host name verification to SSL docs. +- [improvement] JAVA-1791: Revert "JAVA-1677: Warn if auth is configured on the client but not the server." +- [bug] JAVA-1789: Account for flags in Prepare encodedSize. +- [bug] JAVA-1797: Use jnr-ffi version required by jnr-posix. + + +### 3.4.0 + +- [improvement] JAVA-1671: Remove unnecessary test on prepared statement metadata. +- [bug] JAVA-1694: Upgrade to jackson-databind 2.7.9.2 to address CVE-2017-15095. +- [documentation] JAVA-1685: Clarify recommendation on preparing SELECT *. +- [improvement] JAVA-1679: Improve error message on batch log write timeout. +- [improvement] JAVA-1672: Remove schema agreement check when repreparing on up. +- [improvement] JAVA-1677: Warn if auth is configured on the client but not the server. +- [new feature] JAVA-1651: Add NO_COMPACT startup option. +- [improvement] JAVA-1683: Add metrics to track writes to nodes. +- [new feature] JAVA-1229: Allow specifying the keyspace for individual queries. +- [improvement] JAVA-1682: Provide a way to record latencies for cancelled speculative executions. +- [improvement] JAVA-1717: Add metrics to latency-aware policy. +- [improvement] JAVA-1675: Remove dates from copyright headers. + +Merged from 3.3.x: + +- [bug] JAVA-1555: Include VIEW and CDC in WriteType. +- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) +- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery +- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. +- [bug] JAVA-1438: QueryBuilder check for empty orderings. +- [improvement] JAVA-1490: Allow zero delay for speculative executions. +- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. +- [bug] JAVA-1630: Fix Metadata.addIfAbsent. +- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. +- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. +- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. +- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. +- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. +- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. +- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. +- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. +- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. +- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. +- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. +- [improvement] JAVA-1659: Expose low-level flusher tuning options. + + +### 3.3.2 + +- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. +- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. +- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. +- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. +- [improvement] JAVA-1659: Expose low-level flusher tuning options.
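(Illustration for the 3.4.0 entry JAVA-1651 above: opting into the NO_COMPACT startup option when building the cluster. A minimal sketch, assuming the `withNoCompact()` builder method that accompanies this feature; the contact point is a placeholder.)

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class NoCompactSketch {
  public static void main(String[] args) {
    // "127.0.0.1" is a placeholder contact point. withNoCompact() sends the
    // NO_COMPACT startup option so that COMPACT STORAGE tables are exposed in
    // their expanded form, which is mainly useful when migrating such tables.
    try (Cluster cluster =
            Cluster.builder().addContactPoint("127.0.0.1").withNoCompact().build();
        Session session = cluster.connect()) {
      System.out.println(session.execute("SELECT release_version FROM system.local").one());
    }
  }
}
```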
+- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. + + +### 3.3.1 + +- [bug] JAVA-1555: Include VIEW and CDC in WriteType. +- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) +- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery +- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. +- [bug] JAVA-1438: QueryBuilder check for empty orderings. +- [improvement] JAVA-1490: Allow zero delay for speculative executions. +- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. +- [bug] JAVA-1630: Fix Metadata.addIfAbsent. +- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. +- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. +- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. +- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. +- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. +- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. +- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. + + ### 3.3.0 - [bug] JAVA-1469: Update LoggingRetryPolicy to deal with SLF4J-353. diff --git a/ci/appveyor.ps1 b/ci/appveyor.ps1 index bc1d95b69f7..c0db375e05c 100644 --- a/ci/appveyor.ps1 +++ b/ci/appveyor.ps1 @@ -1,3 +1,22 @@ +<# + # Licensed to the Apache Software Foundation (ASF) under one + # or more contributor license agreements. See the NOTICE file + # distributed with this work for additional information + # regarding copyright ownership. The ASF licenses this file + # to you under the Apache License, Version 2.0 (the + # "License"); you may not use this file except in compliance + # with the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, + # software distributed under the License is distributed on an + # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + # KIND, either express or implied. See the License for the + # specific language governing permissions and limitations + # under the License. + #> + Add-Type -AssemblyName System.IO.Compression.FileSystem $dep_dir="C:\Users\appveyor\deps" diff --git a/ci/appveyor.yml b/ci/appveyor.yml index 81dd5b01958..bd621defb76 100644 --- a/ci/appveyor.yml +++ b/ci/appveyor.yml @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+ environment: test_profile: default matrix: diff --git a/ci/create-user.sh b/ci/create-user.sh new file mode 100644 index 00000000000..fb193df9a00 --- /dev/null +++ b/ci/create-user.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################ +# +# Prep +# +################################ + +if [ "$1" == "-h" ]; then + echo "$0 [-h] " + echo " this script is used internally by other scripts in the same directory to create a user with the running host user's same uid and gid" + exit 1 +fi + +# arguments +username=$1 +uid=$2 +gid=$3 +BUILD_HOME=$4 + +################################ +# +# Main +# +################################ + +# disable git directory ownership checks +su ${username} -c "git config --global safe.directory '*'" + +if grep "^ID=" /etc/os-release | grep -q 'debian\|ubuntu' ; then + deluser docker + adduser --quiet --disabled-login --no-create-home --uid $uid --gecos ${username} ${username} + groupmod --non-unique -g $gid $username + gpasswd -a ${username} sudo >/dev/null +else + adduser --no-create-home --uid $uid ${username} +fi + +# sudo privileges +echo "${username} ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/${username} +chmod 0440 /etc/sudoers.d/${username} + +# proper permissions +chown -R ${username}:${username} /home/docker +chmod og+wx ${BUILD_HOME} \ No newline at end of file diff --git a/ci/run-tests.sh b/ci/run-tests.sh new file mode 100755 index 00000000000..b1bf201442b --- /dev/null +++ b/ci/run-tests.sh @@ -0,0 +1,9 @@ +#!/bin/bash -x + +. ~/env.txt +printenv | sort +cd $(dirname "$(readlink -f "$0")")/.. +mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true +jabba use ${TEST_JAVA_VERSION} +printenv | sort +mvn -B -V verify -T 1 -Dcassandra.version=${SERVER_VERSION} -Ddse=false -Dmaven.test.failure.ignore=true -Dmaven.javadoc.skip=true; diff --git a/ci/uploadtests.ps1 b/ci/uploadtests.ps1 index cf88b16229c..aaf7279ed46 100644 --- a/ci/uploadtests.ps1 +++ b/ci/uploadtests.ps1 @@ -1,3 +1,22 @@ +<# + # Licensed to the Apache Software Foundation (ASF) under one + # or more contributor license agreements. See the NOTICE file + # distributed with this work for additional information + # regarding copyright ownership. The ASF licenses this file + # to you under the Apache License, Version 2.0 (the + # "License"); you may not use this file except in compliance + # with the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, + # software distributed under the License is distributed on an + # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + # KIND, either express or implied.
See the License for the + # specific language governing permissions and limitations + # under the License. + #> + $testResults=Get-ChildItem TEST-TestSuite.xml -Recurse Write-Host "Uploading test results." diff --git a/clirr-ignores.xml b/clirr-ignores.xml index 5b59573cc25..271bd6f7773 100644 --- a/clirr-ignores.xml +++ b/clirr-ignores.xml @@ -1,3 +1,23 @@ + + + com/datastax/driver/core/FrameCompressor$LZ4Compressor False positive, the enclosing class is package-private so this was never exposed + + 7005 + com/datastax/driver/core/querybuilder/QueryBuilder + + * + * + Relaxed parameters from List to Iterable for in, lt, lte, eq, gt, and gte + + + 7005 + com/datastax/driver/core/exceptions/AlreadyExistsException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/AuthenticationException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/BootstrappingException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/BusyConnectionException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/BusyPoolException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 6001 + com/datastax/driver/core/exceptions/ConnectionException + address + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/ConnectionException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7012 + com/datastax/driver/core/exceptions/CoordinatorException + com.datastax.driver.core.EndPoint getEndPoint() + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/FunctionExecutionException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/InvalidQueryException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/OperationTimedOutException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/OperationTimedOutException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + 
com/datastax/driver/core/exceptions/OverloadedException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/ProtocolError + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/QueryConsistencyException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/ReadFailureException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/ReadTimeoutException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/ServerError + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/SyntaxError + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/TransportException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/TruncateException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/UnauthorizedException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/UnavailableException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/UnpreparedException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/UnsupportedProtocolVersionException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/WriteFailureException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + + + 7005 + com/datastax/driver/core/exceptions/WriteTimeoutException + *java.net.InetSocketAddress* + *com.datastax.driver.core.EndPoint* + JAVA-2355: Abstract connection information into new EndPoint type for sni support + diff --git a/docs.yaml b/docs.yaml index 8b47643224b..4b30dcd5813 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
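(Illustration for the JAVA-2355 clirr ignores above: coordinator exceptions now expose an `EndPoint` through `getEndPoint()` instead of a raw `InetSocketAddress`. A minimal sketch of client-side handling; the query string and logging below are purely illustrative.)

```java
import com.datastax.driver.core.EndPoint;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.CoordinatorException;
import com.datastax.driver.core.exceptions.DriverException;

public class EndPointHandlingSketch {

  /** Executes a statement and reports the coordinator endpoint on failure. */
  static void executeAndReport(Session session, String cql) {
    try {
      session.execute(cql);
    } catch (DriverException e) {
      // Since JAVA-2355, exceptions raised by a coordinator carry an EndPoint
      // (which may be SNI-based) rather than a plain InetSocketAddress.
      if (e instanceof CoordinatorException) {
        EndPoint coordinator = ((CoordinatorException) e).getEndPoint();
        System.err.println("Failed on coordinator " + coordinator + ": " + e.getMessage());
      }
      throw e;
    }
  }
}
```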
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + title: Java Driver for Apache Cassandra summary: High performance Java client for Apache Cassandra homepage: http://datastax.github.io/java-driver/ @@ -33,7 +50,7 @@ sections: files: 'faq/**/*.md' links: - title: Code - href: https://github.com/datastax/java-driver/ + href: https://github.com/apache/cassandra-java-driver/ - title: Docs href: http://docs.datastax.com/en/developer/java-driver/ - title: Issues @@ -54,7 +71,7 @@ versions: - name: '3.3' ref: '3.3.0' - name: '4.0-alpha' - ref: '4.0.0-alpha1' + ref: '9f0edeb' - name: '3.2' ref: '3.2_docfixes' - name: '3.1' diff --git a/doxyfile b/doxyfile index 414bdbd7ec4..d64c6fe1d82 100644 --- a/doxyfile +++ b/doxyfile @@ -1,10 +1,30 @@ + + + # Doxyfile 1.8.10 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- DOXYFILE_ENCODING = UTF-8 -PROJECT_NAME = "DataStax Java Driver" +PROJECT_NAME = "Java Driver" PROJECT_NUMBER = PROJECT_BRIEF = PROJECT_LOGO = @@ -333,4 +353,4 @@ MAX_DOT_GRAPH_DEPTH = 0 DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES -DOT_CLEANUP = YES \ No newline at end of file +DOT_CLEANUP = YES diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 0cb71e8ef08..45aff62c17d 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -1,12 +1,14 @@ + + org.apache.maven.plugins + maven-source-plugin + + + attach-test-sources + + test-jar-no-fork + + + + org.apache.felix maven-bundle-plugin @@ -216,7 +250,7 @@ ${project.build.outputDirectory}/META-INF - + @@ -234,8 +268,9 @@ JNR does not provide OSGi bundles, so exclude it; the driver can live without it Explicitly import javax.security.cert because it's required by Netty, but Netty has been explicitly excluded --> - + com.datastax.shaded.* + com.datastax.shaded.metrics;* @@ -249,6 +284,7 @@ io.netty:* + io.dropwizard.metrics:metrics-core io.netty:netty-transport-native-epoll @@ -259,6 +295,10 @@ io.netty com.datastax.shaded.netty + + com.codahale.metrics + com.datastax.shaded.metrics + @@ -275,6 +315,8 @@ META-INF/maven/io.netty/netty-handler/pom.xml META-INF/maven/io.netty/netty-transport/pom.properties META-INF/maven/io.netty/netty-transport/pom.xml + META-INF/maven/io.dropwizard.metrics/metrics-core/pom.properties + META-INF/maven/io.dropwizard.metrics/metrics-core/pom.xml diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java index 3cc9fb75592..689b41d0b25 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractAddressableByIndexData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax 
Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,281 +19,275 @@ import com.datastax.driver.core.utils.MoreObjects; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; - -abstract class AbstractAddressableByIndexData> extends AbstractGettableByIndexData implements SettableByIndexData { - - final ByteBuffer[] values; - - protected AbstractAddressableByIndexData(ProtocolVersion protocolVersion, int size) { - super(protocolVersion); - this.values = new ByteBuffer[size]; - } - - @SuppressWarnings("unchecked") - protected T setValue(int i, ByteBuffer value) { - values[i] = value; - return (T) this; - } - - @Override - protected ByteBuffer getValue(int i) { - return values[i]; - } - - @Override - public T setBool(int i, boolean v) { - TypeCodec codec = codecFor(i, Boolean.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveBooleanCodec) - bb = ((TypeCodec.PrimitiveBooleanCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setByte(int i, byte v) { - TypeCodec codec = codecFor(i, Byte.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveByteCodec) - bb = ((TypeCodec.PrimitiveByteCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setShort(int i, short v) { - TypeCodec codec = codecFor(i, Short.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveShortCodec) - bb = ((TypeCodec.PrimitiveShortCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setInt(int i, int v) { - TypeCodec codec = codecFor(i, Integer.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveIntCodec) - bb = ((TypeCodec.PrimitiveIntCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setLong(int i, long v) { - TypeCodec codec = codecFor(i, Long.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveLongCodec) - bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setTimestamp(int i, Date v) { - return setValue(i, codecFor(i, Date.class).serialize(v, protocolVersion)); - } - - @Override - public T setDate(int i, LocalDate v) { - return setValue(i, codecFor(i, LocalDate.class).serialize(v, 
protocolVersion)); - } - - @Override - public T setTime(int i, long v) { - TypeCodec codec = codecFor(i, Long.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveLongCodec) - bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setFloat(int i, float v) { - TypeCodec codec = codecFor(i, Float.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveFloatCodec) - bb = ((TypeCodec.PrimitiveFloatCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setDouble(int i, double v) { - TypeCodec codec = codecFor(i, Double.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveDoubleCodec) - bb = ((TypeCodec.PrimitiveDoubleCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setString(int i, String v) { - return setValue(i, codecFor(i, String.class).serialize(v, protocolVersion)); - } - - @Override - public T setBytes(int i, ByteBuffer v) { - return setValue(i, codecFor(i, ByteBuffer.class).serialize(v, protocolVersion)); - } - - @Override - public T setBytesUnsafe(int i, ByteBuffer v) { - return setValue(i, v == null ? null : v.duplicate()); - } - - @Override - public T setVarint(int i, BigInteger v) { - return setValue(i, codecFor(i, BigInteger.class).serialize(v, protocolVersion)); - } - - @Override - public T setDecimal(int i, BigDecimal v) { - return setValue(i, codecFor(i, BigDecimal.class).serialize(v, protocolVersion)); - } - - @Override - public T setUUID(int i, UUID v) { - return setValue(i, codecFor(i, UUID.class).serialize(v, protocolVersion)); - } - - @Override - public T setInet(int i, InetAddress v) { - return setValue(i, codecFor(i, InetAddress.class).serialize(v, protocolVersion)); - } - - @Override - @SuppressWarnings("unchecked") - public T setList(int i, List v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setList(int i, List v, Class elementsClass) { - return setValue(i, codecFor(i, TypeTokens.listOf(elementsClass)).serialize(v, protocolVersion)); - } - - @Override - public T setList(int i, List v, TypeToken elementsType) { - return setValue(i, codecFor(i, TypeTokens.listOf(elementsType)).serialize(v, protocolVersion)); - } - - @Override - @SuppressWarnings("unchecked") - public T setMap(int i, Map v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setMap(int i, Map v, Class keysClass, Class valuesClass) { - return setValue(i, codecFor(i, TypeTokens.mapOf(keysClass, valuesClass)).serialize(v, protocolVersion)); - } - - @Override - public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType) { - return setValue(i, codecFor(i, TypeTokens.mapOf(keysType, valuesType)).serialize(v, protocolVersion)); - } - - @Override - @SuppressWarnings("unchecked") - public T setSet(int i, Set v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setSet(int i, Set v, Class elementsClass) { - return setValue(i, codecFor(i, TypeTokens.setOf(elementsClass)).serialize(v, protocolVersion)); - } - - @Override - public T setSet(int i, Set v, TypeToken elementsType) { - return setValue(i, codecFor(i, TypeTokens.setOf(elementsType)).serialize(v, protocolVersion)); - } - - 
@Override - public T setUDTValue(int i, UDTValue v) { - return setValue(i, codecFor(i, UDTValue.class).serialize(v, protocolVersion)); - } - - @Override - public T setTupleValue(int i, TupleValue v) { - return setValue(i, codecFor(i, TupleValue.class).serialize(v, protocolVersion)); - } - - @Override - public T set(int i, V v, Class targetClass) { - return set(i, v, codecFor(i, targetClass)); - } - - @Override - public T set(int i, V v, TypeToken targetType) { - return set(i, v, codecFor(i, targetType)); - } - - @Override - public T set(int i, V v, TypeCodec codec) { - checkType(i, codec.getCqlType().getName()); - return setValue(i, codec.serialize(v, protocolVersion)); - } - - @Override - public T setToNull(int i) { - return setValue(i, null); - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof AbstractAddressableByIndexData)) - return false; - - AbstractAddressableByIndexData that = (AbstractAddressableByIndexData) o; - if (values.length != that.values.length) - return false; - - if (this.protocolVersion != that.protocolVersion) - return false; - - // Deserializing each value is slightly inefficient, but comparing - // the bytes could in theory be wrong (for varint for instance, 2 values - // can have different binary representation but be the same value due to - // leading zeros). So we don't take any risk. - for (int i = 0; i < values.length; i++) { - DataType thisType = getType(i); - DataType thatType = that.getType(i); - if (!thisType.equals(thatType)) - return false; - - Object thisValue = this.codecFor(i).deserialize(this.values[i], this.protocolVersion); - Object thatValue = that.codecFor(i).deserialize(that.values[i], that.protocolVersion); - if (!MoreObjects.equal(thisValue, thatValue)) - return false; - } - return true; - } - - @Override - public int hashCode() { - // Same as equals - int hash = 31; - for (int i = 0; i < values.length; i++) - hash += values[i] == null ? 
1 : codecFor(i).deserialize(values[i], protocolVersion).hashCode(); - return hash; - } +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +abstract class AbstractAddressableByIndexData> + extends AbstractGettableByIndexData implements SettableByIndexData { + + final ByteBuffer[] values; + + protected AbstractAddressableByIndexData(ProtocolVersion protocolVersion, int size) { + super(protocolVersion); + this.values = new ByteBuffer[size]; + } + + @SuppressWarnings("unchecked") + protected T setValue(int i, ByteBuffer value) { + values[i] = value; + return (T) this; + } + + @Override + protected ByteBuffer getValue(int i) { + return values[i]; + } + + @Override + public T setBool(int i, boolean v) { + TypeCodec codec = codecFor(i, Boolean.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveBooleanCodec) + bb = ((TypeCodec.PrimitiveBooleanCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setByte(int i, byte v) { + TypeCodec codec = codecFor(i, Byte.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveByteCodec) + bb = ((TypeCodec.PrimitiveByteCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setShort(int i, short v) { + TypeCodec codec = codecFor(i, Short.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveShortCodec) + bb = ((TypeCodec.PrimitiveShortCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setInt(int i, int v) { + TypeCodec codec = codecFor(i, Integer.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveIntCodec) + bb = ((TypeCodec.PrimitiveIntCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setLong(int i, long v) { + TypeCodec codec = codecFor(i, Long.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveLongCodec) + bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setTimestamp(int i, Date v) { + return setValue(i, codecFor(i, Date.class).serialize(v, protocolVersion)); + } + + @Override + public T setDate(int i, LocalDate v) { + return setValue(i, codecFor(i, LocalDate.class).serialize(v, protocolVersion)); + } + + @Override + public T setTime(int i, long v) { + TypeCodec codec = codecFor(i, Long.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveLongCodec) + bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setFloat(int i, float v) { + TypeCodec codec = codecFor(i, Float.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveFloatCodec) + bb = ((TypeCodec.PrimitiveFloatCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setDouble(int i, double v) { + TypeCodec codec = codecFor(i, Double.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveDoubleCodec) + bb = ((TypeCodec.PrimitiveDoubleCodec) 
codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setString(int i, String v) { + return setValue(i, codecFor(i, String.class).serialize(v, protocolVersion)); + } + + @Override + public T setBytes(int i, ByteBuffer v) { + return setValue(i, codecFor(i, ByteBuffer.class).serialize(v, protocolVersion)); + } + + @Override + public T setBytesUnsafe(int i, ByteBuffer v) { + return setValue(i, v == null ? null : v.duplicate()); + } + + @Override + public T setVarint(int i, BigInteger v) { + return setValue(i, codecFor(i, BigInteger.class).serialize(v, protocolVersion)); + } + + @Override + public T setDecimal(int i, BigDecimal v) { + return setValue(i, codecFor(i, BigDecimal.class).serialize(v, protocolVersion)); + } + + @Override + public T setUUID(int i, UUID v) { + return setValue(i, codecFor(i, UUID.class).serialize(v, protocolVersion)); + } + + @Override + public T setInet(int i, InetAddress v) { + return setValue(i, codecFor(i, InetAddress.class).serialize(v, protocolVersion)); + } + + @Override + @SuppressWarnings("unchecked") + public T setList(int i, List v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setList(int i, List v, Class elementsClass) { + return setValue(i, codecFor(i, TypeTokens.listOf(elementsClass)).serialize(v, protocolVersion)); + } + + @Override + public T setList(int i, List v, TypeToken elementsType) { + return setValue(i, codecFor(i, TypeTokens.listOf(elementsType)).serialize(v, protocolVersion)); + } + + @Override + @SuppressWarnings("unchecked") + public T setMap(int i, Map v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setMap(int i, Map v, Class keysClass, Class valuesClass) { + return setValue( + i, codecFor(i, TypeTokens.mapOf(keysClass, valuesClass)).serialize(v, protocolVersion)); + } + + @Override + public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType) { + return setValue( + i, codecFor(i, TypeTokens.mapOf(keysType, valuesType)).serialize(v, protocolVersion)); + } + + @Override + @SuppressWarnings("unchecked") + public T setSet(int i, Set v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setSet(int i, Set v, Class elementsClass) { + return setValue(i, codecFor(i, TypeTokens.setOf(elementsClass)).serialize(v, protocolVersion)); + } + + @Override + public T setSet(int i, Set v, TypeToken elementsType) { + return setValue(i, codecFor(i, TypeTokens.setOf(elementsType)).serialize(v, protocolVersion)); + } + + @Override + public T setUDTValue(int i, UDTValue v) { + return setValue(i, codecFor(i, UDTValue.class).serialize(v, protocolVersion)); + } + + @Override + public T setTupleValue(int i, TupleValue v) { + return setValue(i, codecFor(i, TupleValue.class).serialize(v, protocolVersion)); + } + + @Override + public T set(int i, V v, Class targetClass) { + return set(i, v, codecFor(i, targetClass)); + } + + @Override + public T set(int i, V v, TypeToken targetType) { + return set(i, v, codecFor(i, targetType)); + } + + @Override + public T set(int i, V v, TypeCodec codec) { + checkType(i, codec.getCqlType().getName()); + return setValue(i, codec.serialize(v, protocolVersion)); + } + + @Override + public T setToNull(int i) { + return setValue(i, null); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractAddressableByIndexData)) return false; + + 
AbstractAddressableByIndexData that = (AbstractAddressableByIndexData) o; + if (values.length != that.values.length) return false; + + if (this.protocolVersion != that.protocolVersion) return false; + + // Deserializing each value is slightly inefficient, but comparing + // the bytes could in theory be wrong (for varint for instance, 2 values + // can have different binary representation but be the same value due to + // leading zeros). So we don't take any risk. + for (int i = 0; i < values.length; i++) { + DataType thisType = getType(i); + DataType thatType = that.getType(i); + if (!thisType.equals(thatType)) return false; + + Object thisValue = this.codecFor(i).deserialize(this.values[i], this.protocolVersion); + Object thatValue = that.codecFor(i).deserialize(that.values[i], that.protocolVersion); + if (!MoreObjects.equal(thisValue, thatValue)) return false; + } + return true; + } + + @Override + public int hashCode() { + // Same as equals + int hash = 31; + for (int i = 0; i < values.length; i++) + hash += + values[i] == null ? 1 : codecFor(i).deserialize(values[i], protocolVersion).hashCode(); + return hash; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java index bbb349c5b6a..8de0ad00037 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,576 +19,572 @@ import com.datastax.driver.core.utils.MoreObjects; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; - -// We don't want to expose this one: it's less useful externally and it's a bit ugly to expose anyway (but it's convenient). -abstract class AbstractData> extends AbstractGettableData implements SettableData { - - final T wrapped; - final ByteBuffer[] values; - - // Ugly, we could probably clean that: it is currently needed however because we sometimes - // want wrapped to be 'this' (UDTValue), and sometimes some other object (in BoundStatement). 
- @SuppressWarnings("unchecked") - protected AbstractData(ProtocolVersion protocolVersion, int size) { - super(protocolVersion); - this.wrapped = (T) this; - this.values = new ByteBuffer[size]; - } - - protected AbstractData(ProtocolVersion protocolVersion, T wrapped, int size) { - this(protocolVersion, wrapped, new ByteBuffer[size]); - } - - protected AbstractData(ProtocolVersion protocolVersion, T wrapped, ByteBuffer[] values) { - super(protocolVersion); - this.wrapped = wrapped; - this.values = values; - } - - protected abstract int[] getAllIndexesOf(String name); - - protected T setValue(int i, ByteBuffer value) { - values[i] = value; - return wrapped; - } - - @Override - protected ByteBuffer getValue(int i) { - return values[i]; - } - - @Override - protected int getIndexOf(String name) { - return getAllIndexesOf(name)[0]; - } - - @Override - public T setBool(int i, boolean v) { - TypeCodec codec = codecFor(i, Boolean.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveBooleanCodec) - bb = ((TypeCodec.PrimitiveBooleanCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setBool(String name, boolean v) { - for (int i : getAllIndexesOf(name)) { - setBool(i, v); - } - return wrapped; - } - - @Override - public T setByte(int i, byte v) { - TypeCodec codec = codecFor(i, Byte.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveByteCodec) - bb = ((TypeCodec.PrimitiveByteCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setByte(String name, byte v) { - for (int i : getAllIndexesOf(name)) { - setByte(i, v); - } - return wrapped; - } - - @Override - public T setShort(int i, short v) { - TypeCodec codec = codecFor(i, Short.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveShortCodec) - bb = ((TypeCodec.PrimitiveShortCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setShort(String name, short v) { - for (int i : getAllIndexesOf(name)) { - setShort(i, v); - } - return wrapped; - } - - @Override - public T setInt(int i, int v) { - TypeCodec codec = codecFor(i, Integer.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveIntCodec) - bb = ((TypeCodec.PrimitiveIntCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setInt(String name, int v) { - for (int i : getAllIndexesOf(name)) { - setInt(i, v); - } - return wrapped; - } - - @Override - public T setLong(int i, long v) { - TypeCodec codec = codecFor(i, Long.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveLongCodec) - bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setLong(String name, long v) { - for (int i : getAllIndexesOf(name)) { - setLong(i, v); - } - return wrapped; - } - - @Override - public T setTimestamp(int i, Date v) { - return setValue(i, codecFor(i, Date.class).serialize(v, protocolVersion)); - } - - @Override - public T setTimestamp(String name, Date v) { - for (int i : getAllIndexesOf(name)) { - setTimestamp(i, v); - } - return wrapped; - } - - @Override - public T setDate(int i, LocalDate v) { - 
return setValue(i, codecFor(i, LocalDate.class).serialize(v, protocolVersion)); - } - - @Override - public T setDate(String name, LocalDate v) { - for (int i : getAllIndexesOf(name)) { - setDate(i, v); - } - return wrapped; - } - - @Override - public T setTime(int i, long v) { - TypeCodec codec = codecFor(i, Long.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveLongCodec) - bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setTime(String name, long v) { - for (int i : getAllIndexesOf(name)) { - setTime(i, v); - } - return wrapped; - } - - @Override - public T setFloat(int i, float v) { - TypeCodec codec = codecFor(i, Float.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveFloatCodec) - bb = ((TypeCodec.PrimitiveFloatCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setFloat(String name, float v) { - for (int i : getAllIndexesOf(name)) { - setFloat(i, v); - } - return wrapped; - } - - @Override - public T setDouble(int i, double v) { - TypeCodec codec = codecFor(i, Double.class); - ByteBuffer bb; - if (codec instanceof TypeCodec.PrimitiveDoubleCodec) - bb = ((TypeCodec.PrimitiveDoubleCodec) codec).serializeNoBoxing(v, protocolVersion); - else - bb = codec.serialize(v, protocolVersion); - return setValue(i, bb); - } - - @Override - public T setDouble(String name, double v) { - for (int i : getAllIndexesOf(name)) { - setDouble(i, v); - } - return wrapped; - } - - @Override - public T setString(int i, String v) { - return setValue(i, codecFor(i, String.class).serialize(v, protocolVersion)); - } - - @Override - public T setString(String name, String v) { - for (int i : getAllIndexesOf(name)) { - setString(i, v); - } - return wrapped; - } - - @Override - public T setBytes(int i, ByteBuffer v) { - return setValue(i, codecFor(i, ByteBuffer.class).serialize(v, protocolVersion)); - } - - @Override - public T setBytes(String name, ByteBuffer v) { - for (int i : getAllIndexesOf(name)) { - setBytes(i, v); - } - return wrapped; - } - - @Override - public T setBytesUnsafe(int i, ByteBuffer v) { - return setValue(i, v == null ? null : v.duplicate()); - } - - @Override - public T setBytesUnsafe(String name, ByteBuffer v) { - ByteBuffer value = v == null ? 
null : v.duplicate(); - for (int i : getAllIndexesOf(name)) { - setValue(i, value); - } - return wrapped; - } - - @Override - public T setVarint(int i, BigInteger v) { - return setValue(i, codecFor(i, BigInteger.class).serialize(v, protocolVersion)); - } - - @Override - public T setVarint(String name, BigInteger v) { - for (int i : getAllIndexesOf(name)) { - setVarint(i, v); - } - return wrapped; - } - - @Override - public T setDecimal(int i, BigDecimal v) { - return setValue(i, codecFor(i, BigDecimal.class).serialize(v, protocolVersion)); - } - - @Override - public T setDecimal(String name, BigDecimal v) { - for (int i : getAllIndexesOf(name)) { - setDecimal(i, v); - } - return wrapped; - } - - @Override - public T setUUID(int i, UUID v) { - return setValue(i, codecFor(i, UUID.class).serialize(v, protocolVersion)); - } - - @Override - public T setUUID(String name, UUID v) { - for (int i : getAllIndexesOf(name)) { - setUUID(i, v); - } - return wrapped; - } - - @Override - public T setInet(int i, InetAddress v) { - return setValue(i, codecFor(i, InetAddress.class).serialize(v, protocolVersion)); - } - - @Override - public T setInet(String name, InetAddress v) { - for (int i : getAllIndexesOf(name)) { - setInet(i, v); - } - return wrapped; - } - - // setToken is package-private because we only want to expose it in BoundStatement - T setToken(int i, Token v) { - if (v == null) - throw new NullPointerException(String.format("Cannot set a null token for column %s", getName(i))); - checkType(i, v.getType().getName()); - // Bypass CodecRegistry when serializing tokens - return setValue(i, v.serialize(protocolVersion)); - } - - T setToken(String name, Token v) { - for (int i : getAllIndexesOf(name)) { - setToken(i, v); - } - return wrapped; - } - - @Override - @SuppressWarnings("unchecked") - public T setList(int i, List v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setList(int i, List v, Class elementsClass) { - return setValue(i, codecFor(i, TypeTokens.listOf(elementsClass)).serialize(v, protocolVersion)); - } - - @Override - public T setList(int i, List v, TypeToken elementsType) { - return setValue(i, codecFor(i, TypeTokens.listOf(elementsType)).serialize(v, protocolVersion)); - } - - @Override - public T setList(String name, List v) { - for (int i : getAllIndexesOf(name)) { - setList(i, v); - } - return wrapped; - } - - @Override - public T setList(String name, List v, Class elementsClass) { - for (int i : getAllIndexesOf(name)) { - setList(i, v, elementsClass); - } - return wrapped; - } - - @Override - public T setList(String name, List v, TypeToken elementsType) { - for (int i : getAllIndexesOf(name)) { - setList(i, v, elementsType); - } - return wrapped; - } - - @SuppressWarnings("unchecked") - @Override - public T setMap(int i, Map v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setMap(int i, Map v, Class keysClass, Class valuesClass) { - return setValue(i, codecFor(i, TypeTokens.mapOf(keysClass, valuesClass)).serialize(v, protocolVersion)); - } - - @Override - public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType) { - return setValue(i, codecFor(i, TypeTokens.mapOf(keysType, valuesType)).serialize(v, protocolVersion)); - } - - @Override - public T setMap(String name, Map v) { - for (int i : getAllIndexesOf(name)) { - setMap(i, v); - } - return wrapped; - } - - @Override - public T setMap(String name, Map v, Class keysClass, Class valuesClass) { - for (int i : 
getAllIndexesOf(name)) { - setMap(i, v, keysClass, valuesClass); - } - return wrapped; - } - - @Override - public T setMap(String name, Map v, TypeToken keysType, TypeToken valuesType) { - for (int i : getAllIndexesOf(name)) { - setMap(i, v, keysType, valuesType); - } - return wrapped; - } - - @Override - @SuppressWarnings("unchecked") - public T setSet(int i, Set v) { - return setValue(i, codecFor(i).serialize(v, protocolVersion)); - } - - @Override - public T setSet(int i, Set v, Class elementsClass) { - return setValue(i, codecFor(i, TypeTokens.setOf(elementsClass)).serialize(v, protocolVersion)); - } - - @Override - public T setSet(int i, Set v, TypeToken elementsType) { - return setValue(i, codecFor(i, TypeTokens.setOf(elementsType)).serialize(v, protocolVersion)); - } - - @Override - public T setSet(String name, Set v) { - for (int i : getAllIndexesOf(name)) { - setSet(i, v); - } - return wrapped; - } - - @Override - public T setSet(String name, Set v, Class elementsClass) { - for (int i : getAllIndexesOf(name)) { - setSet(i, v, elementsClass); - } - return wrapped; - } - - @Override - public T setSet(String name, Set v, TypeToken elementsType) { - for (int i : getAllIndexesOf(name)) { - setSet(i, v, elementsType); - } - return wrapped; - } - - @Override - public T setUDTValue(int i, UDTValue v) { - return setValue(i, codecFor(i, UDTValue.class).serialize(v, protocolVersion)); - } - - @Override - public T setUDTValue(String name, UDTValue v) { - for (int i : getAllIndexesOf(name)) { - setUDTValue(i, v); - } - return wrapped; - } - - @Override - public T setTupleValue(int i, TupleValue v) { - return setValue(i, codecFor(i, TupleValue.class).serialize(v, protocolVersion)); - } - - @Override - public T setTupleValue(String name, TupleValue v) { - for (int i : getAllIndexesOf(name)) { - setTupleValue(i, v); - } - return wrapped; - } - - @Override - public T set(int i, V v, Class targetClass) { - return set(i, v, codecFor(i, targetClass)); - } - - @Override - public T set(String name, V v, Class targetClass) { - for (int i : getAllIndexesOf(name)) { - set(i, v, targetClass); - } - return wrapped; - } - - @Override - public T set(int i, V v, TypeToken targetType) { - return set(i, v, codecFor(i, targetType)); - } - - @Override - public T set(String name, V v, TypeToken targetType) { - for (int i : getAllIndexesOf(name)) { - set(i, v, targetType); - } - return wrapped; - } - - @Override - public T set(int i, V v, TypeCodec codec) { - checkType(i, codec.getCqlType().getName()); - return setValue(i, codec.serialize(v, protocolVersion)); - } - - @Override - public T set(String name, V v, TypeCodec codec) { - for (int i : getAllIndexesOf(name)) { - set(i, v, codec); - } - return wrapped; - } - - @Override - public T setToNull(int i) { - return setValue(i, null); - } - - @Override - public T setToNull(String name) { - for (int i : getAllIndexesOf(name)) { - setToNull(i); - } - return wrapped; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof AbstractData)) - return false; - - AbstractData that = (AbstractData) o; - if (values.length != that.values.length) - return false; - - if (this.protocolVersion != that.protocolVersion) - return false; - - // Deserializing each value is slightly inefficient, but comparing - // the bytes could in theory be wrong (for varint for instance, 2 values - // can have different binary representation but be the same value due to - // leading zeros). So we don't take any risk. 
- for (int i = 0; i < values.length; i++) { - DataType thisType = getType(i); - DataType thatType = that.getType(i); - if (!thisType.equals(thatType)) - return false; - - Object thisValue = this.codecFor(i).deserialize(this.values[i], this.protocolVersion); - Object thatValue = that.codecFor(i).deserialize(that.values[i], that.protocolVersion); - if (!MoreObjects.equal(thisValue, thatValue)) - return false; - } - return true; - } - - @Override - public int hashCode() { - // Same as equals - int hash = 31; - for (int i = 0; i < values.length; i++) - hash += values[i] == null ? 1 : codecFor(i).deserialize(values[i], protocolVersion).hashCode(); - return hash; - } +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +// We don't want to expose this one: it's less useful externally and it's a bit ugly to expose +// anyway (but it's convenient). +abstract class AbstractData> extends AbstractGettableData + implements SettableData { + + final T wrapped; + final ByteBuffer[] values; + + // Ugly, we could probably clean that: it is currently needed however because we sometimes + // want wrapped to be 'this' (UDTValue), and sometimes some other object (in BoundStatement). + @SuppressWarnings("unchecked") + protected AbstractData(ProtocolVersion protocolVersion, int size) { + super(protocolVersion); + this.wrapped = (T) this; + this.values = new ByteBuffer[size]; + } + + protected AbstractData(ProtocolVersion protocolVersion, T wrapped, int size) { + this(protocolVersion, wrapped, new ByteBuffer[size]); + } + + protected AbstractData(ProtocolVersion protocolVersion, T wrapped, ByteBuffer[] values) { + super(protocolVersion); + this.wrapped = wrapped; + this.values = values; + } + + protected abstract int[] getAllIndexesOf(String name); + + protected T setValue(int i, ByteBuffer value) { + values[i] = value; + return wrapped; + } + + @Override + protected ByteBuffer getValue(int i) { + return values[i]; + } + + @Override + protected int getIndexOf(String name) { + return getAllIndexesOf(name)[0]; + } + + @Override + public T setBool(int i, boolean v) { + TypeCodec codec = codecFor(i, Boolean.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveBooleanCodec) + bb = ((TypeCodec.PrimitiveBooleanCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setBool(String name, boolean v) { + for (int i : getAllIndexesOf(name)) { + setBool(i, v); + } + return wrapped; + } + + @Override + public T setByte(int i, byte v) { + TypeCodec codec = codecFor(i, Byte.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveByteCodec) + bb = ((TypeCodec.PrimitiveByteCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setByte(String name, byte v) { + for (int i : getAllIndexesOf(name)) { + setByte(i, v); + } + return wrapped; + } + + @Override + public T setShort(int i, short v) { + TypeCodec codec = codecFor(i, Short.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveShortCodec) + bb = ((TypeCodec.PrimitiveShortCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setShort(String name, short v) { + for (int i : getAllIndexesOf(name)) { + setShort(i, v); + } + return wrapped; + } + + @Override + public T setInt(int 
i, int v) { + TypeCodec codec = codecFor(i, Integer.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveIntCodec) + bb = ((TypeCodec.PrimitiveIntCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setInt(String name, int v) { + for (int i : getAllIndexesOf(name)) { + setInt(i, v); + } + return wrapped; + } + + @Override + public T setLong(int i, long v) { + TypeCodec codec = codecFor(i, Long.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveLongCodec) + bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setLong(String name, long v) { + for (int i : getAllIndexesOf(name)) { + setLong(i, v); + } + return wrapped; + } + + @Override + public T setTimestamp(int i, Date v) { + return setValue(i, codecFor(i, Date.class).serialize(v, protocolVersion)); + } + + @Override + public T setTimestamp(String name, Date v) { + for (int i : getAllIndexesOf(name)) { + setTimestamp(i, v); + } + return wrapped; + } + + @Override + public T setDate(int i, LocalDate v) { + return setValue(i, codecFor(i, LocalDate.class).serialize(v, protocolVersion)); + } + + @Override + public T setDate(String name, LocalDate v) { + for (int i : getAllIndexesOf(name)) { + setDate(i, v); + } + return wrapped; + } + + @Override + public T setTime(int i, long v) { + TypeCodec codec = codecFor(i, Long.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveLongCodec) + bb = ((TypeCodec.PrimitiveLongCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setTime(String name, long v) { + for (int i : getAllIndexesOf(name)) { + setTime(i, v); + } + return wrapped; + } + + @Override + public T setFloat(int i, float v) { + TypeCodec codec = codecFor(i, Float.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveFloatCodec) + bb = ((TypeCodec.PrimitiveFloatCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setFloat(String name, float v) { + for (int i : getAllIndexesOf(name)) { + setFloat(i, v); + } + return wrapped; + } + + @Override + public T setDouble(int i, double v) { + TypeCodec codec = codecFor(i, Double.class); + ByteBuffer bb; + if (codec instanceof TypeCodec.PrimitiveDoubleCodec) + bb = ((TypeCodec.PrimitiveDoubleCodec) codec).serializeNoBoxing(v, protocolVersion); + else bb = codec.serialize(v, protocolVersion); + return setValue(i, bb); + } + + @Override + public T setDouble(String name, double v) { + for (int i : getAllIndexesOf(name)) { + setDouble(i, v); + } + return wrapped; + } + + @Override + public T setString(int i, String v) { + return setValue(i, codecFor(i, String.class).serialize(v, protocolVersion)); + } + + @Override + public T setString(String name, String v) { + for (int i : getAllIndexesOf(name)) { + setString(i, v); + } + return wrapped; + } + + @Override + public T setBytes(int i, ByteBuffer v) { + return setValue(i, codecFor(i, ByteBuffer.class).serialize(v, protocolVersion)); + } + + @Override + public T setBytes(String name, ByteBuffer v) { + for (int i : getAllIndexesOf(name)) { + setBytes(i, v); + } + return wrapped; + } + + @Override + public T setBytesUnsafe(int i, ByteBuffer v) { + return setValue(i, v 
== null ? null : v.duplicate()); + } + + @Override + public T setBytesUnsafe(String name, ByteBuffer v) { + ByteBuffer value = v == null ? null : v.duplicate(); + for (int i : getAllIndexesOf(name)) { + setValue(i, value); + } + return wrapped; + } + + @Override + public T setVarint(int i, BigInteger v) { + return setValue(i, codecFor(i, BigInteger.class).serialize(v, protocolVersion)); + } + + @Override + public T setVarint(String name, BigInteger v) { + for (int i : getAllIndexesOf(name)) { + setVarint(i, v); + } + return wrapped; + } + + @Override + public T setDecimal(int i, BigDecimal v) { + return setValue(i, codecFor(i, BigDecimal.class).serialize(v, protocolVersion)); + } + + @Override + public T setDecimal(String name, BigDecimal v) { + for (int i : getAllIndexesOf(name)) { + setDecimal(i, v); + } + return wrapped; + } + + @Override + public T setUUID(int i, UUID v) { + return setValue(i, codecFor(i, UUID.class).serialize(v, protocolVersion)); + } + + @Override + public T setUUID(String name, UUID v) { + for (int i : getAllIndexesOf(name)) { + setUUID(i, v); + } + return wrapped; + } + + @Override + public T setInet(int i, InetAddress v) { + return setValue(i, codecFor(i, InetAddress.class).serialize(v, protocolVersion)); + } + + @Override + public T setInet(String name, InetAddress v) { + for (int i : getAllIndexesOf(name)) { + setInet(i, v); + } + return wrapped; + } + + // setToken is package-private because we only want to expose it in BoundStatement + T setToken(int i, Token v) { + if (v == null) + throw new NullPointerException( + String.format("Cannot set a null token for column %s", getName(i))); + checkType(i, v.getType().getName()); + // Bypass CodecRegistry when serializing tokens + return setValue(i, v.serialize(protocolVersion)); + } + + T setToken(String name, Token v) { + for (int i : getAllIndexesOf(name)) { + setToken(i, v); + } + return wrapped; + } + + @Override + @SuppressWarnings("unchecked") + public T setList(int i, List v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setList(int i, List v, Class elementsClass) { + return setValue(i, codecFor(i, TypeTokens.listOf(elementsClass)).serialize(v, protocolVersion)); + } + + @Override + public T setList(int i, List v, TypeToken elementsType) { + return setValue(i, codecFor(i, TypeTokens.listOf(elementsType)).serialize(v, protocolVersion)); + } + + @Override + public T setList(String name, List v) { + for (int i : getAllIndexesOf(name)) { + setList(i, v); + } + return wrapped; + } + + @Override + public T setList(String name, List v, Class elementsClass) { + for (int i : getAllIndexesOf(name)) { + setList(i, v, elementsClass); + } + return wrapped; + } + + @Override + public T setList(String name, List v, TypeToken elementsType) { + for (int i : getAllIndexesOf(name)) { + setList(i, v, elementsType); + } + return wrapped; + } + + @SuppressWarnings("unchecked") + @Override + public T setMap(int i, Map v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setMap(int i, Map v, Class keysClass, Class valuesClass) { + return setValue( + i, codecFor(i, TypeTokens.mapOf(keysClass, valuesClass)).serialize(v, protocolVersion)); + } + + @Override + public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType) { + return setValue( + i, codecFor(i, TypeTokens.mapOf(keysType, valuesType)).serialize(v, protocolVersion)); + } + + @Override + public T setMap(String name, Map v) { + for (int i : getAllIndexesOf(name)) { + 
setMap(i, v); + } + return wrapped; + } + + @Override + public T setMap(String name, Map v, Class keysClass, Class valuesClass) { + for (int i : getAllIndexesOf(name)) { + setMap(i, v, keysClass, valuesClass); + } + return wrapped; + } + + @Override + public T setMap(String name, Map v, TypeToken keysType, TypeToken valuesType) { + for (int i : getAllIndexesOf(name)) { + setMap(i, v, keysType, valuesType); + } + return wrapped; + } + + @Override + @SuppressWarnings("unchecked") + public T setSet(int i, Set v) { + return setValue(i, codecFor(i).serialize(v, protocolVersion)); + } + + @Override + public T setSet(int i, Set v, Class elementsClass) { + return setValue(i, codecFor(i, TypeTokens.setOf(elementsClass)).serialize(v, protocolVersion)); + } + + @Override + public T setSet(int i, Set v, TypeToken elementsType) { + return setValue(i, codecFor(i, TypeTokens.setOf(elementsType)).serialize(v, protocolVersion)); + } + + @Override + public T setSet(String name, Set v) { + for (int i : getAllIndexesOf(name)) { + setSet(i, v); + } + return wrapped; + } + + @Override + public T setSet(String name, Set v, Class elementsClass) { + for (int i : getAllIndexesOf(name)) { + setSet(i, v, elementsClass); + } + return wrapped; + } + + @Override + public T setSet(String name, Set v, TypeToken elementsType) { + for (int i : getAllIndexesOf(name)) { + setSet(i, v, elementsType); + } + return wrapped; + } + + @Override + public T setUDTValue(int i, UDTValue v) { + return setValue(i, codecFor(i, UDTValue.class).serialize(v, protocolVersion)); + } + + @Override + public T setUDTValue(String name, UDTValue v) { + for (int i : getAllIndexesOf(name)) { + setUDTValue(i, v); + } + return wrapped; + } + + @Override + public T setTupleValue(int i, TupleValue v) { + return setValue(i, codecFor(i, TupleValue.class).serialize(v, protocolVersion)); + } + + @Override + public T setTupleValue(String name, TupleValue v) { + for (int i : getAllIndexesOf(name)) { + setTupleValue(i, v); + } + return wrapped; + } + + @Override + public T set(int i, V v, Class targetClass) { + return set(i, v, codecFor(i, targetClass)); + } + + @Override + public T set(String name, V v, Class targetClass) { + for (int i : getAllIndexesOf(name)) { + set(i, v, targetClass); + } + return wrapped; + } + + @Override + public T set(int i, V v, TypeToken targetType) { + return set(i, v, codecFor(i, targetType)); + } + + @Override + public T set(String name, V v, TypeToken targetType) { + for (int i : getAllIndexesOf(name)) { + set(i, v, targetType); + } + return wrapped; + } + + @Override + public T set(int i, V v, TypeCodec codec) { + checkType(i, codec.getCqlType().getName()); + return setValue(i, codec.serialize(v, protocolVersion)); + } + + @Override + public T set(String name, V v, TypeCodec codec) { + for (int i : getAllIndexesOf(name)) { + set(i, v, codec); + } + return wrapped; + } + + @Override + public T setToNull(int i) { + return setValue(i, null); + } + + @Override + public T setToNull(String name) { + for (int i : getAllIndexesOf(name)) { + setToNull(i); + } + return wrapped; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractData)) return false; + + AbstractData that = (AbstractData) o; + if (values.length != that.values.length) return false; + + if (this.protocolVersion != that.protocolVersion) return false; + + // Deserializing each value is slightly inefficient, but comparing + // the bytes could in theory be wrong (for varint for instance, 2 values + // can have different binary representation but be 
the same value due to + // leading zeros). So we don't take any risk. + for (int i = 0; i < values.length; i++) { + DataType thisType = getType(i); + DataType thatType = that.getType(i); + if (!thisType.equals(thatType)) return false; + + Object thisValue = this.codecFor(i).deserialize(this.values[i], this.protocolVersion); + Object thatValue = that.codecFor(i).deserialize(that.values[i], that.protocolVersion); + if (!MoreObjects.equal(thisValue, thatValue)) return false; + } + return true; + } + + @Override + public int hashCode() { + // Same as equals + int hash = 31; + for (int i = 0; i < values.length; i++) + hash += + values[i] == null ? 1 : codecFor(i).deserialize(values[i], protocolVersion).hashCode(); + return hash; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java index d1b19741c19..389aeea33e7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableByIndexData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,369 +19,310 @@ import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; abstract class AbstractGettableByIndexData implements GettableByIndexData { - protected final ProtocolVersion protocolVersion; - - protected AbstractGettableByIndexData(ProtocolVersion protocolVersion) { - this.protocolVersion = protocolVersion; - } - - /** - * Returns the type for the value at index {@code i}. - * - * @param i the index of the type to fetch. - * @return the type of the value at index {@code i}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index. - */ - protected abstract DataType getType(int i); - - /** - * Returns the name corresponding to the value at index {@code i}. - * - * @param i the index of the name to fetch. - * @return the name corresponding to the value at index {@code i}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index. - */ - protected abstract String getName(int i); - - /** - * Returns the value at index {@code i}. - * - * @param i the index to fetch. - * @return the value at index {@code i}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index. 
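The equals() comment in AbstractData above explains why values are compared after deserialization rather than byte-by-byte. A small, self-contained illustration of that point for the varint case; the byte arrays below are invented for the example and are not taken from the driver:

```java
import java.math.BigInteger;
import java.nio.ByteBuffer;

public class VarintEncodingExample {
  public static void main(String[] args) {
    // Two serialized forms of the varint value 1; the second carries a redundant leading zero.
    ByteBuffer a = ByteBuffer.wrap(new byte[] {0x01});
    ByteBuffer b = ByteBuffer.wrap(new byte[] {0x00, 0x01});

    // Comparing the raw bytes reports a difference...
    System.out.println(a.equals(b)); // false

    // ...but both decode to the same value, which is why equals() deserializes first.
    System.out.println(new BigInteger(toArray(a)).equals(new BigInteger(toArray(b)))); // true
  }

  private static byte[] toArray(ByteBuffer bb) {
    byte[] bytes = new byte[bb.remaining()];
    bb.duplicate().get(bytes);
    return bytes;
  }
}
```

Deserializing each column costs a little, as the comment acknowledges, but it avoids false negatives caused by such representational differences.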
- */ - protected abstract ByteBuffer getValue(int i); - - protected abstract CodecRegistry getCodecRegistry(); - - protected TypeCodec codecFor(int i) { - return getCodecRegistry().codecFor(getType(i)); - } - - protected TypeCodec codecFor(int i, Class javaClass) { - return getCodecRegistry().codecFor(getType(i), javaClass); - } - - protected TypeCodec codecFor(int i, TypeToken javaType) { - return getCodecRegistry().codecFor(getType(i), javaType); - } - - protected TypeCodec codecFor(int i, T value) { - return getCodecRegistry().codecFor(getType(i), value); - } - - protected void checkType(int i, DataType.Name actual) { - DataType.Name expected = getType(i).getName(); - if (!actual.isCompatibleWith(expected)) - throw new InvalidTypeException(String.format("Value %s is of type %s, not %s", getName(i), expected, actual)); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isNull(int i) { - return getValue(i) == null; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean getBool(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Boolean.class); - if (codec instanceof TypeCodec.PrimitiveBooleanCodec) - return ((TypeCodec.PrimitiveBooleanCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public byte getByte(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Byte.class); - if (codec instanceof TypeCodec.PrimitiveByteCodec) - return ((TypeCodec.PrimitiveByteCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public short getShort(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Short.class); - if (codec instanceof TypeCodec.PrimitiveShortCodec) - return ((TypeCodec.PrimitiveShortCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public int getInt(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Integer.class); - if (codec instanceof TypeCodec.PrimitiveIntCodec) - return ((TypeCodec.PrimitiveIntCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public long getLong(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Long.class); - if (codec instanceof TypeCodec.PrimitiveLongCodec) - return ((TypeCodec.PrimitiveLongCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public Date getTimestamp(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, Date.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public LocalDate getDate(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, LocalDate.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public long getTime(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Long.class); - if (codec instanceof TypeCodec.PrimitiveLongCodec) - return ((TypeCodec.PrimitiveLongCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - 
@Override - public float getFloat(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Float.class); - if (codec instanceof TypeCodec.PrimitiveFloatCodec) - return ((TypeCodec.PrimitiveFloatCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public double getDouble(int i) { - ByteBuffer value = getValue(i); - TypeCodec codec = codecFor(i, Double.class); - if (codec instanceof TypeCodec.PrimitiveDoubleCodec) - return ((TypeCodec.PrimitiveDoubleCodec) codec).deserializeNoBoxing(value, protocolVersion); - else - return codec.deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytesUnsafe(int i) { - ByteBuffer value = getValue(i); - if (value == null) - return null; - return value.duplicate(); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytes(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, ByteBuffer.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public String getString(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, String.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public BigInteger getVarint(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, BigInteger.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public BigDecimal getDecimal(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, BigDecimal.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public UUID getUUID(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, UUID.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getInet(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, InetAddress.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public List getList(int i, Class elementsClass) { - return getList(i, TypeToken.of(elementsClass)); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public List getList(int i, TypeToken elementsType) { - ByteBuffer value = getValue(i); - TypeToken> javaType = TypeTokens.listOf(elementsType); - return codecFor(i, javaType).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public Set getSet(int i, Class elementsClass) { - return getSet(i, TypeToken.of(elementsClass)); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public Set getSet(int i, TypeToken elementsType) { - ByteBuffer value = getValue(i); - TypeToken> javaType = TypeTokens.setOf(elementsType); - return codecFor(i, javaType).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public Map getMap(int i, Class keysClass, Class valuesClass) { - return getMap(i, TypeToken.of(keysClass), TypeToken.of(valuesClass)); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { - ByteBuffer value = getValue(i); - TypeToken> javaType = TypeTokens.mapOf(keysType, valuesType); - return codecFor(i, javaType).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} 
- */ - @Override - @SuppressWarnings("unchecked") - public UDTValue getUDTValue(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, UDTValue.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - @SuppressWarnings("unchecked") - public TupleValue getTupleValue(int i) { - ByteBuffer value = getValue(i); - return codecFor(i, TupleValue.class).deserialize(value, protocolVersion); - } - - /** - * {@inheritDoc} - */ - @Override - public Object getObject(int i) { - return get(i, codecFor(i)); - } - - @Override - public T get(int i, Class targetClass) { - return get(i, codecFor(i, targetClass)); - } - - @Override - public T get(int i, TypeToken targetType) { - return get(i, codecFor(i, targetType)); - } - - @Override - public T get(int i, TypeCodec codec) { - checkType(i, codec.getCqlType().getName()); - ByteBuffer value = getValue(i); - return codec.deserialize(value, protocolVersion); - } + protected final ProtocolVersion protocolVersion; + + protected AbstractGettableByIndexData(ProtocolVersion protocolVersion) { + this.protocolVersion = protocolVersion; + } + + /** + * Returns the type for the value at index {@code i}. + * + * @param i the index of the type to fetch. + * @return the type of the value at index {@code i}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. + */ + protected abstract DataType getType(int i); + + /** + * Returns the name corresponding to the value at index {@code i}. + * + * @param i the index of the name to fetch. + * @return the name corresponding to the value at index {@code i}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. + */ + protected abstract String getName(int i); + + /** + * Returns the value at index {@code i}. + * + * @param i the index to fetch. + * @return the value at index {@code i}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index. 
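Two of the accessors being reformatted in this hunk are easy to confuse: getBytesUnsafe hands back a duplicate of the still-serialized buffer with no codec involved, while getBytes resolves the ByteBuffer codec for the column's CQL type. A usage sketch with a hypothetical blob column (table and column names are invented):

```java
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import java.nio.ByteBuffer;

public class BytesAccessExample {
  // Hypothetical schema: files(id int PRIMARY KEY, payload blob)
  static void readPayload(Session session) {
    Row row = session.execute("SELECT payload FROM files WHERE id = 1").one();

    // Raw access: a duplicate of the serialized bytes, regardless of the column's CQL type.
    ByteBuffer raw = row.getBytesUnsafe("payload");

    // Codec-based access: goes through codecFor(i, ByteBuffer.class), so it is only
    // expected to work for blob columns, where that codec exists.
    ByteBuffer decoded = row.getBytes("payload");

    System.out.println(raw.remaining() + " bytes raw, " + decoded.remaining() + " decoded");
  }
}
```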
+ */ + protected abstract ByteBuffer getValue(int i); + + protected abstract CodecRegistry getCodecRegistry(); + + protected TypeCodec codecFor(int i) { + return getCodecRegistry().codecFor(getType(i)); + } + + protected TypeCodec codecFor(int i, Class javaClass) { + return getCodecRegistry().codecFor(getType(i), javaClass); + } + + protected TypeCodec codecFor(int i, TypeToken javaType) { + return getCodecRegistry().codecFor(getType(i), javaType); + } + + protected TypeCodec codecFor(int i, T value) { + return getCodecRegistry().codecFor(getType(i), value); + } + + protected void checkType(int i, DataType.Name actual) { + DataType.Name expected = getType(i).getName(); + if (!actual.isCompatibleWith(expected)) + throw new InvalidTypeException( + String.format("Value %s is of type %s, not %s", getName(i), expected, actual)); + } + + /** {@inheritDoc} */ + @Override + public boolean isNull(int i) { + return getValue(i) == null; + } + + /** {@inheritDoc} */ + @Override + public boolean getBool(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Boolean.class); + if (codec instanceof TypeCodec.PrimitiveBooleanCodec) + return ((TypeCodec.PrimitiveBooleanCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public byte getByte(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Byte.class); + if (codec instanceof TypeCodec.PrimitiveByteCodec) + return ((TypeCodec.PrimitiveByteCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public short getShort(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Short.class); + if (codec instanceof TypeCodec.PrimitiveShortCodec) + return ((TypeCodec.PrimitiveShortCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public int getInt(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Integer.class); + if (codec instanceof TypeCodec.PrimitiveIntCodec) + return ((TypeCodec.PrimitiveIntCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public long getLong(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Long.class); + if (codec instanceof TypeCodec.PrimitiveLongCodec) + return ((TypeCodec.PrimitiveLongCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public Date getTimestamp(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, Date.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public LocalDate getDate(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, LocalDate.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public long getTime(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Long.class); + if (codec instanceof TypeCodec.PrimitiveLongCodec) + return ((TypeCodec.PrimitiveLongCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public float getFloat(int i) { + ByteBuffer value = getValue(i); 
+ TypeCodec codec = codecFor(i, Float.class); + if (codec instanceof TypeCodec.PrimitiveFloatCodec) + return ((TypeCodec.PrimitiveFloatCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public double getDouble(int i) { + ByteBuffer value = getValue(i); + TypeCodec codec = codecFor(i, Double.class); + if (codec instanceof TypeCodec.PrimitiveDoubleCodec) + return ((TypeCodec.PrimitiveDoubleCodec) codec).deserializeNoBoxing(value, protocolVersion); + else return codec.deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytesUnsafe(int i) { + ByteBuffer value = getValue(i); + if (value == null) return null; + return value.duplicate(); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytes(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, ByteBuffer.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public String getString(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, String.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public BigInteger getVarint(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, BigInteger.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public BigDecimal getDecimal(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, BigDecimal.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public UUID getUUID(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, UUID.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public InetAddress getInet(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, InetAddress.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public List getList(int i, Class elementsClass) { + return getList(i, TypeToken.of(elementsClass)); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public List getList(int i, TypeToken elementsType) { + ByteBuffer value = getValue(i); + TypeToken> javaType = TypeTokens.listOf(elementsType); + return codecFor(i, javaType).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public Set getSet(int i, Class elementsClass) { + return getSet(i, TypeToken.of(elementsClass)); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public Set getSet(int i, TypeToken elementsType) { + ByteBuffer value = getValue(i); + TypeToken> javaType = TypeTokens.setOf(elementsType); + return codecFor(i, javaType).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public Map getMap(int i, Class keysClass, Class valuesClass) { + return getMap(i, TypeToken.of(keysClass), TypeToken.of(valuesClass)); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { + ByteBuffer value = getValue(i); + TypeToken> javaType = TypeTokens.mapOf(keysType, valuesType); + return codecFor(i, javaType).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public UDTValue getUDTValue(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, 
UDTValue.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + @SuppressWarnings("unchecked") + public TupleValue getTupleValue(int i) { + ByteBuffer value = getValue(i); + return codecFor(i, TupleValue.class).deserialize(value, protocolVersion); + } + + /** {@inheritDoc} */ + @Override + public Object getObject(int i) { + return get(i, codecFor(i)); + } + + @Override + public T get(int i, Class targetClass) { + return get(i, codecFor(i, targetClass)); + } + + @Override + public T get(int i, TypeToken targetType) { + return get(i, codecFor(i, targetType)); + } + + @Override + public T get(int i, TypeCodec codec) { + checkType(i, codec.getCqlType().getName()); + ByteBuffer value = getValue(i); + return codec.deserialize(value, protocolVersion); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java index 1cd4898c588..f6c32a34fb6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractGettableData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,274 +18,217 @@ package com.datastax.driver.core; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; - -public abstract class AbstractGettableData extends AbstractGettableByIndexData implements GettableData { - - /** - * Creates a new AbstractGettableData object. - * - * @param protocolVersion the protocol version in which values returned - * by {@link #getValue} will be returned. This must be a protocol version - * supported by this driver. In general, the correct value will be the - * value returned by {@link ProtocolOptions#getProtocolVersion}. - * @throws IllegalArgumentException if {@code protocolVersion} is not a valid protocol version. - */ - protected AbstractGettableData(ProtocolVersion protocolVersion) { - super(protocolVersion); - } - - /** - * Returns the index corresponding to a given name. - * - * @param name the name for which to return the index of. - * @return the index for the value coressponding to {@code name}. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. 
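The getIndexOf contract just quoted is what the by-name variants further down build on: they resolve the name to an index (or, on the settable side, to every index returned by getAllIndexesOf) and delegate to the positional methods. A round-trip usage sketch; the schema, table name and values are invented for illustration:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeTokens;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;

public class NamedAccessExample {
  // Hypothetical table: users(id int PRIMARY KEY, emails list<text>,
  //                           attributes map<text, frozen<list<text>>>)
  static void roundTrip(Session session) {
    // Setting by name goes through getAllIndexesOf(name): if the same named marker
    // appeared several times, every occurrence would be bound.
    PreparedStatement ps =
        session.prepare("SELECT emails, attributes FROM users WHERE id = :id");
    BoundStatement bs = ps.bind().setInt("id", 42);

    Row row = session.execute(bs).one();

    // Class-based lookup is enough when the element type is not itself generic.
    List<String> emails = row.getList("emails", String.class);

    // For nested generics, a TypeToken describes the full value type.
    Map<String, List<String>> attributes =
        row.getMap("attributes", TypeToken.of(String.class), TypeTokens.listOf(String.class));

    System.out.println(emails);
    System.out.println(attributes);
  }
}
```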
- */ - protected abstract int getIndexOf(String name); - - /** - * {@inheritDoc} - */ - @Override - public boolean isNull(String name) { - return isNull(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean getBool(String name) { - return getBool(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public byte getByte(String name) { - return getByte(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public short getShort(String name) { - return getShort(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public int getInt(String name) { - return getInt(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public long getLong(String name) { - return getLong(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public Date getTimestamp(String name) { - return getTimestamp(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public LocalDate getDate(String name) { - return getDate(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public long getTime(String name) { - return getTime(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public float getFloat(String name) { - return getFloat(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public double getDouble(String name) { - return getDouble(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytesUnsafe(String name) { - return getBytesUnsafe(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytes(String name) { - return getBytes(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public String getString(String name) { - return getString(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public BigInteger getVarint(String name) { - return getVarint(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public BigDecimal getDecimal(String name) { - return getDecimal(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public UUID getUUID(String name) { - return getUUID(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getInet(String name) { - return getInet(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(String name, Class elementsClass) { - return getList(getIndexOf(name), elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(String name, TypeToken elementsType) { - return getList(getIndexOf(name), elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(String name, Class elementsClass) { - return getSet(getIndexOf(name), elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(String name, TypeToken elementsType) { - return getSet(getIndexOf(name), elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getMap(String name, Class keysClass, Class valuesClass) { - return getMap(getIndexOf(name), keysClass, valuesClass); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { - return getMap(getIndexOf(name), keysType, valuesType); - } - - /** - * {@inheritDoc} - */ - @Override - public UDTValue getUDTValue(String name) { - return getUDTValue(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public TupleValue getTupleValue(String name) { - return getTupleValue(getIndexOf(name)); - 
} - - /** - * {@inheritDoc} - */ - @Override - public Object getObject(String name) { - return getObject(getIndexOf(name)); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, Class targetClass) { - return get(getIndexOf(name), targetClass); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, TypeToken targetType) { - return get(getIndexOf(name), targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, TypeCodec codec) { - return get(getIndexOf(name), codec); - } +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +public abstract class AbstractGettableData extends AbstractGettableByIndexData + implements GettableData { + + /** + * Creates a new AbstractGettableData object. + * + * @param protocolVersion the protocol version in which values returned by {@link #getValue} will + * be returned. This must be a protocol version supported by this driver. In general, the + * correct value will be the value returned by {@link ProtocolOptions#getProtocolVersion}. + * @throws IllegalArgumentException if {@code protocolVersion} is not a valid protocol version. + */ + protected AbstractGettableData(ProtocolVersion protocolVersion) { + super(protocolVersion); + } + + /** + * Returns the index corresponding to a given name. + * + * @param name the name for which to return the index of. + * @return the index for the value coressponding to {@code name}. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + */ + protected abstract int getIndexOf(String name); + + /** {@inheritDoc} */ + @Override + public boolean isNull(String name) { + return isNull(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public boolean getBool(String name) { + return getBool(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public byte getByte(String name) { + return getByte(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public short getShort(String name) { + return getShort(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public int getInt(String name) { + return getInt(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public long getLong(String name) { + return getLong(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public Date getTimestamp(String name) { + return getTimestamp(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public LocalDate getDate(String name) { + return getDate(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public long getTime(String name) { + return getTime(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public float getFloat(String name) { + return getFloat(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public double getDouble(String name) { + return getDouble(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytesUnsafe(String name) { + return getBytesUnsafe(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytes(String name) { + return getBytes(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public String getString(String name) { + return getString(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public BigInteger getVarint(String name) { + return getVarint(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public BigDecimal getDecimal(String name) { + return getDecimal(getIndexOf(name)); + 
} + + /** {@inheritDoc} */ + @Override + public UUID getUUID(String name) { + return getUUID(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public InetAddress getInet(String name) { + return getInet(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public List getList(String name, Class elementsClass) { + return getList(getIndexOf(name), elementsClass); + } + + /** {@inheritDoc} */ + @Override + public List getList(String name, TypeToken elementsType) { + return getList(getIndexOf(name), elementsType); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(String name, Class elementsClass) { + return getSet(getIndexOf(name), elementsClass); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(String name, TypeToken elementsType) { + return getSet(getIndexOf(name), elementsType); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(String name, Class keysClass, Class valuesClass) { + return getMap(getIndexOf(name), keysClass, valuesClass); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { + return getMap(getIndexOf(name), keysType, valuesType); + } + + /** {@inheritDoc} */ + @Override + public UDTValue getUDTValue(String name) { + return getUDTValue(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public TupleValue getTupleValue(String name) { + return getTupleValue(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public Object getObject(String name) { + return getObject(getIndexOf(name)); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, Class targetClass) { + return get(getIndexOf(name), targetClass); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, TypeToken targetType) { + return get(getIndexOf(name), targetType); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, TypeCodec codec) { + return get(getIndexOf(name), codec); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractMonotonicTimestampGenerator.java index 18ed00a0137..5c5548a748b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractMonotonicTimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractMonotonicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,59 +21,59 @@ /** * Base implementation for monotonic timestamp generators. - *

- * The accuracy of the generated timestamps is largely dependent on the - * granularity of the underlying operating system's clock. - *

- * Generally speaking, this granularity is millisecond, and - * the sub-millisecond part is simply a counter that gets incremented - * until the next clock tick, as provided by {@link System#currentTimeMillis()}. - *

- * On some systems, however, it is possible to have a better granularity by using a JNR - * call to {@code gettimeofday}. The driver will use this system call automatically whenever - * available, unless the system property {@code com.datastax.driver.USE_NATIVE_CLOCK} is - * explicitly set to {@code false}. - *

- * Beware that to guarantee monotonicity, if more than one call to {@link #next()} - * is made within the same microsecond, or in the event of a system clock skew, this generator might - * return timestamps that drift out in the future. - * When this happens, {@link #onDrift(long, long)} is invoked. + * + *

The accuracy of the generated timestamps is largely dependent on the granularity of the + * underlying operating system's clock. + * + *

Generally speaking, this granularity is millisecond, and the sub-millisecond part is simply a + * counter that gets incremented until the next clock tick, as provided by {@link + * System#currentTimeMillis()}. + * + *

On some systems, however, it is possible to have a better granularity by using a JNR call to + * {@code gettimeofday}. The driver will use this system call automatically whenever available, + * unless the system property {@code com.datastax.driver.USE_NATIVE_CLOCK} is explicitly set to + * {@code false}. + * + *
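Since the class comment above mentions the com.datastax.driver.USE_NATIVE_CLOCK switch, here is a minimal sketch of opting out of the native gettimeofday-based clock. It assumes the property is read when the driver first builds its clock, so it is set before the Cluster is created; the contact point is illustrative:

```java
import com.datastax.driver.core.Cluster;

public class DisableNativeClockExample {
  public static void main(String[] args) {
    // Fall back to System.currentTimeMillis() granularity instead of the JNR-based clock.
    System.setProperty("com.datastax.driver.USE_NATIVE_CLOCK", "false");

    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      // ... connect and run requests as usual ...
    } finally {
      cluster.close();
    }
  }
}
```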

Beware that to guarantee monotonicity, if more than one call to {@link #next()} is made within + * the same microsecond, or in the event of a system clock skew, this generator might return + * timestamps that drift out in the future. Whe this happens, {@link #onDrift(long, long)} is + * invoked. */ public abstract class AbstractMonotonicTimestampGenerator implements TimestampGenerator { - @VisibleForTesting - volatile Clock clock = ClockFactory.newInstance(); + @VisibleForTesting volatile Clock clock = ClockFactory.newInstance(); - /** - * Compute the next timestamp, given the last timestamp previously generated. - *

- * To guarantee monotonicity, the next timestamp should be strictly greater than the last one. - * If the underlying clock fails to generate monotonically increasing timestamps, the generator will simply - * increment the previous timestamp, and {@link #onDrift(long, long)} will be invoked. - *

- * This implementation is inspired by {@code org.apache.cassandra.service.ClientState#getTimestamp()}. - * - * @param last the last timestamp generated by this generator, in microseconds. - * @return the next timestamp to use, in microseconds. - */ - protected long computeNext(long last) { - long currentTick = clock.currentTimeMicros(); - if (last >= currentTick) { - onDrift(currentTick, last); - return last + 1; - } - return currentTick; + /** + * Compute the next timestamp, given the last timestamp previously generated. + * + *

To guarantee monotonicity, the next timestamp should be strictly greater than the last one. + * If the underlying clock fails to generate monotonically increasing timestamps, the generator + * will simply increment the previous timestamp, and {@link #onDrift(long, long)} will be invoked. + * + *

This implementation is inspired by {@code + * org.apache.cassandra.service.ClientState#getTimestamp()}. + * + * @param last the last timestamp generated by this generator, in microseconds. + * @return the next timestamp to use, in microseconds. + */ + protected long computeNext(long last) { + long currentTick = clock.currentTimeMicros(); + if (last >= currentTick) { + onDrift(currentTick, last); + return last + 1; } + return currentTick; + } - /** - * Called when generated timestamps drift into the future compared to the underlying clock (in other words, if - * {@code lastTimestamp >= currentTick}). - *

- * This could happen if timestamps are requested faster than the clock granularity, or on a clock skew (for example - * because of a leap second). - * - * @param currentTick the current clock tick, in microseconds. - * @param lastTimestamp the last timestamp that was generated, in microseconds. - */ - protected abstract void onDrift(long currentTick, long lastTimestamp); + /** + * Called when generated timestamps drift into the future compared to the underlying clock (in + * other words, if {@code lastTimestamp >= currentTick}). + * + *
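With computeNext shown above and the onDrift contract described here, a concrete generator only has to add the shared state behind next() and decide how to react to drift. A hypothetical subclass sketch, purely to illustrate the extension points (the driver ships its own TimestampGenerator implementations):

```java
import com.datastax.driver.core.AbstractMonotonicTimestampGenerator;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WarnOnDriftTimestampGenerator extends AbstractMonotonicTimestampGenerator {

  private static final Logger logger =
      LoggerFactory.getLogger(WarnOnDriftTimestampGenerator.class);

  private final AtomicLong lastRef = new AtomicLong(Long.MIN_VALUE);

  @Override
  public long next() {
    while (true) {
      long last = lastRef.get();
      long next = computeNext(last);
      // Publish the new value; retry if another thread generated a timestamp concurrently.
      if (lastRef.compareAndSet(last, next)) {
        return next;
      }
    }
  }

  @Override
  protected void onDrift(long currentTick, long lastTimestamp) {
    logger.warn(
        "Timestamps are running {} microseconds ahead of the clock",
        lastTimestamp - currentTick);
  }
}
```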

This could happen if timestamps are requested faster than the clock granularity, or on a + * clock skew (for example because of a leap second). + * + * @param currentTick the current clock tick, in microseconds. + * @param lastTimestamp the last timestamp that was generated, in microseconds. + */ + protected abstract void onDrift(long currentTick, long lastTimestamp); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index ad7234c9499..c559af07632 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,200 +24,220 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.ListenableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicReference; - /** * Manages periodic reconnection attempts after a host has been marked down. - *

- * Concurrent attempts are handled via the {@link #currentAttempt} reference passed to the constructor. - * For a given reference, only one handler will run at a given time. Additional handlers will cancel - * themselves if they find a previous handler running. - *

- * This class is designed for concurrency, but instances must not be shared: each thread creates and - * starts its own private handler, all interactions happen through {@link #currentAttempt}. + * + *

Concurrent attempts are handled via the {@link #currentAttempt} reference passed to the + * constructor. For a given reference, only one handler will run at a given time. Additional + * handlers will cancel themselves if they find a previous handler running. + * + *
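The paragraph above is the heart of the class: only one handler may be active per currentAttempt reference, and newcomers cancel themselves if they find one running. A stripped-down sketch of that claim-or-back-off pattern; the class and method names are illustrative, not driver API:

```java
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.atomic.AtomicReference;

public class SingleActiveAttemptSketch {

  private final AtomicReference<ListenableFuture<?>> currentAttempt = new AtomicReference<>();

  /** Returns true if this attempt became the single active one, false if it backed off. */
  boolean tryBecomeActive(ListenableFuture<?> myAttempt) {
    while (true) {
      ListenableFuture<?> previous = currentAttempt.get();
      if (previous != null && !previous.isCancelled()) {
        // Another handler is already running against this reference: cancel ourselves.
        myAttempt.cancel(false);
        return false;
      }
      if (currentAttempt.compareAndSet(previous, myAttempt)) {
        return true;
      }
      // Lost the race with another thread: re-read and try again.
    }
  }
}
```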

This class is designed for concurrency, but instances must not be shared: each thread creates + * and starts its own private handler, all interactions happen through {@link #currentAttempt}. */ abstract class AbstractReconnectionHandler implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); - - private final String name; - private final ScheduledExecutorService executor; - private final ReconnectionPolicy.ReconnectionSchedule schedule; - /** - * The future that is exposed to clients, representing completion of the current active handler - */ - private final AtomicReference> currentAttempt; - - @VisibleForTesting - final HandlerFuture handlerFuture = new HandlerFuture(); - - private final long initialDelayMs; - - private final CountDownLatch ready = new CountDownLatch(1); - - public AbstractReconnectionHandler(String name, ScheduledExecutorService executor, ReconnectionPolicy.ReconnectionSchedule schedule, AtomicReference> currentAttempt) { - this(name, executor, schedule, currentAttempt, -1); + private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); + + private final String name; + private final ScheduledExecutorService executor; + private final ReconnectionPolicy.ReconnectionSchedule schedule; + /** + * The future that is exposed to clients, representing completion of the current active handler + */ + private final AtomicReference> currentAttempt; + + @VisibleForTesting final HandlerFuture handlerFuture = new HandlerFuture(); + + private final long initialDelayMs; + + private final CountDownLatch ready = new CountDownLatch(1); + + public AbstractReconnectionHandler( + String name, + ScheduledExecutorService executor, + ReconnectionPolicy.ReconnectionSchedule schedule, + AtomicReference> currentAttempt) { + this(name, executor, schedule, currentAttempt, -1); + } + + public AbstractReconnectionHandler( + String name, + ScheduledExecutorService executor, + ReconnectionPolicy.ReconnectionSchedule schedule, + AtomicReference> currentAttempt, + long initialDelayMs) { + this.name = name; + this.executor = executor; + this.schedule = schedule; + this.currentAttempt = currentAttempt; + this.initialDelayMs = initialDelayMs; + } + + protected abstract Connection tryReconnect() + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException; + + protected abstract void onReconnection(Connection connection); + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + return true; + } + + protected boolean onUnknownException(Exception e, long nextDelayMs) { + return true; + } + + // Retrying on authentication errors makes sense for applications that can update the credentials + // at runtime, we don't want to force them + // to restart. + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { + return true; + } + + // Retrying on these errors is unlikely to work + protected boolean onUnsupportedProtocolVersionException( + UnsupportedProtocolVersionException e, long nextDelayMs) { + return false; + } + + protected boolean onClusterNameMismatchException( + ClusterNameMismatchException e, long nextDelayMs) { + return false; + } + + public void start() { + long firstDelay = (initialDelayMs >= 0) ? 
initialDelayMs : schedule.nextDelayMs(); + logger.debug("First reconnection scheduled in {}ms", firstDelay); + try { + handlerFuture.nextTry = executor.schedule(this, firstDelay, TimeUnit.MILLISECONDS); + + while (true) { + ListenableFuture previous = currentAttempt.get(); + if (previous != null && !previous.isCancelled()) { + logger.debug("Found another already active handler, cancelling"); + handlerFuture.cancel(false); + break; + } + if (currentAttempt.compareAndSet(previous, handlerFuture)) { + Host.statesLogger.debug("[{}] starting reconnection attempt", name); + break; + } + } + ready.countDown(); + } catch (RejectedExecutionException e) { + // The executor has been shutdown, fair enough, just ignore + logger.debug("Aborting reconnection handling since the cluster is shutting down"); } - - public AbstractReconnectionHandler(String name, ScheduledExecutorService executor, ReconnectionPolicy.ReconnectionSchedule schedule, AtomicReference> currentAttempt, long initialDelayMs) { - this.name = name; - this.executor = executor; - this.schedule = schedule; - this.currentAttempt = currentAttempt; - this.initialDelayMs = initialDelayMs; + } + + @Override + public void run() { + // Just make sure we don't start the first try too fast, in case we find out in start() that we + // need to cancel ourselves + try { + ready.await(); + } catch (InterruptedException e) { + // This can happen at shutdown + Thread.currentThread().interrupt(); + return; } - protected abstract Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException; - - protected abstract void onReconnection(Connection connection); - - protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - return true; + if (handlerFuture.isCancelled()) { + logger.debug("Got cancelled, stopping"); + return; } - protected boolean onUnknownException(Exception e, long nextDelayMs) { - return true; + try { + onReconnection(tryReconnect()); + handlerFuture.markAsDone(); + currentAttempt.compareAndSet(handlerFuture, null); + logger.debug("Reconnection successful, cleared the future"); + } catch (ConnectionException e) { + long nextDelay = schedule.nextDelayMs(); + if (onConnectionException(e, nextDelay)) reschedule(nextDelay); + else currentAttempt.compareAndSet(handlerFuture, null); + } catch (AuthenticationException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onAuthenticationException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error( + "Retries against {} have been suspended. It won't be retried unless the node is restarted.", + e.getEndPoint()); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (UnsupportedProtocolVersionException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onUnsupportedProtocolVersionException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error( + "Retries against {} have been suspended. It won't be retried unless the node is restarted.", + e.getEndPoint()); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (ClusterNameMismatchException e) { + logger.error(e.getMessage()); + long nextDelay = schedule.nextDelayMs(); + if (onClusterNameMismatchException(e, nextDelay)) { + reschedule(nextDelay); + } else { + logger.error( + "Retries against {} have been suspended. 
It won't be retried unless the node is restarted.", + e.endPoint); + currentAttempt.compareAndSet(handlerFuture, null); + } + } catch (Exception e) { + long nextDelay = schedule.nextDelayMs(); + if (onUnknownException(e, nextDelay)) reschedule(nextDelay); + else currentAttempt.compareAndSet(handlerFuture, null); } + } - // Retrying on authentication errors makes sense for applications that can update the credentials at runtime, we don't want to force them - // to restart. - protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { - return true; + private void reschedule(long nextDelay) { + // If we got cancelled during the failed reconnection attempt that lead here, don't reschedule + if (handlerFuture.isCancelled()) { + currentAttempt.compareAndSet(handlerFuture, null); + return; } - // Retrying on these errors is unlikely to work - protected boolean onUnsupportedProtocolVersionException(UnsupportedProtocolVersionException e, long nextDelayMs) { - return false; - } + Host.statesLogger.debug("[{}] next reconnection attempt in {} ms", name, nextDelay); + handlerFuture.nextTry = executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); + } - protected boolean onClusterNameMismatchException(ClusterNameMismatchException e, long nextDelayMs) { - return false; - } - - public void start() { - long firstDelay = (initialDelayMs >= 0) ? initialDelayMs : schedule.nextDelayMs(); - logger.debug("First reconnection scheduled in {}ms", firstDelay); - try { - handlerFuture.nextTry = executor.schedule(this, firstDelay, TimeUnit.MILLISECONDS); - - while (true) { - ListenableFuture previous = currentAttempt.get(); - if (previous != null && !previous.isCancelled()) { - logger.debug("Found another already active handler, cancelling"); - handlerFuture.cancel(false); - break; - } - if (currentAttempt.compareAndSet(previous, handlerFuture)) { - Host.statesLogger.debug("[{}] starting reconnection attempt", name); - break; - } - } - ready.countDown(); - } catch (RejectedExecutionException e) { - // The executor has been shutdown, fair enough, just ignore - logger.debug("Aborting reconnection handling since the cluster is shutting down"); - } - } + // The future that the handler exposes to its clients via currentAttempt + @VisibleForTesting + static class HandlerFuture extends AbstractFuture { + // A future representing completion of the next task submitted to the executor + volatile ScheduledFuture nextTry; @Override - public void run() { - // Just make sure we don't start the first try too fast, in case we find out in start() that we need to cancel ourselves - try { - ready.await(); - } catch (InterruptedException e) { - // This can happen at shutdown - Thread.currentThread().interrupt(); - return; - } - - if (handlerFuture.isCancelled()) { - logger.debug("Got cancelled, stopping"); - return; - } - - try { - onReconnection(tryReconnect()); - handlerFuture.markAsDone(); - currentAttempt.compareAndSet(handlerFuture, null); - logger.debug("Reconnection successful, cleared the future"); - } catch (ConnectionException e) { - long nextDelay = schedule.nextDelayMs(); - if (onConnectionException(e, nextDelay)) - reschedule(nextDelay); - else - currentAttempt.compareAndSet(handlerFuture, null); - } catch (AuthenticationException e) { - logger.error(e.getMessage()); - long nextDelay = schedule.nextDelayMs(); - if (onAuthenticationException(e, nextDelay)) { - reschedule(nextDelay); - } else { - logger.error("Retries against {} have been suspended. 
It won't be retried unless the node is restarted.", e.getHost()); - currentAttempt.compareAndSet(handlerFuture, null); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (UnsupportedProtocolVersionException e) { - logger.error(e.getMessage()); - long nextDelay = schedule.nextDelayMs(); - if (onUnsupportedProtocolVersionException(e, nextDelay)) { - reschedule(nextDelay); - } else { - logger.error("Retries against {} have been suspended. It won't be retried unless the node is restarted.", e.getHost()); - currentAttempt.compareAndSet(handlerFuture, null); - } - } catch (ClusterNameMismatchException e) { - logger.error(e.getMessage()); - long nextDelay = schedule.nextDelayMs(); - if (onClusterNameMismatchException(e, nextDelay)) { - reschedule(nextDelay); - } else { - logger.error("Retries against {} have been suspended. It won't be retried unless the node is restarted.", e.address.getAddress()); - currentAttempt.compareAndSet(handlerFuture, null); - } - } catch (Exception e) { - long nextDelay = schedule.nextDelayMs(); - if (onUnknownException(e, nextDelay)) - reschedule(nextDelay); - else - currentAttempt.compareAndSet(handlerFuture, null); - } - } - - private void reschedule(long nextDelay) { - // If we got cancelled during the failed reconnection attempt that lead here, don't reschedule - if (handlerFuture.isCancelled()) { - currentAttempt.compareAndSet(handlerFuture, null); - return; - } - - Host.statesLogger.debug("[{}] next reconnection attempt in {} ms", name, nextDelay); - handlerFuture.nextTry = executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); + public boolean cancel(boolean mayInterruptIfRunning) { + // This is a check-then-act, so we may race with the scheduling of the first try, but in that + // case + // we'll re-check for cancellation when this first try starts running + if (nextTry != null) { + nextTry.cancel(mayInterruptIfRunning); + } + + return super.cancel(mayInterruptIfRunning); } - // The future that the handler exposes to its clients via currentAttempt - @VisibleForTesting - static class HandlerFuture extends AbstractFuture { - // A future representing completion of the next task submitted to the executor - volatile ScheduledFuture nextTry; - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - // This is a check-then-act, so we may race with the scheduling of the first try, but in that case - // we'll re-check for cancellation when this first try starts running - if (nextTry != null) { - nextTry.cancel(mayInterruptIfRunning); - } - - return super.cancel(mayInterruptIfRunning); - } - - void markAsDone() { - super.set(null); - } + void markAsDone() { + super.set(null); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java index c74320540d9..b99b1981c18 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractSession.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,200 +18,179 @@ package com.datastax.driver.core; import com.google.common.base.Function; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; import io.netty.util.concurrent.EventExecutor; - import java.nio.ByteBuffer; import java.util.Map; import java.util.concurrent.ExecutionException; /** * Abstract implementation of the Session interface. - *
<p>
- * This is primarly intended to make mocking easier. + * + *
<p>
This is primarly intended to make mocking easier. */ public abstract class AbstractSession implements Session { - private static final boolean CHECK_IO_DEADLOCKS = SystemProperties.getBoolean( - "com.datastax.driver.CHECK_IO_DEADLOCKS", true); - - /** - * {@inheritDoc} - */ - @Override - public ResultSet execute(String query) { - return execute(new SimpleStatement(query)); + private static final boolean CHECK_IO_DEADLOCKS = + SystemProperties.getBoolean("com.datastax.driver.CHECK_IO_DEADLOCKS", true); + + /** {@inheritDoc} */ + @Override + public ResultSet execute(String query) { + return execute(new SimpleStatement(query)); + } + + /** {@inheritDoc} */ + @Override + public ResultSet execute(String query, Object... values) { + return execute(new SimpleStatement(query, values)); + } + + /** {@inheritDoc} */ + @Override + public ResultSet execute(String query, Map values) { + return execute(new SimpleStatement(query, values)); + } + + /** {@inheritDoc} */ + @Override + public ResultSet execute(Statement statement) { + checkNotInEventLoop(); + return executeAsync(statement).getUninterruptibly(); + } + + /** {@inheritDoc} */ + @Override + public ResultSetFuture executeAsync(String query) { + return executeAsync(new SimpleStatement(query)); + } + + /** {@inheritDoc} */ + @Override + public ResultSetFuture executeAsync(String query, Map values) { + return executeAsync(new SimpleStatement(query, values)); + } + + /** {@inheritDoc} */ + @Override + public ResultSetFuture executeAsync(String query, Object... values) { + return executeAsync(new SimpleStatement(query, values)); + } + + /** {@inheritDoc} */ + @Override + public PreparedStatement prepare(String query) { + checkNotInEventLoop(); + try { + return Uninterruptibles.getUninterruptibly(prepareAsync(query)); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } - - /** - * {@inheritDoc} - */ - @Override - public ResultSet execute(String query, Object... values) { - return execute(new SimpleStatement(query, values)); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultSet execute(String query, Map values) { - return execute(new SimpleStatement(query, values)); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultSet execute(Statement statement) { - checkNotInEventLoop(); - return executeAsync(statement).getUninterruptibly(); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultSetFuture executeAsync(String query) { - return executeAsync(new SimpleStatement(query)); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultSetFuture executeAsync(String query, Map values) { - return executeAsync(new SimpleStatement(query, values)); + } + + /** {@inheritDoc} */ + @Override + public PreparedStatement prepare(RegularStatement statement) { + checkNotInEventLoop(); + try { + return Uninterruptibles.getUninterruptibly(prepareAsync(statement)); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } - - /** - * {@inheritDoc} - */ - @Override - public ResultSetFuture executeAsync(String query, Object... 
values) { - return executeAsync(new SimpleStatement(query, values)); - } - - /** - * {@inheritDoc} - */ - @Override - public PreparedStatement prepare(String query) { - checkNotInEventLoop(); - try { - return Uninterruptibles.getUninterruptibly(prepareAsync(query)); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public PreparedStatement prepare(RegularStatement statement) { - checkNotInEventLoop(); - try { - return Uninterruptibles.getUninterruptibly(prepareAsync(statement)); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public ListenableFuture prepareAsync(String query) { - return prepareAsync(query, null); - } - - /** - * {@inheritDoc} - */ - @Override - public ListenableFuture prepareAsync(final RegularStatement statement) { - - if (statement.hasValues()) - throw new IllegalArgumentException("A statement to prepare should not have values"); - - final CodecRegistry codecRegistry = getCluster().getConfiguration().getCodecRegistry(); - ListenableFuture prepared = prepareAsync(statement.getQueryString(codecRegistry), statement.getOutgoingPayload()); - return Futures.transform(prepared, new Function() { - @Override - public PreparedStatement apply(PreparedStatement prepared) { - ProtocolVersion protocolVersion = getCluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); - if (routingKey != null) - prepared.setRoutingKey(routingKey); - if (statement.getConsistencyLevel() != null) - prepared.setConsistencyLevel(statement.getConsistencyLevel()); - if (statement.getSerialConsistencyLevel() != null) - prepared.setSerialConsistencyLevel(statement.getSerialConsistencyLevel()); - if (statement.isTracing()) - prepared.enableTracing(); - prepared.setRetryPolicy(statement.getRetryPolicy()); - prepared.setOutgoingPayload(statement.getOutgoingPayload()); - prepared.setIdempotent(statement.isIdempotent()); - - return prepared; - } + } + + /** {@inheritDoc} */ + @Override + public ListenableFuture prepareAsync(String query) { + return prepareAsync(query, null); + } + + /** {@inheritDoc} */ + @Override + public ListenableFuture prepareAsync(final RegularStatement statement) { + + if (statement.hasValues()) + throw new IllegalArgumentException("A statement to prepare should not have values"); + + final CodecRegistry codecRegistry = getCluster().getConfiguration().getCodecRegistry(); + ListenableFuture prepared = + prepareAsync(statement.getQueryString(codecRegistry), statement.getOutgoingPayload()); + return GuavaCompatibility.INSTANCE.transform( + prepared, + new Function() { + @Override + public PreparedStatement apply(PreparedStatement prepared) { + ProtocolVersion protocolVersion = + getCluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); + if (routingKey != null) prepared.setRoutingKey(routingKey); + if (statement.getConsistencyLevel() != null) + prepared.setConsistencyLevel(statement.getConsistencyLevel()); + if (statement.getSerialConsistencyLevel() != null) + prepared.setSerialConsistencyLevel(statement.getSerialConsistencyLevel()); + if (statement.isTracing()) prepared.enableTracing(); + prepared.setRetryPolicy(statement.getRetryPolicy()); + prepared.setOutgoingPayload(statement.getOutgoingPayload()); + 
prepared.setIdempotent(statement.isIdempotent()); + + return prepared; + } }); + } + + /** + * Prepares the provided query string asynchronously, sending along the provided custom payload, + * if any. + * + * @param query the CQL query string to prepare + * @param customPayload the custom payload to send along the query, or {@code null} if no payload + * is to be sent + * @return a future on the prepared statement corresponding to {@code query}. + */ + protected abstract ListenableFuture prepareAsync( + String query, Map customPayload); + + /** {@inheritDoc} */ + @Override + public void close() { + try { + closeAsync().get(); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } - - /** - * Prepares the provided query string asynchronously, - * sending along the provided custom payload, if any. - * - * @param query the CQL query string to prepare - * @param customPayload the custom payload to send along the query, or {@code null} if no payload is to be sent - * @return a future on the prepared statement corresponding to {@code query}. - */ - protected abstract ListenableFuture prepareAsync(String query, Map customPayload); - - /** - * {@inheritDoc} - */ - @Override - public void close() { - try { - closeAsync().get(); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - /** - * Checks that the current thread is not one of the Netty I/O threads used by the driver. - *
<p>
- * This method is called from all the synchronous methods of this class to prevent deadlock issues. - *
<p>
- * User code extending this class can also call this method at any time to check if any code - * making blocking calls is being wrongly executed on a Netty I/O thread. - *
<p>
- * Note that the check performed by this method has a small overhead; if - * that is an issue, checks can be disabled by setting the System property - * {@code com.datastax.driver.CHECK_IO_DEADLOCKS} to {@code false}. - * - * @throws IllegalStateException if the current thread is one of the Netty I/O thread used by the driver. - */ - public void checkNotInEventLoop() { - Connection.Factory connectionFactory = getCluster().manager.connectionFactory; - if (!CHECK_IO_DEADLOCKS || connectionFactory == null) - return; - for (EventExecutor executor : connectionFactory.eventLoopGroup) { - if (executor.inEventLoop()) { - throw new IllegalStateException( - "Detected a synchronous call on an I/O thread, this can cause deadlocks or unpredictable " + - "behavior. This generally happens when a Future callback calls a synchronous Session " + - "method (execute() or prepare()), or iterates a result set past the fetch size " + - "(causing an internal synchronous fetch of the next page of results). " + - "Avoid this in your callbacks, or schedule them on a different executor."); - } - } + } + + /** + * Checks that the current thread is not one of the Netty I/O threads used by the driver. + * + *
<p>
This method is called from all the synchronous methods of this class to prevent deadlock + * issues. + * + *
<p>
User code extending this class can also call this method at any time to check if any code + * making blocking calls is being wrongly executed on a Netty I/O thread. + * + *
<p>
Note that the check performed by this method has a small overhead; if that is an issue, + * checks can be disabled by setting the System property {@code + * com.datastax.driver.CHECK_IO_DEADLOCKS} to {@code false}. + * + * @throws IllegalStateException if the current thread is one of the Netty I/O thread used by the + * driver. + */ + public void checkNotInEventLoop() { + Connection.Factory connectionFactory = getCluster().manager.connectionFactory; + if (!CHECK_IO_DEADLOCKS || connectionFactory == null) return; + for (EventExecutor executor : connectionFactory.eventLoopGroup) { + if (executor.inEventLoop()) { + throw new IllegalStateException( + "Detected a synchronous call on an I/O thread, this can cause deadlocks or unpredictable " + + "behavior. This generally happens when a Future callback calls a synchronous Session " + + "method (execute() or prepare()), or iterates a result set past the fetch size " + + "(causing an internal synchronous fetch of the next page of results). " + + "Avoid this in your callbacks, or schedule them on a different executor."); + } } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractTableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractTableMetadata.java index ee8b9205386..2cdb9c52e46 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractTableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractTableMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,324 +17,362 @@ */ package com.datastax.driver.core; - import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; - -import java.util.*; - -/** - * Base class for Tables and Materialized Views metadata. - */ +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +/** Base class for Tables and Materialized Views metadata. */ public abstract class AbstractTableMetadata { - static final Comparator columnMetadataComparator = new Comparator() { + static final Comparator columnMetadataComparator = + new Comparator() { @Override public int compare(ColumnMetadata c1, ColumnMetadata c2) { - return c1.getName().compareTo(c2.getName()); + return c1.getName().compareTo(c2.getName()); } - }; + }; - static final Predicate isAscending = new Predicate() { + // comparator for ordering tables and views by name. 
+ static final Comparator byNameComparator = + new Comparator() { @Override - public boolean apply(ClusteringOrder o) { - return o == ClusteringOrder.ASC; + public int compare(AbstractTableMetadata o1, AbstractTableMetadata o2) { + return o1.getName().compareTo(o2.getName()); } - }; - - protected final KeyspaceMetadata keyspace; - protected final String name; - protected final UUID id; - protected final List partitionKey; - protected final List clusteringColumns; - protected final Map columns; - protected final TableOptionsMetadata options; - protected final List clusteringOrder; - protected final VersionNumber cassandraVersion; - - protected AbstractTableMetadata(KeyspaceMetadata keyspace, - String name, - UUID id, - List partitionKey, - List clusteringColumns, - Map columns, - TableOptionsMetadata options, - List clusteringOrder, - VersionNumber cassandraVersion) { - this.keyspace = keyspace; - this.name = name; - this.id = id; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - this.clusteringOrder = clusteringOrder; - this.cassandraVersion = cassandraVersion; - } - - /** - * Returns the name of this table. - * - * @return the name of this CQL table. - */ - public String getName() { - return name; - } - - /** - * Returns the unique id of this table. - *
<p>
- * Note: this id is available in Cassandra 2.1 and above. It will be - * {@code null} for earlier versions. - * - * @return the unique id of the table. - */ - public UUID getId() { - return id; - } - - /** - * Returns the keyspace this table belong to. - * - * @return the keyspace metadata of the keyspace this table belong to. - */ - public KeyspaceMetadata getKeyspace() { - return keyspace; - } - - /** - * Returns metadata on a column of this table. - * - * @param name the name of the column to retrieve ({@code name} will be - * interpreted as a case-insensitive identifier unless enclosed in double-quotes, - * see {@link Metadata#quote}). - * @return the metadata for the column if it exists, or - * {@code null} otherwise. - */ - public ColumnMetadata getColumn(String name) { - return columns.get(Metadata.handleId(name)); - } - - /** - * Returns a list containing all the columns of this table. - *
<p>
- * The order of the columns in the list is consistent with - * the order of the columns returned by a {@code SELECT * FROM thisTable}: - * the first column is the partition key, next are the clustering - * columns in their defined order, and then the rest of the - * columns follow in alphabetic order. - * - * @return a list containing the metadata for the columns of this table. - */ - public List getColumns() { - return new ArrayList(columns.values()); - } - - /** - * Returns the list of columns composing the primary key for this table. - *
<p>
- * A table will always at least have a partition key (that - * may itself be one or more columns), so the returned list at least - * has one element. - * - * @return the list of columns composing the primary key for this table. - */ - public List getPrimaryKey() { - List pk = new ArrayList(partitionKey.size() + clusteringColumns.size()); - pk.addAll(partitionKey); - pk.addAll(clusteringColumns); - return pk; - } - - /** - * Returns the list of columns composing the partition key for this table. - *
<p>
- * A table always has a partition key so the returned list has - * at least one element. - * - * @return the list of columns composing the partition key for this table. - */ - public List getPartitionKey() { - return Collections.unmodifiableList(partitionKey); - } - - /** - * Returns the list of clustering columns for this table. - * - * @return the list of clustering columns for this table. - * If there is no clustering columns, an empty list is returned. - */ - public List getClusteringColumns() { - return Collections.unmodifiableList(clusteringColumns); - } - - /** - * Returns the clustering order for this table. - *
<p>
- * The returned contains the clustering order of each clustering column. The - * {@code i}th element of the result correspond to the order (ascending or - * descending) of the {@code i}th clustering column (see - * {@link #getClusteringColumns}). Note that a table defined without any - * particular clustering order is equivalent to one for which all the - * clustering keys are in ascending order. - * - * @return a list with the clustering order for each clustering column. - */ - public List getClusteringOrder() { - return clusteringOrder; - } + }; - /** - * Returns the options for this table. - * - * @return the options for this table. - */ - public TableOptionsMetadata getOptions() { - return options; - } - - void add(ColumnMetadata column) { - columns.put(column.getName(), column); - } - - /** - * Returns a {@code String} containing CQL queries representing this - * table and the index on it. - *
<p>
- * In other words, this method returns the queries that would allow you to - * recreate the schema of this table, along with the indexes and views defined on - * this table, if any. - *
<p>
- * Note that the returned String is formatted to be human readable (for - * some definition of human readable at least). - * - * @return the CQL queries representing this table schema as a {code - * String}. - */ - public String exportAsString() { - StringBuilder sb = new StringBuilder(); - - sb.append(asCQLQuery(true)); - - return sb.toString(); + static final Predicate isAscending = + new Predicate() { + @Override + public boolean apply(ClusteringOrder o) { + return o == ClusteringOrder.ASC; + } + }; + + protected final KeyspaceMetadata keyspace; + protected final String name; + protected final UUID id; + protected final List partitionKey; + protected final List clusteringColumns; + protected final Map columns; + protected final TableOptionsMetadata options; + protected final List clusteringOrder; + protected final VersionNumber cassandraVersion; + + protected AbstractTableMetadata( + KeyspaceMetadata keyspace, + String name, + UUID id, + List partitionKey, + List clusteringColumns, + Map columns, + TableOptionsMetadata options, + List clusteringOrder, + VersionNumber cassandraVersion) { + this.keyspace = keyspace; + this.name = name; + this.id = id; + this.partitionKey = partitionKey; + this.clusteringColumns = clusteringColumns; + this.columns = columns; + this.options = options; + this.clusteringOrder = clusteringOrder; + this.cassandraVersion = cassandraVersion; + } + + /** + * Returns the name of this table. + * + * @return the name of this CQL table. + */ + public String getName() { + return name; + } + + /** + * Returns the unique id of this table. + * + *
<p>
Note: this id is available in Cassandra 2.1 and above. It will be {@code null} for earlier + * versions. + * + * @return the unique id of the table. + */ + public UUID getId() { + return id; + } + + /** + * Returns the keyspace this table belong to. + * + * @return the keyspace metadata of the keyspace this table belong to. + */ + public KeyspaceMetadata getKeyspace() { + return keyspace; + } + + /** + * Returns metadata on a column of this table. + * + * @param name the name of the column to retrieve ({@code name} will be interpreted as a + * case-insensitive identifier unless enclosed in double-quotes, see {@link Metadata#quote}). + * @return the metadata for the column if it exists, or {@code null} otherwise. + */ + public ColumnMetadata getColumn(String name) { + return columns.get(Metadata.handleId(name)); + } + + /** + * Returns a list containing all the columns of this table. + * + *
<p>
The order of the columns in the list is consistent with the order of the columns returned by + * a {@code SELECT * FROM thisTable}: the first column is the partition key, next are the + * clustering columns in their defined order, and then the rest of the columns follow in + * alphabetic order. + * + * @return a list containing the metadata for the columns of this table. + */ + public List getColumns() { + return new ArrayList(columns.values()); + } + + /** + * Returns the list of columns composing the primary key for this table. + * + *
<p>
A table will always at least have a partition key (that may itself be one or more columns), + * so the returned list at least has one element. + * + * @return the list of columns composing the primary key for this table. + */ + public List getPrimaryKey() { + List pk = + new ArrayList(partitionKey.size() + clusteringColumns.size()); + pk.addAll(partitionKey); + pk.addAll(clusteringColumns); + return pk; + } + + /** + * Returns the list of columns composing the partition key for this table. + * + *
<p>
A table always has a partition key so the returned list has at least one element. + * + * @return the list of columns composing the partition key for this table. + */ + public List getPartitionKey() { + return Collections.unmodifiableList(partitionKey); + } + + /** + * Returns the list of clustering columns for this table. + * + * @return the list of clustering columns for this table. If there is no clustering columns, an + * empty list is returned. + */ + public List getClusteringColumns() { + return Collections.unmodifiableList(clusteringColumns); + } + + /** + * Returns the clustering order for this table. + * + *
<p>
The returned contains the clustering order of each clustering column. The {@code i}th + * element of the result correspond to the order (ascending or descending) of the {@code i}th + * clustering column (see {@link #getClusteringColumns}). Note that a table defined without any + * particular clustering order is equivalent to one for which all the clustering keys are in + * ascending order. + * + * @return a list with the clustering order for each clustering column. + */ + public List getClusteringOrder() { + return clusteringOrder; + } + + /** + * Returns the options for this table. + * + *
<p>
This value will be null for virtual tables. + * + * @return the options for this table. + */ + public TableOptionsMetadata getOptions() { + return options; + } + + /** + * Returns whether or not this table is a virtual table + * + * @return {@code true} if virtual keyspace, {@code false} otherwise. + */ + public boolean isVirtual() { + return getKeyspace().isVirtual(); + } + + void add(ColumnMetadata column) { + columns.put(column.getName(), column); + } + + /** + * Returns a {@code String} containing CQL queries representing this table and the index on it. + * + *
<p>
In other words, this method returns the queries that would allow you to recreate the schema + * of this table, along with the indexes and views defined on this table, if any. + * + *
<p>
Note that the returned String is formatted to be human readable (for some definition of + * human readable at least). + * + * @return the CQL queries representing this table schema as a {code String}. + */ + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(asCQLQuery(true)); + + return sb.toString(); + } + + /** + * Returns a CQL query representing this table. + * + *
<p>
This method returns a single 'CREATE TABLE' query with the options corresponding to this + * table definition. + * + *
<p>
Note that the returned string is a single line; the returned query is not formatted in any + * way. + * + * @return the 'CREATE TABLE' query corresponding to this table. + * @see #exportAsString + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + protected abstract String asCQLQuery(boolean formatted); + + protected StringBuilder appendOptions(StringBuilder sb, boolean formatted) { + // Options + if (options == null) { + return sb; } - - /** - * Returns a CQL query representing this table. - *
<p>
- * This method returns a single 'CREATE TABLE' query with the options - * corresponding to this table definition. - *
<p>
- * Note that the returned string is a single line; the returned query - * is not formatted in any way. - * - * @return the 'CREATE TABLE' query corresponding to this table. - * @see #exportAsString - */ - public String asCQLQuery() { - return asCQLQuery(false); + sb.append("WITH "); + if (options.isCompactStorage()) and(sb.append("COMPACT STORAGE"), formatted); + if (!clusteringOrder.isEmpty()) and(appendClusteringOrder(sb), formatted); + if (cassandraVersion.getMajor() < 4) + sb.append("read_repair_chance = ").append(options.getReadRepairChance()); + else sb.append("read_repair = '").append(options.getReadRepair()).append('\''); + if (cassandraVersion.getMajor() < 4) + and(sb, formatted) + .append("dclocal_read_repair_chance = ") + .append(options.getLocalReadRepairChance()); + if (cassandraVersion.getMajor() < 2 + || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() == 0)) + and(sb, formatted).append("replicate_on_write = ").append(options.getReplicateOnWrite()); + and(sb, formatted).append("gc_grace_seconds = ").append(options.getGcGraceInSeconds()); + if (cassandraVersion.getMajor() > 3) + and(sb, formatted) + .append("additional_write_policy = '") + .append(options.getAdditionalWritePolicy()) + .append('\''); + and(sb, formatted) + .append("bloom_filter_fp_chance = ") + .append(options.getBloomFilterFalsePositiveChance()); + if (cassandraVersion.getMajor() < 2 + || cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() < 1) + and(sb, formatted) + .append("caching = '") + .append(options.getCaching().get("keys")) + .append('\''); + else and(sb, formatted).append("caching = ").append(formatOptionMap(options.getCaching())); + if (options.getComment() != null) + and(sb, formatted) + .append("comment = '") + .append(options.getComment().replace("'", "''")) + .append('\''); + and(sb, formatted).append("compaction = ").append(formatOptionMap(options.getCompaction())); + and(sb, formatted).append("compression = ").append(formatOptionMap(options.getCompression())); + if (cassandraVersion.getMajor() >= 2) { + and(sb, formatted).append("default_time_to_live = ").append(options.getDefaultTimeToLive()); + and(sb, formatted) + .append("speculative_retry = '") + .append(options.getSpeculativeRetry()) + .append('\''); + if (options.getIndexInterval() != null) + and(sb, formatted).append("index_interval = ").append(options.getIndexInterval()); } - - protected abstract String asCQLQuery(boolean formatted); - - protected StringBuilder appendOptions(StringBuilder sb, boolean formatted) { - // Options - sb.append(" WITH "); - if (options.isCompactStorage()) - and(sb.append("COMPACT STORAGE"), formatted); - if (!Iterables.all(clusteringOrder, isAscending)) - and(appendClusteringOrder(sb), formatted); - sb.append("read_repair_chance = ").append(options.getReadRepairChance()); - and(sb, formatted).append("dclocal_read_repair_chance = ").append(options.getLocalReadRepairChance()); - if (cassandraVersion.getMajor() < 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() == 0)) - and(sb, formatted).append("replicate_on_write = ").append(options.getReplicateOnWrite()); - and(sb, formatted).append("gc_grace_seconds = ").append(options.getGcGraceInSeconds()); - and(sb, formatted).append("bloom_filter_fp_chance = ").append(options.getBloomFilterFalsePositiveChance()); - if (cassandraVersion.getMajor() < 2 || cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() < 1) - and(sb, formatted).append("caching = '").append(options.getCaching().get("keys")).append('\''); - 
else - and(sb, formatted).append("caching = ").append(formatOptionMap(options.getCaching())); - if (options.getComment() != null) - and(sb, formatted).append("comment = '").append(options.getComment().replace("'", "''")).append('\''); - and(sb, formatted).append("compaction = ").append(formatOptionMap(options.getCompaction())); - and(sb, formatted).append("compression = ").append(formatOptionMap(options.getCompression())); - if (cassandraVersion.getMajor() >= 2) { - and(sb, formatted).append("default_time_to_live = ").append(options.getDefaultTimeToLive()); - and(sb, formatted).append("speculative_retry = '").append(options.getSpeculativeRetry()).append('\''); - if (options.getIndexInterval() != null) - and(sb, formatted).append("index_interval = ").append(options.getIndexInterval()); - } - if (cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1)) { - and(sb, formatted).append("min_index_interval = ").append(options.getMinIndexInterval()); - and(sb, formatted).append("max_index_interval = ").append(options.getMaxIndexInterval()); - } - if (cassandraVersion.getMajor() > 2) { - and(sb, formatted).append("crc_check_chance = ").append(options.getCrcCheckChance()); - } - if (cassandraVersion.getMajor() > 3 || (cassandraVersion.getMajor() == 3 && cassandraVersion.getMinor() >= 8)) { - and(sb, formatted).append("cdc = ").append(options.isCDC()); - } - sb.append(';'); - return sb; + if (cassandraVersion.getMajor() > 2 + || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1)) { + and(sb, formatted).append("min_index_interval = ").append(options.getMinIndexInterval()); + and(sb, formatted).append("max_index_interval = ").append(options.getMaxIndexInterval()); } - - @Override - public String toString() { - return asCQLQuery(); + if (cassandraVersion.getMajor() > 2) { + and(sb, formatted).append("crc_check_chance = ").append(options.getCrcCheckChance()); } - - private StringBuilder appendClusteringOrder(StringBuilder sb) { - sb.append("CLUSTERING ORDER BY ("); - for (int i = 0; i < clusteringColumns.size(); i++) { - if (i > 0) sb.append(", "); - sb.append(clusteringColumns.get(i).getName()).append(' ').append(clusteringOrder.get(i)); - } - return sb.append(')'); + if (cassandraVersion.getMajor() > 3 + || (cassandraVersion.getMajor() == 3 && cassandraVersion.getMinor() >= 8)) { + and(sb, formatted).append("cdc = ").append(options.isCDC()); } - - private static String formatOptionMap(Map m) { - StringBuilder sb = new StringBuilder(); - sb.append("{ "); - boolean first = true; - for (Map.Entry entry : m.entrySet()) { - if (first) first = false; - else sb.append(", "); - sb.append('\'').append(entry.getKey()).append('\''); - sb.append(" : "); - try { - sb.append(Integer.parseInt(entry.getValue())); - } catch (NumberFormatException e) { - sb.append('\'').append(entry.getValue()).append('\''); - } - } - sb.append(" }"); - return sb.toString(); + if (cassandraVersion.getMajor() > 1) { + and(sb, formatted) + .append("memtable_flush_period_in_ms = ") + .append(options.getMemtableFlushPeriodInMs()); } - - private StringBuilder and(StringBuilder sb, boolean formatted) { - return newLine(sb, formatted).append(spaces(2, formatted)).append(" AND "); + sb.append(';'); + return sb; + } + + @Override + public String toString() { + if (keyspace.isVirtual()) { + return name; } - static String spaces(int n, boolean formatted) { - if (!formatted) - return ""; - - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < n; i++) - sb.append(' '); - 
- return sb.toString(); - } + return asCQLQuery(); + } - static StringBuilder newLine(StringBuilder sb, boolean formatted) { - if (formatted) - sb.append('\n'); - return sb; + private StringBuilder appendClusteringOrder(StringBuilder sb) { + sb.append("CLUSTERING ORDER BY ("); + for (int i = 0; i < clusteringColumns.size(); i++) { + if (i > 0) sb.append(", "); + sb.append(Metadata.quoteIfNecessary(clusteringColumns.get(i).getName())) + .append(' ') + .append(clusteringOrder.get(i)); } - - static StringBuilder spaceOrNewLine(StringBuilder sb, boolean formatted) { - sb.append(formatted ? '\n' : ' '); - return sb; + return sb.append(')'); + } + + private static String formatOptionMap(Map m) { + StringBuilder sb = new StringBuilder(); + sb.append("{ "); + boolean first = true; + for (Map.Entry entry : m.entrySet()) { + if (first) first = false; + else sb.append(", "); + sb.append('\'').append(entry.getKey()).append('\''); + sb.append(" : "); + try { + sb.append(Integer.parseInt(entry.getValue())); + } catch (NumberFormatException e) { + sb.append('\'').append(entry.getValue()).append('\''); + } } - + sb.append(" }"); + return sb.toString(); + } + + private StringBuilder and(StringBuilder sb, boolean formatted) { + return spaceOrNewLine(sb, formatted).append("AND "); + } + + static StringBuilder newLine(StringBuilder sb, boolean formatted) { + if (formatted) sb.append('\n'); + return sb; + } + + static StringBuilder spaceOrNewLine(StringBuilder sb, boolean formatted) { + sb.append(formatted ? "\n " : ' '); + return sb; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AggregateMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/AggregateMetadata.java index bf8e6887943..15b95122d74 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AggregateMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AggregateMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,377 +21,422 @@ import com.datastax.driver.core.utils.MoreObjects; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Describes a CQL aggregate function (created with {@code CREATE AGGREGATE...}). - */ +/** Describes a CQL aggregate function (created with {@code CREATE AGGREGATE...}). 
*/ public class AggregateMetadata { - private static final Logger LOGGER = LoggerFactory.getLogger(AggregateMetadata.class); - - private final KeyspaceMetadata keyspace; - private final String simpleName; - private final List argumentTypes; - private final String finalFuncSimpleName; - private final String finalFuncFullName; - private final Object initCond; - private final DataType returnType; - private final String stateFuncSimpleName; - private final String stateFuncFullName; - private final DataType stateType; - private final TypeCodec stateTypeCodec; - - private AggregateMetadata(KeyspaceMetadata keyspace, String simpleName, List argumentTypes, - String finalFuncSimpleName, String finalFuncFullName, Object initCond, DataType returnType, - String stateFuncSimpleName, String stateFuncFullName, DataType stateType, TypeCodec stateTypeCodec) { - this.keyspace = keyspace; - this.simpleName = simpleName; - this.argumentTypes = argumentTypes; - this.finalFuncSimpleName = finalFuncSimpleName; - this.finalFuncFullName = finalFuncFullName; - this.initCond = initCond; - this.returnType = returnType; - this.stateFuncSimpleName = stateFuncSimpleName; - this.stateFuncFullName = stateFuncFullName; - this.stateType = stateType; - this.stateTypeCodec = stateTypeCodec; - } - - // Cassandra < 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // signature frozen>, - // argument_types list, - // final_func text, - // initcond blob, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY (keyspace_name, aggregate_name, signature) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // argument_types frozen>, - // final_func text, - // initcond text, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY (keyspace_name, aggregate_name, argument_types) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, argument_types ASC) - static AggregateMetadata build(KeyspaceMetadata ksm, Row row, VersionNumber version, Cluster cluster) { - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - String simpleName = row.getString("aggregate_name"); - List argumentTypes = parseTypes(ksm, row.getList("argument_types", String.class), version, cluster); - String finalFuncSimpleName = row.getString("final_func"); - DataType returnType; - if (version.getMajor() >= 3) { - returnType = DataTypeCqlNameParser.parse(row.getString("return_type"), cluster, ksm.getName(), ksm.userTypes, null, false, false); - } else { - returnType = DataTypeClassNameParser.parseOne(row.getString("return_type"), protocolVersion, codecRegistry); - } - String stateFuncSimpleName = row.getString("state_func"); - String stateTypeName = row.getString("state_type"); - DataType stateType; - Object initCond; - if (version.getMajor() >= 3) { - stateType = DataTypeCqlNameParser.parse(stateTypeName, cluster, ksm.getName(), ksm.userTypes, null, false, false); - String rawInitCond = row.getString("initcond"); - if (rawInitCond == null) { - initCond = null; - } else { - try { - initCond = codecRegistry.codecFor(stateType).parse(rawInitCond); - } catch (RuntimeException e) { - LOGGER.warn("Failed to parse INITCOND literal: {}; getInitCond() will return the text literal instead.", rawInitCond); - 
initCond = rawInitCond; - } - } - } else { - stateType = DataTypeClassNameParser.parseOne(stateTypeName, protocolVersion, codecRegistry); - ByteBuffer rawInitCond = row.getBytes("initcond"); - if (rawInitCond == null) { - initCond = null; - } else { - try { - initCond = codecRegistry.codecFor(stateType).deserialize(rawInitCond, protocolVersion); - } catch (RuntimeException e) { - LOGGER.warn("Failed to deserialize INITCOND value: {}; getInitCond() will return the raw bytes instead.", Bytes.toHexString(rawInitCond)); - initCond = rawInitCond; - } - } - } - - String finalFuncFullName = finalFuncSimpleName == null ? null : Metadata.fullFunctionName(finalFuncSimpleName, Collections.singletonList(stateType)); - String stateFuncFullName = makeStateFuncFullName(stateFuncSimpleName, stateType, argumentTypes); - - return new AggregateMetadata(ksm, simpleName, argumentTypes, - finalFuncSimpleName, finalFuncFullName, initCond, returnType, stateFuncSimpleName, - stateFuncFullName, stateType, codecRegistry.codecFor(stateType)); - } - - private static String makeStateFuncFullName(String stateFuncSimpleName, DataType stateType, List argumentTypes) { - List args = Lists.newArrayList(stateType); - args.addAll(argumentTypes); - return Metadata.fullFunctionName(stateFuncSimpleName, args); - } - - private static List parseTypes(KeyspaceMetadata ksm, List types, VersionNumber version, Cluster cluster) { - if (types.isEmpty()) - return Collections.emptyList(); - - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - ImmutableList.Builder builder = ImmutableList.builder(); - for (String name : types) { - DataType type; - if (version.getMajor() >= 3) { - type = DataTypeCqlNameParser.parse(name, cluster, ksm.getName(), ksm.userTypes, null, false, false); - } else { - type = DataTypeClassNameParser.parseOne(name, protocolVersion, codecRegistry); - } - builder.add(type); - } - return builder.build(); - } - - /** - * Returns a CQL query representing this function in human readable form. - *
<p>
- * This method is equivalent to {@link #asCQLQuery} but the output is formatted. - * - * @return the CQL query representing this function. - */ - public String exportAsString() { - return asCQLQuery(true); - } - - /** - * Returns a CQL query representing this function. - *
<p>
- * This method returns a single 'CREATE FUNCTION' query corresponding to - * this function definition. - * - * @return the 'CREATE FUNCTION' query corresponding to this function. - */ - public String asCQLQuery() { - return asCQLQuery(false); - } - - @Override - public String toString() { - return asCQLQuery(false); - } - - private String asCQLQuery(boolean formatted) { - - StringBuilder sb = new StringBuilder("CREATE AGGREGATE ") - .append(Metadata.quoteIfNecessary(keyspace.getName())) - .append('.'); - - appendSignature(sb); - - TableMetadata.spaceOrNewLine(sb, formatted) - .append("SFUNC ") - .append(Metadata.quoteIfNecessary(stateFuncSimpleName)) - .append(" STYPE ") - .append(stateType.asFunctionParameterString()); - - if (finalFuncSimpleName != null) - TableMetadata.spaceOrNewLine(sb, formatted) - .append("FINALFUNC ") - .append(Metadata.quoteIfNecessary(finalFuncSimpleName)); - - if (initCond != null) - TableMetadata.spaceOrNewLine(sb, formatted) - .append("INITCOND ") - .append(formatInitCond()); - - sb.append(';'); - - return sb.toString(); + private static final Logger LOGGER = LoggerFactory.getLogger(AggregateMetadata.class); + + private final KeyspaceMetadata keyspace; + private final String simpleName; + private final List argumentTypes; + private final String finalFuncSimpleName; + private final String finalFuncFullName; + private final Object initCond; + private final DataType returnType; + private final String stateFuncSimpleName; + private final String stateFuncFullName; + private final DataType stateType; + private final TypeCodec stateTypeCodec; + + private AggregateMetadata( + KeyspaceMetadata keyspace, + String simpleName, + List argumentTypes, + String finalFuncSimpleName, + String finalFuncFullName, + Object initCond, + DataType returnType, + String stateFuncSimpleName, + String stateFuncFullName, + DataType stateType, + TypeCodec stateTypeCodec) { + this.keyspace = keyspace; + this.simpleName = simpleName; + this.argumentTypes = argumentTypes; + this.finalFuncSimpleName = finalFuncSimpleName; + this.finalFuncFullName = finalFuncFullName; + this.initCond = initCond; + this.returnType = returnType; + this.stateFuncSimpleName = stateFuncSimpleName; + this.stateFuncFullName = stateFuncFullName; + this.stateType = stateType; + this.stateTypeCodec = stateTypeCodec; + } + + // Cassandra < 3.0: + // CREATE TABLE system.schema_aggregates ( + // keyspace_name text, + // aggregate_name text, + // signature frozen>, + // argument_types list, + // final_func text, + // initcond blob, + // return_type text, + // state_func text, + // state_type text, + // PRIMARY KEY (keyspace_name, aggregate_name, signature) + // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, signature ASC) + // + // Cassandra >= 3.0: + // CREATE TABLE system.schema_aggregates ( + // keyspace_name text, + // aggregate_name text, + // argument_types frozen>, + // final_func text, + // initcond text, + // return_type text, + // state_func text, + // state_type text, + // PRIMARY KEY (keyspace_name, aggregate_name, argument_types) + // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, argument_types ASC) + static AggregateMetadata build( + KeyspaceMetadata ksm, Row row, VersionNumber version, Cluster cluster) { + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + String simpleName = row.getString("aggregate_name"); + List argumentTypes = + parseTypes(ksm, 
row.getList("argument_types", String.class), version, cluster); + String finalFuncSimpleName = row.getString("final_func"); + DataType returnType; + if (version.getMajor() >= 3) { + returnType = + DataTypeCqlNameParser.parse( + row.getString("return_type"), + cluster, + ksm.getName(), + ksm.userTypes, + null, + false, + false); + } else { + returnType = + DataTypeClassNameParser.parseOne( + row.getString("return_type"), protocolVersion, codecRegistry); } - - private String formatInitCond() { - if (stateTypeCodec.accepts(initCond)) { - try { - return stateTypeCodec.format(initCond); - } catch (RuntimeException e) { - LOGGER.info("Failed to format INITCOND literal: {}", initCond); - } + String stateFuncSimpleName = row.getString("state_func"); + String stateTypeName = row.getString("state_type"); + DataType stateType; + Object initCond; + if (version.getMajor() >= 3) { + stateType = + DataTypeCqlNameParser.parse( + stateTypeName, cluster, ksm.getName(), ksm.userTypes, null, false, false); + String rawInitCond = row.getString("initcond"); + if (rawInitCond == null) { + initCond = null; + } else { + try { + initCond = codecRegistry.codecFor(stateType).parse(rawInitCond); + } catch (RuntimeException e) { + LOGGER.warn( + "Failed to parse INITCOND literal: {}; getInitCond() will return the text literal instead.", + rawInitCond); + initCond = rawInitCond; } - return initCond.toString(); - } - - private void appendSignature(StringBuilder sb) { - sb - .append(Metadata.quoteIfNecessary(simpleName)) - .append('('); - boolean first = true; - for (DataType type : argumentTypes) { - if (first) - first = false; - else - sb.append(','); - sb.append(type.asFunctionParameterString()); + } + } else { + stateType = DataTypeClassNameParser.parseOne(stateTypeName, protocolVersion, codecRegistry); + ByteBuffer rawInitCond = row.getBytes("initcond"); + if (rawInitCond == null) { + initCond = null; + } else { + try { + initCond = codecRegistry.codecFor(stateType).deserialize(rawInitCond, protocolVersion); + } catch (RuntimeException e) { + LOGGER.warn( + "Failed to deserialize INITCOND value: {}; getInitCond() will return the raw bytes instead.", + Bytes.toHexString(rawInitCond)); + initCond = rawInitCond; } - sb.append(')'); + } } - /** - * Returns the keyspace this aggregate belongs to. - * - * @return the keyspace metadata of the keyspace this aggregate belongs to. - */ - public KeyspaceMetadata getKeyspace() { - return keyspace; + String finalFuncFullName = + finalFuncSimpleName == null + ? 
null + : Metadata.fullFunctionName(finalFuncSimpleName, Collections.singletonList(stateType)); + String stateFuncFullName = makeStateFuncFullName(stateFuncSimpleName, stateType, argumentTypes); + + return new AggregateMetadata( + ksm, + simpleName, + argumentTypes, + finalFuncSimpleName, + finalFuncFullName, + initCond, + returnType, + stateFuncSimpleName, + stateFuncFullName, + stateType, + codecRegistry.codecFor(stateType)); + } + + private static String makeStateFuncFullName( + String stateFuncSimpleName, DataType stateType, List argumentTypes) { + List args = Lists.newArrayList(stateType); + args.addAll(argumentTypes); + return Metadata.fullFunctionName(stateFuncSimpleName, args); + } + + private static List parseTypes( + KeyspaceMetadata ksm, List types, VersionNumber version, Cluster cluster) { + if (types.isEmpty()) return Collections.emptyList(); + + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + ImmutableList.Builder builder = ImmutableList.builder(); + for (String name : types) { + DataType type; + if (version.getMajor() >= 3) { + type = + DataTypeCqlNameParser.parse( + name, cluster, ksm.getName(), ksm.userTypes, null, false, false); + } else { + type = DataTypeClassNameParser.parseOne(name, protocolVersion, codecRegistry); + } + builder.add(type); } - - /** - * Returns the CQL signature of this aggregate. - *
<p>
- * This is the name of the aggregate, followed by the names of the argument types between parentheses, - * like it was specified in the {@code CREATE AGGREGATE...} statement, for example {@code sum(int)}. - *
<p>
- * Note that the returned signature is not qualified with the keyspace name. - * - * @return the signature of this aggregate. - */ - public String getSignature() { - StringBuilder sb = new StringBuilder(); - appendSignature(sb); - return sb.toString(); + return builder.build(); + } + + /** + * Returns a CQL query representing this function in human readable form. + * + *
<p>
This method is equivalent to {@link #asCQLQuery} but the output is formatted. + * + * @return the CQL query representing this function. + */ + public String exportAsString() { + return asCQLQuery(true); + } + + /** + * Returns a CQL query representing this function. + * + *
<p>
This method returns a single 'CREATE FUNCTION' query corresponding to this function + * definition. + * + * @return the 'CREATE FUNCTION' query corresponding to this function. + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + @Override + public String toString() { + return asCQLQuery(false); + } + + private String asCQLQuery(boolean formatted) { + + StringBuilder sb = + new StringBuilder("CREATE AGGREGATE ") + .append(Metadata.quoteIfNecessary(keyspace.getName())) + .append('.'); + + appendSignature(sb); + + TableMetadata.spaceOrNewLine(sb, formatted) + .append("SFUNC ") + .append(Metadata.quoteIfNecessary(stateFuncSimpleName)); + TableMetadata.spaceOrNewLine(sb, formatted) + .append("STYPE ") + .append(stateType.asFunctionParameterString()); + + if (finalFuncSimpleName != null) + TableMetadata.spaceOrNewLine(sb, formatted) + .append("FINALFUNC ") + .append(Metadata.quoteIfNecessary(finalFuncSimpleName)); + + if (initCond != null) + TableMetadata.spaceOrNewLine(sb, formatted).append("INITCOND ").append(formatInitCond()); + + sb.append(';'); + + return sb.toString(); + } + + private String formatInitCond() { + if (stateTypeCodec.accepts(initCond)) { + try { + return stateTypeCodec.format(initCond); + } catch (RuntimeException e) { + LOGGER.info("Failed to format INITCOND literal: {}", initCond); + } } - - /** - * Returns the simple name of this aggregate. - *
<p>
- * This is the name of the aggregate, without arguments. Note that aggregates can be overloaded with - * different argument lists, therefore the simple name may not be unique. For example, - * {@code sum(int)} and {@code sum(int,int)} both have the simple name {@code sum}. - * - * @return the simple name of this aggregate. - * @see #getSignature() - */ - public String getSimpleName() { - return simpleName; + return initCond.toString(); + } + + private void appendSignature(StringBuilder sb) { + sb.append(Metadata.quoteIfNecessary(simpleName)).append('('); + boolean first = true; + for (DataType type : argumentTypes) { + if (first) first = false; + else sb.append(','); + sb.append(type.asFunctionParameterString()); } - - /** - * Returns the types of this aggregate's arguments. - * - * @return the types. - */ - public List getArgumentTypes() { - return argumentTypes; - } - - /** - * Returns the final function of this aggregate. - *
<p>
- * This is the function specified with {@code FINALFUNC} in the {@code CREATE AGGREGATE...} - * statement. It transforms the final value after the aggregation is complete. - * - * @return the metadata of the final function, or {@code null} if there is none. - */ - public FunctionMetadata getFinalFunc() { - return (finalFuncFullName == null) - ? null - : keyspace.functions.get(finalFuncFullName); - } - - /** - * Returns the initial state value of this aggregate. - *
<p>
- * This is the value specified with {@code INITCOND} in the {@code CREATE AGGREGATE...} - * statement. It's passed to the initial invocation of the state function (if that function - * does not accept null arguments). - *
<p>
- * The actual type of the returned object depends on the aggregate's - * {@link #getStateType() state type} and on the {@link TypeCodec codec} used - * to {@link TypeCodec#parse(String) parse} the {@code INITCOND} literal. - *
<p>
- * If, for some reason, the {@code INITCOND} literal cannot be parsed, - * a warning will be logged and the returned object will be the original - * {@code INITCOND} literal in its textual, non-parsed form. - * - * @return the initial state, or {@code null} if there is none. - */ - public Object getInitCond() { - return initCond; - } - - /** - * Returns the return type of this aggregate. - *
<p>
- * This is the final type of the value computed by this aggregate; in other words, the return - * type of the final function if it is defined, or the state type otherwise. - * - * @return the return type. - */ - public DataType getReturnType() { - return returnType; - } - - /** - * Returns the state function of this aggregate. - *
<p>
- * This is the function specified with {@code SFUNC} in the {@code CREATE AGGREGATE...} - * statement. It aggregates the current state with each row to produce a new state. - * - * @return the metadata of the state function. - */ - public FunctionMetadata getStateFunc() { - return keyspace.functions.get(stateFuncFullName); - } - - /** - * Returns the state type of this aggregate. - *
<p>
- * This is the type specified with {@code STYPE} in the {@code CREATE AGGREGATE...} - * statement. It defines the type of the value that is accumulated as the aggregate - * iterates through the rows. - * - * @return the state type. - */ - public DataType getStateType() { - return stateType; - } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - - if (other instanceof AggregateMetadata) { - AggregateMetadata that = (AggregateMetadata) other; - return this.keyspace.getName().equals(that.keyspace.getName()) && - this.argumentTypes.equals(that.argumentTypes) && - MoreObjects.equal(this.finalFuncFullName, that.finalFuncFullName) && - // Note: this might be a problem if a custom codec has been registered for the initCond's type, with a target Java type that - // does not properly implement equals. We don't have any control over this, at worst this would lead to spurious change - // notifications. - MoreObjects.equal(this.initCond, that.initCond) && - this.returnType.equals(that.returnType) && - this.stateFuncFullName.equals(that.stateFuncFullName) && - this.stateType.equals(that.stateType); - } - return false; - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(this.keyspace.getName(), this.argumentTypes, this.finalFuncFullName, this.initCond, this.returnType, this.stateFuncFullName, this.stateType); + sb.append(')'); + } + + /** + * Returns the keyspace this aggregate belongs to. + * + * @return the keyspace metadata of the keyspace this aggregate belongs to. + */ + public KeyspaceMetadata getKeyspace() { + return keyspace; + } + + /** + * Returns the CQL signature of this aggregate. + * + *
<p>
This is the name of the aggregate, followed by the names of the argument types between + * parentheses, like it was specified in the {@code CREATE AGGREGATE...} statement, for example + * {@code sum(int)}. + * + *
<p>
Note that the returned signature is not qualified with the keyspace name. + * + * @return the signature of this aggregate. + */ + public String getSignature() { + StringBuilder sb = new StringBuilder(); + appendSignature(sb); + return sb.toString(); + } + + /** + * Returns the simple name of this aggregate. + * + *
<p>
This is the name of the aggregate, without arguments. Note that aggregates can be overloaded + * with different argument lists, therefore the simple name may not be unique. For example, {@code + * sum(int)} and {@code sum(int,int)} both have the simple name {@code sum}. + * + * @return the simple name of this aggregate. + * @see #getSignature() + */ + public String getSimpleName() { + return simpleName; + } + + /** + * Returns the types of this aggregate's arguments. + * + * @return the types. + */ + public List getArgumentTypes() { + return argumentTypes; + } + + /** + * Returns the final function of this aggregate. + * + *
<p>
This is the function specified with {@code FINALFUNC} in the {@code CREATE AGGREGATE...} + * statement. It transforms the final value after the aggregation is complete. + * + * @return the metadata of the final function, or {@code null} if there is none. + */ + public FunctionMetadata getFinalFunc() { + return (finalFuncFullName == null) ? null : keyspace.functions.get(finalFuncFullName); + } + + /** + * Returns the initial state value of this aggregate. + * + *
<p>
This is the value specified with {@code INITCOND} in the {@code CREATE AGGREGATE...} + * statement. It's passed to the initial invocation of the state function (if that function does + * not accept null arguments). + * + *
<p>
The actual type of the returned object depends on the aggregate's {@link #getStateType() + * state type} and on the {@link TypeCodec codec} used to {@link TypeCodec#parse(String) parse} + * the {@code INITCOND} literal. + * + *
<p>
If, for some reason, the {@code INITCOND} literal cannot be parsed, a warning will be logged + * and the returned object will be the original {@code INITCOND} literal in its textual, + * non-parsed form. + * + * @return the initial state, or {@code null} if there is none. + */ + public Object getInitCond() { + return initCond; + } + + /** + * Returns the return type of this aggregate. + * + *
<p>
This is the final type of the value computed by this aggregate; in other words, the return + * type of the final function if it is defined, or the state type otherwise. + * + * @return the return type. + */ + public DataType getReturnType() { + return returnType; + } + + /** + * Returns the state function of this aggregate. + * + *
<p>
This is the function specified with {@code SFUNC} in the {@code CREATE AGGREGATE...} + * statement. It aggregates the current state with each row to produce a new state. + * + * @return the metadata of the state function. + */ + public FunctionMetadata getStateFunc() { + return keyspace.functions.get(stateFuncFullName); + } + + /** + * Returns the state type of this aggregate. + * + *
<p>
This is the type specified with {@code STYPE} in the {@code CREATE AGGREGATE...} statement. + * It defines the type of the value that is accumulated as the aggregate iterates through the + * rows. + * + * @return the state type. + */ + public DataType getStateType() { + return stateType; + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + + if (other instanceof AggregateMetadata) { + AggregateMetadata that = (AggregateMetadata) other; + return this.keyspace.getName().equals(that.keyspace.getName()) + && this.argumentTypes.equals(that.argumentTypes) + && MoreObjects.equal(this.finalFuncFullName, that.finalFuncFullName) + && + // Note: this might be a problem if a custom codec has been registered for the initCond's + // type, with a target Java type that + // does not properly implement equals. We don't have any control over this, at worst this + // would lead to spurious change + // notifications. + MoreObjects.equal(this.initCond, that.initCond) + && this.returnType.equals(that.returnType) + && this.stateFuncFullName.equals(that.stateFuncFullName) + && this.stateType.equals(that.stateType); } + return false; + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + this.keyspace.getName(), + this.argumentTypes, + this.finalFuncFullName, + this.initCond, + this.returnType, + this.stateFuncFullName, + this.stateType); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java index 0bf225eed90..6728bc8d089 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,449 +23,572 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import com.google.common.util.concurrent.Uninterruptibles; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Queue; +import java.util.UUID; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.LinkedBlockingDeque; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Default implementation of a result set, backed by an ArrayDeque of ArrayList. 
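To make the AggregateMetadata accessors documented above more concrete, here is a minimal usage sketch; the contact point, the `ks` keyspace and the `sum(int)` aggregate are hypothetical, and the lookup assumes the keyspace-level `getAggregate(name, argumentTypes)` accessor:

```java
import com.datastax.driver.core.AggregateMetadata;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.KeyspaceMetadata;

public class InspectAggregate {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
      KeyspaceMetadata ks = cluster.getMetadata().getKeyspace("ks");
      // Hypothetical aggregate created with: CREATE AGGREGATE ks.sum(int) ...
      AggregateMetadata agg = ks.getAggregate("sum", DataType.cint());
      System.out.println(agg.getSignature());   // "sum(int)", not keyspace-qualified
      System.out.println(agg.getStateType());   // the STYPE
      System.out.println(agg.getInitCond());    // parsed INITCOND, or null if none
      System.out.println(agg.exportAsString()); // formatted CREATE AGGREGATE statement
    }
  }
}
```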
- */ +/** Default implementation of a result set, backed by an ArrayDeque of ArrayList. */ abstract class ArrayBackedResultSet implements ResultSet { - private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); - - private static final Queue> EMPTY_QUEUE = new ArrayDeque>(0); - - protected final ColumnDefinitions metadata; - protected final Token.Factory tokenFactory; - private final boolean wasApplied; - - protected final ProtocolVersion protocolVersion; - protected final CodecRegistry codecRegistry; - - private ArrayBackedResultSet(ColumnDefinitions metadata, Token.Factory tokenFactory, List firstRow, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - this.metadata = metadata; - this.protocolVersion = protocolVersion; - this.codecRegistry = codecRegistry; - this.tokenFactory = tokenFactory; - this.wasApplied = checkWasApplied(firstRow, metadata, protocolVersion); - } - - static ArrayBackedResultSet fromMessage(Responses.Result msg, SessionManager session, ProtocolVersion protocolVersion, ExecutionInfo info, Statement statement) { - - switch (msg.kind) { - case ROWS: - Responses.Result.Rows r = (Responses.Result.Rows) msg; - - ColumnDefinitions columnDefs; - if (r.metadata.columns == null) { - Statement actualStatement = statement; - if (statement instanceof StatementWrapper) { - actualStatement = ((StatementWrapper) statement).getWrappedStatement(); - } - assert actualStatement instanceof BoundStatement; - columnDefs = ((BoundStatement) actualStatement).statement.getPreparedId().resultSetMetadata; - assert columnDefs != null; - } else { - columnDefs = r.metadata.columns; - } + private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); + + private static final Queue> EMPTY_QUEUE = new ArrayDeque>(0); + + protected volatile ColumnDefinitions metadata; + protected final Token.Factory tokenFactory; + private final boolean wasApplied; + + protected final ProtocolVersion protocolVersion; + protected final CodecRegistry codecRegistry; + + private ArrayBackedResultSet( + ColumnDefinitions metadata, + Token.Factory tokenFactory, + List firstRow, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry) { + this.metadata = metadata; + this.protocolVersion = protocolVersion; + this.codecRegistry = codecRegistry; + this.tokenFactory = tokenFactory; + this.wasApplied = checkWasApplied(firstRow, metadata, protocolVersion); + } + + static ArrayBackedResultSet fromMessage( + Responses.Result msg, + SessionManager session, + ProtocolVersion protocolVersion, + ExecutionInfo info, + Statement statement) { + + switch (msg.kind) { + case ROWS: + Responses.Result.Rows r = (Responses.Result.Rows) msg; + + Statement actualStatement = statement; + if (statement instanceof StatementWrapper) { + actualStatement = ((StatementWrapper) statement).getWrappedStatement(); + } - Token.Factory tokenFactory = (session == null) ? null - : session.getCluster().manager.metadata.tokenFactory(); - - info = update(info, r, session, r.metadata.pagingState, protocolVersion, columnDefs.codecRegistry, statement); - - // info can be null only for internal calls, but we don't page those. We assert - // this explicitly because MultiPage implementation doesn't support info == null. - assert r.metadata.pagingState == null || info != null; - - return r.metadata.pagingState == null - ? 
new SinglePage(columnDefs, tokenFactory, protocolVersion, columnDefs.codecRegistry, r.data, info) - : new MultiPage(columnDefs, tokenFactory, protocolVersion, columnDefs.codecRegistry, r.data, info, r.metadata.pagingState, session); - - case VOID: - case SET_KEYSPACE: - case SCHEMA_CHANGE: - info = update(info, msg, session, null, protocolVersion, null, statement); - return empty(info); - case PREPARED: - throw new RuntimeException("Prepared statement received when a ResultSet was expected"); - default: - logger.error("Received unknown result type '{}'; returning empty result set", msg.kind); - info = update(info, msg, session, null, protocolVersion, null, statement); - return empty(info); + ColumnDefinitions columnDefs = r.metadata.columns; + if (columnDefs == null) { + // If result set metadata is not present, it means the request had SKIP_METADATA set, the + // driver + // only ever does that for bound statements. + BoundStatement bs = (BoundStatement) actualStatement; + columnDefs = bs.preparedStatement().getPreparedId().resultSetMetadata.variables; + } else { + // Otherwise, always use the response's metadata. + // In addition, if a new id is present it means we're executing a bound statement with + // protocol v5, + // the schema changed server-side, and we need to update the prepared statement (see + // CASSANDRA-10786). + MD5Digest newMetadataId = r.metadata.metadataId; + assert !(actualStatement instanceof BoundStatement) + || ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(protocolVersion) + || newMetadataId == null; + if (newMetadataId != null) { + BoundStatement bs = ((BoundStatement) actualStatement); + PreparedId preparedId = bs.preparedStatement().getPreparedId(); + preparedId.resultSetMetadata = + new PreparedId.PreparedMetadata(newMetadataId, columnDefs); + } } + assert columnDefs != null; + + Token.Factory tokenFactory = + (session == null) ? null : session.getCluster().manager.metadata.tokenFactory(); + + info = + update( + info, + r, + session, + r.metadata.pagingState, + protocolVersion, + columnDefs.codecRegistry, + statement); + + // info can be null only for internal calls, but we don't page those. We assert + // this explicitly because MultiPage implementation doesn't support info == null. + assert r.metadata.pagingState == null || info != null; + + return r.metadata.pagingState == null + ? new SinglePage( + columnDefs, tokenFactory, protocolVersion, columnDefs.codecRegistry, r.data, info) + : new MultiPage( + columnDefs, + tokenFactory, + protocolVersion, + columnDefs.codecRegistry, + r.data, + info, + r.metadata.pagingState, + session); + + case VOID: + case SET_KEYSPACE: + case SCHEMA_CHANGE: + info = update(info, msg, session, null, protocolVersion, null, statement); + return empty(info); + case PREPARED: + throw new RuntimeException("Prepared statement received when a ResultSet was expected"); + default: + logger.error("Received unknown result type '{}'; returning empty result set", msg.kind); + info = update(info, msg, session, null, protocolVersion, null, statement); + return empty(info); } - - private static ExecutionInfo update(ExecutionInfo info, Responses.Result msg, SessionManager session, - ByteBuffer pagingState, ProtocolVersion protocolVersion, CodecRegistry codecRegistry, - Statement statement) { - if (info == null) - return null; - - UUID tracingId = msg.getTracingId(); - QueryTrace trace = (tracingId == null) ? 
null : new QueryTrace(tracingId, session); - - return info.with(trace, msg.warnings, pagingState, statement, protocolVersion, codecRegistry); + } + + private static ExecutionInfo update( + ExecutionInfo info, + Responses.Result msg, + SessionManager session, + ByteBuffer pagingState, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry, + Statement statement) { + if (info == null) return null; + + UUID tracingId = msg.getTracingId(); + QueryTrace trace = (tracingId == null) ? null : new QueryTrace(tracingId, session); + + return info.with(trace, msg.warnings, pagingState, statement, protocolVersion, codecRegistry); + } + + private static ArrayBackedResultSet empty(ExecutionInfo info) { + // We could pass the protocol version but we know we won't need it so passing a bogus value + // (null) + return new SinglePage(ColumnDefinitions.EMPTY, null, null, null, EMPTY_QUEUE, info); + } + + @Override + public ColumnDefinitions getColumnDefinitions() { + return metadata; + } + + @Override + public List all() { + if (isExhausted()) return Collections.emptyList(); + + // We may have more than 'getAvailableWithoutFetching' results but we won't have less, and + // at least in the single page case this will be exactly the size we want so ... + List result = new ArrayList(getAvailableWithoutFetching()); + for (Row row : this) result.add(row); + return result; + } + + @Override + public Iterator iterator() { + return new Iterator() { + + @Override + public boolean hasNext() { + return !isExhausted(); + } + + @Override + public Row next() { + return ArrayBackedResultSet.this.one(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public boolean wasApplied() { + return wasApplied; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ResultSet[ exhausted: ").append(isExhausted()); + sb.append(", ").append(metadata).append(']'); + return sb.toString(); + } + + private static class SinglePage extends ArrayBackedResultSet { + + private final Queue> rows; + private final ExecutionInfo info; + + private SinglePage( + ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry, + Queue> rows, + ExecutionInfo info) { + super(metadata, tokenFactory, rows.peek(), protocolVersion, codecRegistry); + this.info = info; + this.rows = rows; } - private static ArrayBackedResultSet empty(ExecutionInfo info) { - // We could pass the protocol version but we know we won't need it so passing a bogus value (null) - return new SinglePage(ColumnDefinitions.EMPTY, null, null, null, EMPTY_QUEUE, info); + @Override + public boolean isExhausted() { + return rows.isEmpty(); } @Override - public ColumnDefinitions getColumnDefinitions() { - return metadata; + public Row one() { + return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, rows.poll()); } @Override - public List all() { - if (isExhausted()) - return Collections.emptyList(); - - // We may have more than 'getAvailableWithoutFetching' results but we won't have less, and - // at least in the single page case this will be exactly the size we want so ... 
- List result = new ArrayList(getAvailableWithoutFetching()); - for (Row row : this) - result.add(row); - return result; + public int getAvailableWithoutFetching() { + return rows.size(); } @Override - public Iterator iterator() { - return new Iterator() { - - @Override - public boolean hasNext() { - return !isExhausted(); - } - - @Override - public Row next() { - return ArrayBackedResultSet.this.one(); - } + public boolean isFullyFetched() { + return true; + } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; + @Override + public ListenableFuture fetchMoreResults() { + return Futures.immediateFuture(this); } @Override - public boolean wasApplied() { - return wasApplied; + public ExecutionInfo getExecutionInfo() { + return info; } @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("ResultSet[ exhausted: ").append(isExhausted()); - sb.append(", ").append(metadata).append(']'); - return sb.toString(); + public List getAllExecutionInfo() { + return Collections.singletonList(info); + } + } + + private static class MultiPage extends ArrayBackedResultSet { + + private Queue> currentPage; + private final Queue nextPages = new ConcurrentLinkedQueue(); + + private final Deque infos = new LinkedBlockingDeque(); + + /* + * The fetching state of this result set. The fetchState will always be in one of + * the 3 following state: + * 1) fetchState is null or reference a null: fetching is done, there + * is nothing more to fetch and no query in progress. + * 2) fetchState.get().nextStart is not null: there is more pages to fetch. In + * that case, inProgress is *guaranteed* to be null. + * 3) fetchState.get().inProgress is not null: a page is being fetched. + * In that case, nextStart is *guaranteed* to be null. + * + * Also note that while ResultSet doesn't pretend to be thread-safe, the actual + * fetch is done asynchronously and so we do need to be volatile below. + */ + private volatile FetchingState fetchState; + + private final SessionManager session; + + private MultiPage( + ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry, + Queue> rows, + ExecutionInfo info, + ByteBuffer pagingState, + SessionManager session) { + + // Note: as of Cassandra 2.1.0, it turns out that the result of a CAS update is never paged, + // so + // we could hard-code the result of wasApplied in this class to "true". However, we can not be + // sure + // that this will never change, so apply the generic check by peeking at the first row. 
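The comment above about CAS updates is the reason `wasApplied()` exists on the public API: conditional statements report their outcome through the `[applied]` column. A minimal sketch (keyspace, table and values are hypothetical):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;

public class ConditionalInsert {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("ks")) {
      ResultSet rs =
          session.execute("INSERT INTO users (id, name) VALUES (42, 'alice') IF NOT EXISTS");
      if (rs.wasApplied()) {
        System.out.println("Inserted");
      } else {
        // On a rejected CAS write, the existing row is returned alongside [applied] = false.
        System.out.println("Already exists: " + rs.one());
      }
    }
  }
}
```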
+ super(metadata, tokenFactory, rows.peek(), protocolVersion, codecRegistry); + this.currentPage = rows; + this.infos.offer(info); + + this.fetchState = new FetchingState(pagingState, null); + this.session = session; } - private static class SinglePage extends ArrayBackedResultSet { + @Override + public boolean isExhausted() { + prepareNextRow(); + return currentPage.isEmpty(); + } - private final Queue> rows; - private final ExecutionInfo info; + @Override + public Row one() { + prepareNextRow(); + return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, currentPage.poll()); + } - private SinglePage(ColumnDefinitions metadata, - Token.Factory tokenFactory, - ProtocolVersion protocolVersion, - CodecRegistry codecRegistry, - Queue> rows, - ExecutionInfo info) { - super(metadata, tokenFactory, rows.peek(), protocolVersion, codecRegistry); - this.info = info; - this.rows = rows; - } + @Override + public int getAvailableWithoutFetching() { + int available = currentPage.size(); + for (NextPage page : nextPages) available += page.data.size(); + return available; + } - @Override - public boolean isExhausted() { - return rows.isEmpty(); - } + @Override + public boolean isFullyFetched() { + return fetchState == null; + } - @Override - public Row one() { - return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, rows.poll()); + // Ensure that after the call the next row to consume is in 'currentPage', i.e. that + // 'currentPage' is empty IFF the ResultSet if fully exhausted. + private void prepareNextRow() { + while (currentPage.isEmpty()) { + // Grab the current state now to get a consistent view in this iteration. + FetchingState fetchingState = this.fetchState; + + NextPage nextPage = nextPages.poll(); + if (nextPage != null) { + if (nextPage.metadata != null) { + this.metadata = nextPage.metadata; + } + currentPage = nextPage.data; + continue; } - - @Override - public int getAvailableWithoutFetching() { - return rows.size(); + if (fetchingState == null) return; + + // We need to know if there is more result, so fetch the next page and + // wait on it. + try { + session.checkNotInEventLoop(); + Uninterruptibles.getUninterruptibly(fetchMoreResults()); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } + } + } - @Override - public boolean isFullyFetched() { - return true; - } + @Override + public ListenableFuture fetchMoreResults() { + return fetchMoreResults(this.fetchState); + } - @Override - public ListenableFuture fetchMoreResults() { - return Futures.immediateFuture(this); - } + private ListenableFuture fetchMoreResults(FetchingState fetchState) { + if (fetchState == null) return Futures.immediateFuture(this); - @Override - public ExecutionInfo getExecutionInfo() { - return info; - } + if (fetchState.inProgress != null) return fetchState.inProgress; - @Override - public List getAllExecutionInfo() { - return Collections.singletonList(info); - } + assert fetchState.nextStart != null; + ByteBuffer state = fetchState.nextStart; + SettableFuture future = SettableFuture.create(); + this.fetchState = new FetchingState(null, future); + return queryNextPage(state, future); } - private static class MultiPage extends ArrayBackedResultSet { - - private Queue> currentPage; - private final Queue>> nextPages = new ConcurrentLinkedQueue>>(); - - private final Deque infos = new LinkedBlockingDeque(); - - /* - * The fetching state of this result set. 
The fetchState will always be in one of - * the 3 following state: - * 1) fetchState is null or reference a null: fetching is done, there - * is nothing more to fetch and no query in progress. - * 2) fetchState.get().nextStart is not null: there is more pages to fetch. In - * that case, inProgress is *guaranteed* to be null. - * 3) fetchState.get().inProgress is not null: a page is being fetched. - * In that case, nextStart is *guaranteed* to be null. - * - * Also note that while ResultSet doesn't pretend to be thread-safe, the actual - * fetch is done asynchronously and so we do need to be volatile below. - */ - private volatile FetchingState fetchState; - - private final SessionManager session; - - private MultiPage(ColumnDefinitions metadata, - Token.Factory tokenFactory, - ProtocolVersion protocolVersion, - CodecRegistry codecRegistry, - Queue> rows, - ExecutionInfo info, - ByteBuffer pagingState, - SessionManager session) { - - // Note: as of Cassandra 2.1.0, it turns out that the result of a CAS update is never paged, so - // we could hard-code the result of wasApplied in this class to "true". However, we can not be sure - // that this will never change, so apply the generic check by peeking at the first row. - super(metadata, tokenFactory, rows.peek(), protocolVersion, codecRegistry); - this.currentPage = rows; - this.infos.offer(info); - - this.fetchState = new FetchingState(pagingState, null); - this.session = session; - } + private ListenableFuture queryNextPage( + ByteBuffer nextStart, final SettableFuture future) { - @Override - public boolean isExhausted() { - prepareNextRow(); - return currentPage.isEmpty(); - } + Statement statement = this.infos.peek().getStatement(); - @Override - public Row one() { - prepareNextRow(); - return ArrayBackedRow.fromData(metadata, tokenFactory, protocolVersion, currentPage.poll()); - } + assert !(statement instanceof BatchStatement); - @Override - public int getAvailableWithoutFetching() { - int available = currentPage.size(); - for (Queue> page : nextPages) - available += page.size(); - return available; - } + final Message.Request request = session.makeRequestMessage(statement, nextStart); + session.execute( + new RequestHandler.Callback() { - @Override - public boolean isFullyFetched() { - return fetchState == null; - } - - // Ensure that after the call the next row to consume is in 'currentPage', i.e. that - // 'currentPage' is empty IFF the ResultSet if fully exhausted. - private void prepareNextRow() { - while (currentPage.isEmpty()) { - // Grab the current state now to get a consistent view in this iteration. - FetchingState fetchingState = this.fetchState; - - Queue> nextPage = nextPages.poll(); - if (nextPage != null) { - currentPage = nextPage; - continue; - } - if (fetchingState == null) - return; - - // We need to know if there is more result, so fetch the next page and - // wait on it. 
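As the comment above explains, iterating past the current page blocks while the next page is fetched. A caller can keep iteration mostly non-blocking by prefetching when the local buffer runs low; a small sketch (the table name, fetch size and threshold are illustrative):

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class PrefetchingIteration {
  static void scan(Session session) {
    Statement stmt = new SimpleStatement("SELECT * FROM ks.large_table").setFetchSize(500);
    ResultSet rs = session.execute(stmt);
    for (Row row : rs) {
      // When fewer than 100 rows remain in memory and more pages exist, start fetching
      // the next page in the background instead of blocking at the page boundary.
      if (rs.getAvailableWithoutFetching() == 100 && !rs.isFullyFetched()) {
        rs.fetchMoreResults(); // asynchronous, returns a ListenableFuture
      }
      process(row);
    }
  }

  static void process(Row row) {
    System.out.println(row);
  }
}
```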
- try { - session.checkNotInEventLoop(); - Uninterruptibles.getUninterruptibly(fetchMoreResults()); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + @Override + public Message.Request request() { + return request; } - } - - @Override - public ListenableFuture fetchMoreResults() { - return fetchMoreResults(this.fetchState); - } - - private ListenableFuture fetchMoreResults(FetchingState fetchState) { - if (fetchState == null) - return Futures.immediateFuture(this); - if (fetchState.inProgress != null) - return fetchState.inProgress; - - assert fetchState.nextStart != null; - ByteBuffer state = fetchState.nextStart; - SettableFuture future = SettableFuture.create(); - this.fetchState = new FetchingState(null, future); - return queryNextPage(state, future); - } - - private ListenableFuture queryNextPage(ByteBuffer nextStart, final SettableFuture future) { - - Statement statement = this.infos.peek().getStatement(); - - - assert !(statement instanceof BatchStatement); - - final Message.Request request = session.makeRequestMessage(statement, nextStart); - session.execute(new RequestHandler.Callback() { - - @Override - public Message.Request request() { - return request; - } - - @Override - public void register(RequestHandler handler) { - } + @Override + public void register(RequestHandler handler) {} - @Override - public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) { - try { - switch (response.type) { - case RESULT: - Responses.Result rm = (Responses.Result) response; - if (rm.kind == Responses.Result.Kind.ROWS) { - Responses.Result.Rows rows = (Responses.Result.Rows) rm; - info = update(info, rm, MultiPage.this.session, rows.metadata.pagingState, protocolVersion, codecRegistry, statement); - MultiPage.this.nextPages.offer(rows.data); - MultiPage.this.fetchState = rows.metadata.pagingState == null ? 
null : new FetchingState(rows.metadata.pagingState, null); - } else if (rm.kind == Responses.Result.Kind.VOID) { - // We shouldn't really get a VOID message here but well, no harm in handling it I suppose - info = update(info, rm, MultiPage.this.session, null, protocolVersion, codecRegistry, statement); - MultiPage.this.fetchState = null; - } else { - logger.error("Received unknown result type '{}' during paging: ignoring message", rm.kind); - // This mean we have probably have a bad node, so defunct the connection - connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s result response", rm.kind))); - future.setException(new DriverInternalError(String.format("Got unexpected %s result response from %s", rm.kind, connection.address))); - return; - } - - MultiPage.this.infos.offer(info); - future.set(MultiPage.this); - break; - case ERROR: - future.setException(((Responses.Error) response).asException(connection.address)); - break; - default: - // This mean we have probably have a bad node, so defunct the connection - connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); - future.setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); - break; - } - } catch (RuntimeException e) { - // If we get a bug here, the client will not get it, so better forwarding the error - future.setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); + @Override + public void onSet( + Connection connection, + Message.Response response, + ExecutionInfo info, + Statement statement, + long latency) { + try { + switch (response.type) { + case RESULT: + Responses.Result rm = (Responses.Result) response; + if (rm.kind == Responses.Result.Kind.ROWS) { + Responses.Result.Rows rows = (Responses.Result.Rows) rm; + info = + update( + info, + rm, + MultiPage.this.session, + rows.metadata.pagingState, + protocolVersion, + codecRegistry, + statement); + // If the query is a prepared 'SELECT *', the metadata can change between + // pages + ColumnDefinitions newMetadata = null; + if (rows.metadata.metadataId != null) { + newMetadata = rows.metadata.columns; + assert statement instanceof BoundStatement; + BoundStatement bs = (BoundStatement) statement; + bs.preparedStatement().getPreparedId().resultSetMetadata = + new PreparedId.PreparedMetadata( + rows.metadata.metadataId, rows.metadata.columns); + } + MultiPage.this.nextPages.offer(new NextPage(newMetadata, rows.data)); + MultiPage.this.fetchState = + rows.metadata.pagingState == null + ? 
null + : new FetchingState(rows.metadata.pagingState, null); + } else if (rm.kind == Responses.Result.Kind.VOID) { + // We shouldn't really get a VOID message here but well, no harm in handling + // it I suppose + info = + update( + info, + rm, + MultiPage.this.session, + null, + protocolVersion, + codecRegistry, + statement); + MultiPage.this.fetchState = null; + } else { + logger.error( + "Received unknown result type '{}' during paging: ignoring message", + rm.kind); + // This mean we have probably have a bad node, so defunct the connection + connection.defunct( + new ConnectionException( + connection.endPoint, + String.format("Got unexpected %s result response", rm.kind))); + future.setException( + new DriverInternalError( + String.format( + "Got unexpected %s result response from %s", + rm.kind, connection.endPoint))); + return; } - } - // This is only called for internal calls, so don't bother with ExecutionInfo - @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - onSet(connection, response, null, null, latency); + MultiPage.this.infos.offer(info); + future.set(MultiPage.this); + break; + case ERROR: + future.setException( + ((Responses.Error) response).asException(connection.endPoint)); + break; + default: + // This mean we have probably have a bad node, so defunct the connection + connection.defunct( + new ConnectionException( + connection.endPoint, + String.format("Got unexpected %s response", response.type))); + future.setException( + new DriverInternalError( + String.format( + "Got unexpected %s response from %s", + response.type, connection.endPoint))); + break; } + } catch (RuntimeException e) { + // If we get a bug here, the client will not get it, so better forwarding the error + future.setException( + new DriverInternalError( + "Unexpected error while processing response from " + connection.endPoint, + e)); + } + } - @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - future.setException(exception); - } + // This is only called for internal calls, so don't bother with ExecutionInfo + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + onSet(connection, response, null, null, latency); + } - @Override - public boolean onTimeout(Connection connection, long latency, int retryCount) { - // This won't be called directly since this will be wrapped by RequestHandler. - throw new UnsupportedOperationException(); - } + @Override + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + future.setException(exception); + } - @Override - public int retryCount() { - // This is only called for internal calls (i.e, when the callback is not wrapped in RequestHandler). - // There is no retry logic in that case, so the value does not really matter. - return 0; - } - }, statement); + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + // This won't be called directly since this will be wrapped by RequestHandler. + throw new UnsupportedOperationException(); + } - return future; - } + @Override + public int retryCount() { + // This is only called for internal calls (i.e, when the callback is not wrapped in + // RequestHandler). + // There is no retry logic in that case, so the value does not really matter. 
+ return 0; + } + }, + statement); - @Override - public ExecutionInfo getExecutionInfo() { - return infos.getLast(); - } + return future; + } - @Override - public List getAllExecutionInfo() { - return new ArrayList(infos); - } + @Override + public ExecutionInfo getExecutionInfo() { + return infos.getLast(); + } - private static class FetchingState { - public final ByteBuffer nextStart; - public final ListenableFuture inProgress; + @Override + public List getAllExecutionInfo() { + return new ArrayList(infos); + } - FetchingState(ByteBuffer nextStart, ListenableFuture inProgress) { - assert (nextStart == null) != (inProgress == null); - this.nextStart = nextStart; - this.inProgress = inProgress; - } - } + private static class FetchingState { + public final ByteBuffer nextStart; + public final ListenableFuture inProgress; + + FetchingState(ByteBuffer nextStart, ListenableFuture inProgress) { + assert (nextStart == null) != (inProgress == null); + this.nextStart = nextStart; + this.inProgress = inProgress; + } } - // This method checks the value of the "[applied]" column manually, to avoid instantiating an ArrayBackedRow - // object that we would throw away immediately. - private static boolean checkWasApplied(List firstRow, ColumnDefinitions metadata, ProtocolVersion protocolVersion) { - // If the column is not present or not a boolean, we assume the query - // was not a conditional statement, and therefore return true. - if (firstRow == null) - return true; - int[] is = metadata.findAllIdx("[applied]"); - if (is == null) - return true; - int i = is[0]; - if (!DataType.cboolean().equals(metadata.getType(i))) - return true; - - // Otherwise return the value of the column - ByteBuffer value = firstRow.get(i); - if (value == null || value.remaining() == 0) - return false; - - return TypeCodec.cboolean().deserializeNoBoxing(value, protocolVersion); + private static class NextPage { + final ColumnDefinitions metadata; + final Queue> data; + + NextPage(ColumnDefinitions metadata, Queue> data) { + this.metadata = metadata; + this.data = data; + } } + } + + // This method checks the value of the "[applied]" column manually, to avoid instantiating an + // ArrayBackedRow + // object that we would throw away immediately. + private static boolean checkWasApplied( + List firstRow, ColumnDefinitions metadata, ProtocolVersion protocolVersion) { + // If the column is not present or not a boolean, we assume the query + // was not a conditional statement, and therefore return true. + if (firstRow == null) return true; + int[] is = metadata.findAllIdx("[applied]"); + if (is == null) return true; + int i = is[0]; + if (!DataType.cboolean().equals(metadata.getType(i))) return true; + + // Otherwise return the value of the column + ByteBuffer value = firstRow.get(i); + if (value == null || value.remaining() == 0) return false; + + return TypeCodec.cboolean().deserializeNoBoxing(value, protocolVersion); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java index ca78e52481e..22aa4fdca62 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ArrayBackedRow.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,115 +18,124 @@ package com.datastax.driver.core; import com.datastax.driver.core.exceptions.DriverInternalError; - import java.nio.ByteBuffer; import java.util.List; import java.util.regex.Pattern; -/** - * Implementation of a Row backed by an ArrayList. - */ +/** Implementation of a Row backed by an ArrayList. */ class ArrayBackedRow extends AbstractGettableData implements Row { - /** - * A pattern to parse (non-aliased) token column names of the form token(x). - * Note that starting from Cassandra 2.2 built-in functions are declared in - * the system keyspace, so the function name is prefixed with "system.". - */ - private static final Pattern TOKEN_COLUMN_NAME = Pattern.compile("(system\\.)?token(.*)"); - - private final ColumnDefinitions metadata; - private final Token.Factory tokenFactory; - private final List data; - - private ArrayBackedRow(ColumnDefinitions metadata, Token.Factory tokenFactory, ProtocolVersion protocolVersion, List data) { - super(protocolVersion); - this.metadata = metadata; - this.tokenFactory = tokenFactory; - this.data = data; - } - - static Row fromData(ColumnDefinitions metadata, Token.Factory tokenFactory, ProtocolVersion protocolVersion, List data) { - if (data == null) - return null; - - return new ArrayBackedRow(metadata, tokenFactory, protocolVersion, data); - } - - @Override - public ColumnDefinitions getColumnDefinitions() { - return metadata; + /** + * A pattern to parse (non-aliased) token column names of the form token(x). Note that starting + * from Cassandra 2.2 built-in functions are declared in the system keyspace, so the function name + * is prefixed with "system.". 
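To show how the token(...) column described above is consumed in practice, a short sketch (contact point, keyspace, table and column names are hypothetical):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Token;

public class TokenColumns {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {
      for (Row row : session.execute("SELECT token(id), id FROM ks.my_table")) {
        // getPartitionKeyToken() locates the token(...) column by name, whether or not
        // Cassandra reports it with the "system." prefix.
        Token token = row.getPartitionKeyToken();
        // The same value can also be read by position.
        Token samePosition = row.getToken(0);
        System.out.println(row.getInt("id") + " -> " + token + " / " + samePosition);
      }
    }
  }
}
```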
+ */ + private static final Pattern TOKEN_COLUMN_NAME = Pattern.compile("(system\\.)?token(.*)"); + + private final ColumnDefinitions metadata; + private final Token.Factory tokenFactory; + private final List data; + + private ArrayBackedRow( + ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + List data) { + super(protocolVersion); + this.metadata = metadata; + this.tokenFactory = tokenFactory; + this.data = data; + } + + static Row fromData( + ColumnDefinitions metadata, + Token.Factory tokenFactory, + ProtocolVersion protocolVersion, + List data) { + if (data == null) return null; + + return new ArrayBackedRow(metadata, tokenFactory, protocolVersion, data); + } + + @Override + public ColumnDefinitions getColumnDefinitions() { + return metadata; + } + + @Override + protected DataType getType(int i) { + return metadata.getType(i); + } + + @Override + protected String getName(int i) { + return metadata.getName(i); + } + + @Override + protected ByteBuffer getValue(int i) { + return data.get(i); + } + + @Override + protected CodecRegistry getCodecRegistry() { + return metadata.codecRegistry; + } + + @Override + protected int getIndexOf(String name) { + return metadata.getFirstIdx(name); + } + + @Override + public Token getToken(int i) { + if (tokenFactory == null) + throw new DriverInternalError( + "Token factory not set. This should only happen at initialization time"); + + checkType(i, tokenFactory.getTokenType().getName()); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) return null; + + return tokenFactory.deserialize(value, protocolVersion); + } + + @Override + public Token getToken(String name) { + return getToken(metadata.getFirstIdx(name)); + } + + @Override + public Token getPartitionKeyToken() { + int i = 0; + for (ColumnDefinitions.Definition column : metadata) { + if (TOKEN_COLUMN_NAME.matcher(column.getName()).matches()) return getToken(i); + i++; } - - @Override - protected DataType getType(int i) { - return metadata.getType(i); - } - - @Override - protected String getName(int i) { - return metadata.getName(i); - } - - @Override - protected ByteBuffer getValue(int i) { - return data.get(i); - } - - @Override - protected CodecRegistry getCodecRegistry() { - return metadata.codecRegistry; - } - - @Override - protected int getIndexOf(String name) { - return metadata.getFirstIdx(name); - } - - @Override - public Token getToken(int i) { - if (tokenFactory == null) - throw new DriverInternalError("Token factory not set. This should only happen at initialization time"); - - checkType(i, tokenFactory.getTokenType().getName()); - - ByteBuffer value = data.get(i); - if (value == null || value.remaining() == 0) - return null; - - return tokenFactory.deserialize(value, protocolVersion); - } - - @Override - public Token getToken(String name) { - return getToken(metadata.getFirstIdx(name)); - } - - @Override - public Token getPartitionKeyToken() { - int i = 0; - for (ColumnDefinitions.Definition column : metadata) { - if (TOKEN_COLUMN_NAME.matcher(column.getName()).matches()) - return getToken(i); - i++; - } - throw new IllegalStateException("Found no column named 'token(...)'. 
If the column is aliased, use getToken(String)."); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Row["); - for (int i = 0; i < metadata.size(); i++) { - if (i != 0) - sb.append(", "); - ByteBuffer bb = data.get(i); - if (bb == null) - sb.append("NULL"); - else - sb.append(getCodecRegistry().codecFor(metadata.getType(i)).deserialize(bb, protocolVersion).toString()); + throw new IllegalStateException( + "Found no column named 'token(...)'. If the column is aliased, use getToken(String)."); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Row["); + for (int i = 0; i < metadata.size(); i++) { + if (i != 0) sb.append(", "); + ByteBuffer bb = data.get(i); + if (bb == null) sb.append("NULL"); + else { + Object o = + getCodecRegistry().codecFor(metadata.getType(i)).deserialize(bb, protocolVersion); + if (o == null) { + sb.append("NULL"); + } else { + sb.append(o.toString()); } - sb.append(']'); - return sb.toString(); + } } + sb.append(']'); + return sb.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java index b5d1ce8a9e3..804b991ead4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,44 +21,48 @@ import java.util.concurrent.atomic.AtomicLong; /** - * A timestamp generator that guarantees monotonically increasing timestamps among all client threads, and logs warnings - * when timestamps drift in the future. + * A timestamp generator that guarantees monotonically increasing timestamps among all client + * threads, and logs warnings when timestamps drift in the future. * * @see AbstractMonotonicTimestampGenerator */ public class AtomicMonotonicTimestampGenerator extends LoggingMonotonicTimestampGenerator { - private AtomicLong lastRef = new AtomicLong(0); + private AtomicLong lastRef = new AtomicLong(0); - /** - * Creates a new instance with a warning threshold and warning interval of one second. - * - * @see #AtomicMonotonicTimestampGenerator(long, TimeUnit, long, TimeUnit) - */ - public AtomicMonotonicTimestampGenerator() { - this(1, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); - } + /** + * Creates a new instance with a warning threshold and warning interval of one second. 
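As a configuration sketch for the generator above, and assuming the usual `Cluster.Builder.withTimestampGenerator(...)` hook is available, a custom warning threshold and interval could be wired in like this (values are illustrative):

```java
import com.datastax.driver.core.AtomicMonotonicTimestampGenerator;
import com.datastax.driver.core.Cluster;
import java.util.concurrent.TimeUnit;

public class TimestampGeneratorConfig {
  public static void main(String[] args) {
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            // Warn when generated timestamps drift more than 5 seconds into the future,
            // at most once every 10 seconds.
            .withTimestampGenerator(
                new AtomicMonotonicTimestampGenerator(5, TimeUnit.SECONDS, 10, TimeUnit.SECONDS))
            .build();
    try {
      // ... use the cluster ...
    } finally {
      cluster.close();
    }
  }
}
```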
+ * + * @see #AtomicMonotonicTimestampGenerator(long, TimeUnit, long, TimeUnit) + */ + public AtomicMonotonicTimestampGenerator() { + this(1, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); + } - /** - * Creates a new instance. - * - * @param warningThreshold how far in the future timestamps are allowed to drift before a warning is logged. - * @param warningThresholdUnit the unit for {@code warningThreshold}. - * @param warningInterval how often the warning will be logged if timestamps keep drifting above the threshold. - * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. - */ - public AtomicMonotonicTimestampGenerator(long warningThreshold, TimeUnit warningThresholdUnit, - long warningInterval, TimeUnit warningIntervalUnit) { - super(warningThreshold, warningThresholdUnit, warningInterval, warningIntervalUnit); - } + /** + * Creates a new instance. + * + * @param warningThreshold how far in the future timestamps are allowed to drift before a warning + * is logged. + * @param warningThresholdUnit the unit for {@code warningThreshold}. + * @param warningInterval how often the warning will be logged if timestamps keep drifting above + * the threshold. + * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. + */ + public AtomicMonotonicTimestampGenerator( + long warningThreshold, + TimeUnit warningThresholdUnit, + long warningInterval, + TimeUnit warningIntervalUnit) { + super(warningThreshold, warningThresholdUnit, warningInterval, warningIntervalUnit); + } - @Override - public long next() { - while (true) { - long last = lastRef.get(); - long next = computeNext(last); - if (lastRef.compareAndSet(last, next)) - return next; - } + @Override + public long next() { + while (true) { + long last = lastRef.get(); + long next = computeNext(last); + if (lastRef.compareAndSet(last, next)) return next; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java b/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java index cef94ab2832..b623f901468 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AuthProvider.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,37 +18,45 @@ package com.datastax.driver.core; import com.datastax.driver.core.exceptions.AuthenticationException; - import java.net.InetSocketAddress; /** - * Provides {@link Authenticator} instances for use when connecting - * to Cassandra nodes. - *
<p>
- * See {@link PlainTextAuthProvider} for an implementation which uses SASL - * PLAIN mechanism to authenticate using username/password strings + * Provides {@link Authenticator} instances for use when connecting to Cassandra nodes. + * + *
<p>
See {@link PlainTextAuthProvider} for an implementation which uses SASL PLAIN mechanism to + * authenticate using username/password strings */ public interface AuthProvider { - /** - * A provider that provides no authentication capability. - *
<p>
- * This is only useful as a placeholder when no authentication is to be used. - */ - public static final AuthProvider NONE = new AuthProvider() { - @Override - public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) { - throw new AuthenticationException(host, - String.format("Host %s requires authentication, but no authenticator found in Cluster configuration", host)); - } - }; + /** + * A provider that provides no authentication capability. + * + *
<p>
This is only useful as a placeholder when no authentication is to be used. + */ + AuthProvider NONE = new ExtendedAuthProvider.NoAuthProvider(); + + /** + * The {@code Authenticator} to use when connecting to {@code host} + * + * @param host the Cassandra host to connect to. + * @param authenticator the configured authenticator on the host. + * @return The authentication implementation to use. + */ + public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) + throws AuthenticationException; + + /** + * Dummy Authenticator that accounts for DSE authentication configured with transitional mode. + * + *
<p>
In this situation, the client is allowed to connect without authentication, but DSE would + * still send an AUTHENTICATE response. This Authenticator handles this situation by sending back + * a dummy credential. + */ + class TransitionalModePlainTextAuthenticator + extends PlainTextAuthProvider.PlainTextAuthenticator { - /** - * The {@code Authenticator} to use when connecting to {@code host} - * - * @param host the Cassandra host to connect to. - * @param authenticator the configured authenticator on the host. - * @return The authentication implementation to use. - */ - public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) throws AuthenticationException; + public TransitionalModePlainTextAuthenticator() { + super("", ""); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java b/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java index 0936c2827e3..29d85b2386c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Authenticator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,57 +19,53 @@ /** * Handles SASL authentication with Cassandra servers. - *
<p>
- * Each time a new connection is created and the server requires authentication, - * a new instance of this class will be created by the corresponding - * {@link AuthProvider} to handle that authentication. The lifecycle of that - * new {@code Authenticator} will be: + * + *
<p>
Each time a new connection is created and the server requires authentication, a new instance + * of this class will be created by the corresponding {@link AuthProvider} to handle that + * authentication. The lifecycle of that new {@code Authenticator} will be: + * *

    - *
- * <ol>
- * <li>The {@code initialResponse} method will be called. The initial return
- * value will be sent to the server to initiate the handshake.</li>
- * <li>The server will respond to each client response by either issuing a
- * challenge or indicating that the authentication is complete (successfully or not).
- * If a new challenge is issued, the authenticator {@code evaluateChallenge}
- * method will be called to produce a response that will be sent to the
- * server. This challenge/response negotiation will continue until the server
- * responds that authentication is successful (or an {@code AuthenticationException}
- * is raised).</li>
- * <li>When the server indicates that authentication is successful, the
- * {@code onAuthenticationSuccess} method will be called with the last information
- * that the server may optionally have sent.</li>
- * </ol>
+ * <ol>
+ *   <li>The {@code initialResponse} method will be called. The initial return value will be sent to
+ *       the server to initiate the handshake.
+ *   <li>The server will respond to each client response by either issuing a challenge or indicating
+ *       that the authentication is complete (successfully or not). If a new challenge is issued,
+ *       the authenticator {@code evaluateChallenge} method will be called to produce a response
+ *       that will be sent to the server. This challenge/response negotiation will continue until
+ *       the server responds that authentication is successful (or an {@code
+ *       AuthenticationException} is raised).
+ *   <li>When the server indicates that authentication is successful, the {@code
+ *       onAuthenticationSuccess} method will be called with the last information that the server
+ *       may optionally have sent.
+ * </ol>
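The lifecycle listed above can be illustrated with a hedged sketch of a custom `Authenticator`/`AuthProvider` pair that answers every challenge with a fixed token. The class name and token are invented for the example; a real implementation must follow the SASL mechanism configured on the server.

```
import com.datastax.driver.core.AuthProvider;
import com.datastax.driver.core.Authenticator;
import com.datastax.driver.core.exceptions.AuthenticationException;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

// Hypothetical provider that replies with the same static token at every step.
public class StaticTokenAuthProvider implements AuthProvider {

  private final byte[] staticToken;

  public StaticTokenAuthProvider(String secret) {
    this.staticToken = secret.getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public Authenticator newAuthenticator(InetSocketAddress host, String authenticator)
      throws AuthenticationException {
    return new Authenticator() {
      @Override
      public byte[] initialResponse() {
        // Step 1: initial token sent to the server to start the handshake.
        return staticToken;
      }

      @Override
      public byte[] evaluateChallenge(byte[] challenge) {
        // Step 2: answer each server challenge; returning null means nothing more to send.
        return challenge == null ? null : staticToken;
      }

      @Override
      public void onAuthenticationSuccess(byte[] token) {
        // Step 3: the server may send final information here; nothing to do in this sketch.
      }
    };
  }
}
```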
- * The exact nature of the negotiation between client and server is specific - * to the authentication mechanism configured server side. + * + * The exact nature of the negotiation between client and server is specific to the authentication + * mechanism configured server side. */ public interface Authenticator { - /** - * Obtain an initial response token for initializing the SASL handshake - * - * @return the initial response to send to the server, may be null - */ - public byte[] initialResponse(); + /** + * Obtain an initial response token for initializing the SASL handshake + * + * @return the initial response to send to the server, may be null + */ + public byte[] initialResponse(); - /** - * Evaluate a challenge received from the Server. Generally, this method - * should return null when authentication is complete from the client - * perspective - * - * @param challenge the server's SASL challenge - * @return updated SASL token, may be null to indicate the client - * requires no further action - */ - public byte[] evaluateChallenge(byte[] challenge); + /** + * Evaluate a challenge received from the Server. Generally, this method should return null when + * authentication is complete from the client perspective + * + * @param challenge the server's SASL challenge + * @return updated SASL token, may be null to indicate the client requires no further action + */ + public byte[] evaluateChallenge(byte[] challenge); - /** - * Called when authentication is successful with the last information - * optionally sent by the server. - * - * @param token the information sent by the server with the authentication - * successful message. This will be {@code null} if the server sends no - * particular information on authentication success. - */ - public void onAuthenticationSuccess(byte[] token); + /** + * Called when authentication is successful with the last information optionally sent by the + * server. + * + * @param token the information sent by the server with the authentication successful message. + * This will be {@code null} if the server sends no particular information on authentication + * success. + */ + public void onAuthenticationSuccess(byte[] token); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java index 481798a7174..22ff7594e91 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,256 +17,297 @@ */ package com.datastax.driver.core; +import com.datastax.driver.core.Frame.Header; +import com.datastax.driver.core.Requests.QueryFlag; import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import com.google.common.collect.ImmutableList; - import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; /** - * A statement that groups a number of {@link Statement} so they get executed as - * a batch. - *

- * Note: BatchStatement is not supported with the native protocol version 1: you - * will get an {@link UnsupportedFeatureException} when submitting one if - * version 1 of the protocol is in use (i.e. if you've force version 1 through - * {@link Cluster.Builder#withProtocolVersion} or you use Cassandra 1.2). Note - * however that you can still use CQL Batch statements - * even with the protocol version 1. - *

- * Setting a BatchStatement's serial consistency level is only supported with the - * native protocol version 3 or higher (see {@link #setSerialConsistencyLevel(ConsistencyLevel)}). + * A statement that groups a number of {@link Statement} so they get executed as a batch. + * + *

Note: BatchStatement is not supported with the native protocol version 1: you will get an + * {@link UnsupportedFeatureException} when submitting one if version 1 of the protocol is in use + * (i.e. if you've force version 1 through {@link Cluster.Builder#withProtocolVersion} or you use + * Cassandra 1.2). Note however that you can still use CQL Batch statements even with + * the protocol version 1. + * + *

Setting a BatchStatement's serial consistency level is only supported with the native protocol + * version 3 or higher (see {@link #setSerialConsistencyLevel(ConsistencyLevel)}). */ public class BatchStatement extends Statement { + /** The type of batch to use. */ + public enum Type { /** - * The type of batch to use. + * A logged batch: Cassandra will first write the batch to its distributed batch log to ensure + * the atomicity of the batch (atomicity meaning that if any statement in the batch succeeds, + * all will eventually succeed). */ - public enum Type { - /** - * A logged batch: Cassandra will first write the batch to its distributed batch log - * to ensure the atomicity of the batch (atomicity meaning that if any statement in - * the batch succeeds, all will eventually succeed). - */ - LOGGED, - - /** - * A batch that doesn't use Cassandra's distributed batch log. Such batch are not - * guaranteed to be atomic. - */ - UNLOGGED, - - /** - * A counter batch. Note that such batch is the only type that can contain counter - * operations and it can only contain these. - */ - COUNTER - } - - ; - - final Type batchType; - private final List statements = new ArrayList(); + LOGGED, /** - * Creates a new {@code LOGGED} batch statement. + * A batch that doesn't use Cassandra's distributed batch log. Such batch are not guaranteed to + * be atomic. */ - public BatchStatement() { - this(Type.LOGGED); - } + UNLOGGED, /** - * Creates a new batch statement of the provided type. - * - * @param batchType the type of batch. + * A counter batch. Note that such batch is the only type that can contain counter operations + * and it can only contain these. */ - public BatchStatement(Type batchType) { - this.batchType = batchType; - } + COUNTER + } - IdAndValues getIdAndValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - IdAndValues idAndVals = new IdAndValues(statements.size()); - for (Statement statement : statements) { - if (statement instanceof StatementWrapper) - statement = ((StatementWrapper) statement).getWrappedStatement(); - if (statement instanceof RegularStatement) { - RegularStatement st = (RegularStatement) statement; - ByteBuffer[] vals = st.getValues(protocolVersion, codecRegistry); - String query = st.getQueryString(codecRegistry); - idAndVals.ids.add(query); - idAndVals.values.add(vals == null ? Collections.emptyList() : Arrays.asList(vals)); - } else { - // We handle BatchStatement in add() so ... - assert statement instanceof BoundStatement; - BoundStatement st = (BoundStatement) statement; - idAndVals.ids.add(st.statement.getPreparedId().id); - idAndVals.values.add(Arrays.asList(st.wrapper.values)); - } - } - return idAndVals; - } + final Type batchType; + private final List statements = new ArrayList(); - /** - * Adds a new statement to this batch. - *

- * Note that {@code statement} can be any {@code Statement}. It is allowed to mix - * {@code RegularStatement} and {@code BoundStatement} in the same - * {@code BatchStatement} in particular. Adding another {@code BatchStatement} - * is also allowed for convenience and is equivalent to adding all the {@code Statement} - * contained in that other {@code BatchStatement}. - *

- * Due to a protocol-level limitation, adding a {@code RegularStatement} with named values - * is currently not supported; an {@code IllegalArgument} will be thrown. - *

- * When adding a {@code BoundStatement}, all of its values must be set, otherwise an - * {@code IllegalStateException} will be thrown when submitting the batch statement. - * See {@link BoundStatement} for more details, in particular how to handle {@code null} - * values. - *

- * Please note that the options of the added Statement (all those defined directly by the - * {@link Statement} class: consistency level, fetch size, tracing, ...) will be ignored - * for the purpose of the execution of the Batch. Instead, the options used are the one - * of this {@code BatchStatement} object. - * - * @param statement the new statement to add. - * @return this batch statement. - * @throws IllegalStateException if adding the new statement means that this - * {@code BatchStatement} has more than 65536 statements (since this is the maximum number - * of statements for a BatchStatement allowed by the underlying protocol). - * @throws IllegalArgumentException if adding a regular statement that uses named values. - */ - public BatchStatement add(Statement statement) { - if (statement instanceof StatementWrapper) { - statement = ((StatementWrapper) statement).getWrappedStatement(); - } - if ((statement instanceof RegularStatement) && ((RegularStatement) statement).usesNamedValues()) { - throw new IllegalArgumentException("Batch statement cannot contain regular statements with named values (" - + ((RegularStatement) statement).getQueryString() + ")"); - } + /** Creates a new {@code LOGGED} batch statement. */ + public BatchStatement() { + this(Type.LOGGED); + } - // We handle BatchStatement here (rather than in getIdAndValues) as it make it slightly - // easier to avoid endless loops if the user mistakenly passes a batch that depends on this - // object (or this directly). - if (statement instanceof BatchStatement) { - for (Statement subStatements : ((BatchStatement) statement).statements) { - add(subStatements); - } - } else { - if (statements.size() >= 0xFFFF) - throw new IllegalStateException("Batch statement cannot contain more than " + 0xFFFF + " statements."); - statements.add(statement); - } - return this; - } + /** + * Creates a new batch statement of the provided type. + * + * @param batchType the type of batch. + */ + public BatchStatement(Type batchType) { + this.batchType = batchType; + } - /** - * Adds multiple statements to this batch. - *

- * This is a shortcut method that calls {@link #add} on all the statements - * from {@code statements}. - * - * @param statements the statements to add. - * @return this batch statement. - */ - public BatchStatement addAll(Iterable statements) { - for (Statement statement : statements) - add(statement); - return this; + IdAndValues getIdAndValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + IdAndValues idAndVals = new IdAndValues(statements.size()); + for (int i = 0; i < statements.size(); i++) { + Statement statement = statements.get(i); + if (statement instanceof StatementWrapper) { + statement = ((StatementWrapper) statement).getWrappedStatement(); + } + if (statement instanceof RegularStatement) { + RegularStatement st = (RegularStatement) statement; + ByteBuffer[] vals = st.getValues(protocolVersion, codecRegistry); + String query = st.getQueryString(codecRegistry); + idAndVals.ids.add(query); + idAndVals.values[i] = vals == null ? Requests.EMPTY_BB_ARRAY : vals; + } else { + // We handle BatchStatement in add() so ... + assert statement instanceof BoundStatement; + BoundStatement st = (BoundStatement) statement; + idAndVals.ids.add(st.statement.getPreparedId().boundValuesMetadata.id); + idAndVals.values[i] = st.wrapper.values; + } } + return idAndVals; + } - /** - * The statements that have been added to this batch so far. - * - * @return an (immutable) collection of the statements that have been added - * to this batch so far. - */ - public Collection getStatements() { - return ImmutableList.copyOf(statements); + /** + * Adds a new statement to this batch. + * + *

Note that {@code statement} can be any {@code Statement}. It is allowed to mix {@code + * RegularStatement} and {@code BoundStatement} in the same {@code BatchStatement} in particular. + * Adding another {@code BatchStatement} is also allowed for convenience and is equivalent to + * adding all the {@code Statement} contained in that other {@code BatchStatement}. + * + *

Due to a protocol-level limitation, adding a {@code RegularStatement} with named values is + * currently not supported; an {@code IllegalArgument} will be thrown. + * + *

When adding a {@code BoundStatement}, all of its values must be set, otherwise an {@code + * IllegalStateException} will be thrown when submitting the batch statement. See {@link + * BoundStatement} for more details, in particular how to handle {@code null} values. + * + *

Please note that the options of the added Statement (all those defined directly by the + * {@link Statement} class: consistency level, fetch size, tracing, ...) will be ignored for the + * purpose of the execution of the Batch. Instead, the options used are the one of this {@code + * BatchStatement} object. + * + * @param statement the new statement to add. + * @return this batch statement. + * @throws IllegalStateException if adding the new statement means that this {@code + * BatchStatement} has more than 65536 statements (since this is the maximum number of + * statements for a BatchStatement allowed by the underlying protocol). + * @throws IllegalArgumentException if adding a regular statement that uses named values. + */ + public BatchStatement add(Statement statement) { + if (statement instanceof StatementWrapper) { + statement = ((StatementWrapper) statement).getWrappedStatement(); } - - /** - * Clears this batch, removing all statements added so far. - * - * @return this (now empty) {@code BatchStatement}. - */ - public BatchStatement clear() { - statements.clear(); - return this; + if ((statement instanceof RegularStatement) + && ((RegularStatement) statement).usesNamedValues()) { + throw new IllegalArgumentException( + "Batch statement cannot contain regular statements with named values (" + + ((RegularStatement) statement).getQueryString() + + ")"); } - /** - * Returns the number of elements in this batch. - * - * @return the number of elements in this batch. - */ - public int size() { - return statements.size(); + // We handle BatchStatement here (rather than in getIdAndValues) as it make it slightly + // easier to avoid endless loops if the user mistakenly passes a batch that depends on this + // object (or this directly). + if (statement instanceof BatchStatement) { + for (Statement subStatements : ((BatchStatement) statement).statements) { + add(subStatements); + } + } else { + if (statements.size() >= 0xFFFF) + throw new IllegalStateException( + "Batch statement cannot contain more than " + 0xFFFF + " statements."); + statements.add(statement); } + return this; + } - /** - * Sets the serial consistency level for the query. - *
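A short usage sketch for the `add` contract documented above, assuming an open `Session` and an illustrative `users` table (all identifiers are placeholders):

```
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

public class BatchExample {
  static void insertUsers(Session session) {
    PreparedStatement prepared = session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");

    BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
    batch.add(prepared.bind(1, "Alice"));
    batch.add(prepared.bind(2, "Bob"));
    // Regular statements may be mixed in, as long as they do not use named values.
    batch.add(new SimpleStatement("INSERT INTO users (id, name) VALUES (3, 'Carol')"));

    // Options are read from the batch itself, not from the statements it contains.
    batch.setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL); // native protocol v3+ only
    session.execute(batch);
  }
}
```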

- * This is only supported with version 3 or higher of the native protocol. If you call - * this method when version 2 is in use, you will get an {@link UnsupportedFeatureException} - * when submitting the statement. With version 2, protocol batches with conditions - * have their serial consistency level hardcoded to SERIAL; if you need to execute a batch - * with LOCAL_SERIAL, you will have to use a CQL batch. - * - * @param serialConsistency the serial consistency level to set. - * @return this {@code Statement} object. - * @throws IllegalArgumentException if {@code serialConsistency} is not one of - * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. - * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) - */ - @Override - public BatchStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { - return (BatchStatement) super.setSerialConsistencyLevel(serialConsistency); - } + /** + * Adds multiple statements to this batch. + * + *

This is a shortcut method that calls {@link #add} on all the statements from {@code + * statements}. + * + * @param statements the statements to add. + * @return this batch statement. + */ + public BatchStatement addAll(Iterable statements) { + for (Statement statement : statements) add(statement); + return this; + } + + /** + * The statements that have been added to this batch so far. + * + * @return an (immutable) collection of the statements that have been added to this batch so far. + */ + public Collection getStatements() { + return ImmutableList.copyOf(statements); + } - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - for (Statement statement : statements) { - if (statement instanceof StatementWrapper) - statement = ((StatementWrapper) statement).getWrappedStatement(); - ByteBuffer rk = statement.getRoutingKey(protocolVersion, codecRegistry); - if (rk != null) - return rk; - } - return null; + /** + * Clears this batch, removing all statements added so far. + * + * @return this (now empty) {@code BatchStatement}. + */ + public BatchStatement clear() { + statements.clear(); + return this; + } + + /** + * Returns the number of elements in this batch. + * + * @return the number of elements in this batch. + */ + public int size() { + return statements.size(); + } + + @Override + public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + int size = Header.lengthFor(protocolVersion) + 3; // type + nb queries + try { + BatchStatement.IdAndValues idAndVals = getIdAndValues(protocolVersion, codecRegistry); + for (int i = 0; i < idAndVals.ids.size(); i++) { + Object q = idAndVals.ids.get(i); + size += + 1 + + (q instanceof String + ? CBUtil.sizeOfLongString((String) q) + : CBUtil.sizeOfShortBytes(((MD5Digest) q).bytes)); + size += CBUtil.sizeOfValueList(idAndVals.values[i]); + } + switch (protocolVersion) { + case V2: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + break; + case V3: + case V4: + case V5: + case V6: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + size += QueryFlag.serializedSize(protocolVersion); + // Serial CL and default timestamp also depend on session-level defaults (QueryOptions). + // We always count them to avoid having to inject QueryOptions here, at worst we + // overestimate by a + // few bytes. + size += CBUtil.sizeOfConsistencyLevel(getSerialConsistencyLevel()); + if (ProtocolFeature.CLIENT_TIMESTAMPS.isSupportedBy(protocolVersion)) { + size += 8; // timestamp + } + if (ProtocolFeature.CUSTOM_PAYLOADS.isSupportedBy(protocolVersion) + && getOutgoingPayload() != null) { + size += CBUtil.sizeOfBytesMap(getOutgoingPayload()); + } + break; + default: + throw protocolVersion.unsupported(); + } + } catch (Exception e) { + size = -1; } + return size; + } - @Override - public String getKeyspace() { - for (Statement statement : statements) { - String keyspace = statement.getKeyspace(); - if (keyspace != null) - return keyspace; - } - return null; + /** + * Sets the serial consistency level for the query. + * + *

This is only supported with version 3 or higher of the native protocol. If you call this + * method when version 2 is in use, you will get an {@link UnsupportedFeatureException} when + * submitting the statement. With version 2, protocol batches with conditions have their serial + * consistency level hardcoded to SERIAL; if you need to execute a batch with LOCAL_SERIAL, you + * will have to use a CQL batch. + * + * @param serialConsistency the serial consistency level to set. + * @return this {@code Statement} object. + * @throws IllegalArgumentException if {@code serialConsistency} is not one of {@code + * ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) + */ + @Override + public BatchStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + return (BatchStatement) super.setSerialConsistencyLevel(serialConsistency); + } + + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + for (Statement statement : statements) { + if (statement instanceof StatementWrapper) + statement = ((StatementWrapper) statement).getWrappedStatement(); + ByteBuffer rk = statement.getRoutingKey(protocolVersion, codecRegistry); + if (rk != null) return rk; } + return null; + } - @Override - public Boolean isIdempotent() { - if (idempotent != null) { - return idempotent; - } - return isBatchIdempotent(statements); + @Override + public String getKeyspace() { + for (Statement statement : statements) { + String keyspace = statement.getKeyspace(); + if (keyspace != null) return keyspace; } + return null; + } - void ensureAllSet() { - for (Statement statement : statements) - if (statement instanceof BoundStatement) - ((BoundStatement) statement).ensureAllSet(); + @Override + public Boolean isIdempotent() { + if (idempotent != null) { + return idempotent; } + return isBatchIdempotent(statements); + } + + void ensureAllSet() { + for (Statement statement : statements) + if (statement instanceof BoundStatement) ((BoundStatement) statement).ensureAllSet(); + } - static class IdAndValues { + static class IdAndValues { - public final List ids; - public final List> values; + public final List ids; + public final ByteBuffer[][] values; - IdAndValues(int nbstatements) { - ids = new ArrayList(nbstatements); - values = new ArrayList>(nbstatements); - } + IdAndValues(int nbstatements) { + ids = new ArrayList(nbstatements); + values = new ByteBuffer[nbstatements][]; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 45264e81165..863498a3f3f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,1389 +17,1203 @@ */ package com.datastax.driver.core; +import com.datastax.driver.core.Frame.Header; +import com.datastax.driver.core.Requests.QueryFlag; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; /** * A prepared statement with values bound to the bind variables. - *

- * Once values has been provided for the variables of the {@link PreparedStatement} - * it has been created from, such BoundStatement can be executed (through - * {@link Session#execute(Statement)}). - *

- * The values of a BoundStatement can be set by either index or name. When - * setting them by name, names follow the case insensitivity rules explained in - * {@link ColumnDefinitions} but with the difference that if multiple bind - * variables have the same name, setting that name will set all the - * variables for that name. - *

- * With native protocol V3 or below, all variables of the statement must be bound. - * If you don't explicitly set a value for a variable, an {@code IllegalStateException} - * will be thrown when submitting the statement. If you want to set a variable to - * {@code null}, use {@link #setToNull(int) setToNull}. - *

- * With native protocol V4 or above, variables can be left unset, in which case they - * will be ignored server side (no tombstones will be generated). If you're reusing - * a bound statement, you can {@link #unset(int) unset} variables that were previously - * set. - *

- * This class is not thread-safe. Do not share instances among requests that will - * execute concurrently (e.g. requests run from separate application threads, but also - * separate {@link Session#executeAsync(Statement) executeAsync} calls, even if they're - * triggered from the same thread). + * + *

Once values has been provided for the variables of the {@link PreparedStatement} it has been + * created from, such BoundStatement can be executed (through {@link Session#execute(Statement)}). + * + *

The values of a BoundStatement can be set by either index or name. When setting them by name, + * names follow the case insensitivity rules explained in {@link ColumnDefinitions} but with the + * difference that if multiple bind variables have the same name, setting that name will set + * all the variables for that name. + * + *

With native protocol V3 or below, all variables of the statement must be bound. If you don't + * explicitly set a value for a variable, an {@code IllegalStateException} will be thrown when + * submitting the statement. If you want to set a variable to {@code null}, use {@link + * #setToNull(int) setToNull}. + * + *

With native protocol V4 or above, variables can be left unset, in which case they will be + * ignored server side (no tombstones will be generated). If you're reusing a bound statement, you + * can {@link #unset(int) unset} variables that were previously set. + * + *

This class is not thread-safe. Do not share instances among requests that will execute + * concurrently (e.g. requests run from separate application threads, but also separate {@link + * Session#executeAsync(Statement) executeAsync} calls, even if they're triggered from the same + * thread). */ -public class BoundStatement extends Statement implements SettableData, GettableData { - static final ByteBuffer UNSET = ByteBuffer.allocate(0); - - final PreparedStatement statement; - - // Statement is already an abstract class, so we can't make it extend AbstractData directly. But - // we still want to avoid duplicating too much code so we wrap. - final DataWrapper wrapper; - - private final CodecRegistry codecRegistry; - - private ByteBuffer routingKey; - - /** - * Creates a new {@code BoundStatement} from the provided prepared - * statement. - * - * @param statement the prepared statement from which to create a {@code BoundStatement}. - */ - public BoundStatement(PreparedStatement statement) { - this.statement = statement; - this.wrapper = new DataWrapper(this, statement.getVariables().size()); - for (int i = 0; i < wrapper.values.length; i++) { - wrapper.values[i] = UNSET; - } - - if (statement.getConsistencyLevel() != null) - this.setConsistencyLevel(statement.getConsistencyLevel()); - if (statement.getSerialConsistencyLevel() != null) - this.setSerialConsistencyLevel(statement.getSerialConsistencyLevel()); - if (statement.isTracing()) - this.enableTracing(); - if (statement.getRetryPolicy() != null) - this.setRetryPolicy(statement.getRetryPolicy()); - if (statement.getOutgoingPayload() != null) - this.setOutgoingPayload(statement.getOutgoingPayload()); - else - // propagate incoming payload as outgoing payload, if no outgoing payload has been explicitly set - this.setOutgoingPayload(statement.getIncomingPayload()); - this.codecRegistry = statement.getCodecRegistry(); - if (statement.isIdempotent() != null) { - this.setIdempotent(statement.isIdempotent()); - } - } - - /** - * Returns the prepared statement on which this BoundStatement is based. - * - * @return the prepared statement on which this BoundStatement is based. - */ - public PreparedStatement preparedStatement() { - return statement; - } - - /** - * Returns whether the {@code i}th variable has been bound. - * - * @param i the index of the variable to check. - * @return whether the {@code i}th variable has been bound. - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - */ - public boolean isSet(int i) { - return wrapper.getValue(i) != UNSET; - } - - /** - * Returns whether the first occurrence of variable {@code name} has been - * bound. - * - * @param name the name of the variable to check. - * @return whether the first occurrence of variable {@code name} has been - * bound to a non-null value. - * @throws IllegalArgumentException if {@code name} is not a prepared - * variable, that is if {@code !this.preparedStatement().variables().names().contains(name)}. - */ - public boolean isSet(String name) { - return wrapper.getValue(wrapper.getIndexOf(name)) != UNSET; - } - - /** - * Unsets the {@code i}th variable. This will leave the statement in the same state as if no setter was - * ever called for this variable. - *

- * The treatment of unset variables depends on the native protocol version, see {@link BoundStatement} - * for explanations. - * - * @param i the index of the variable. - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - */ - public void unset(int i) { - wrapper.setValue(i, UNSET); - } - - /** - * Unsets all occurrences of variable {@code name}. This will leave the statement in the same state - * as if no setter was ever called for this variable. - *

- * The treatment of unset variables depends on the native protocol version, see {@link BoundStatement} - * for explanations. - * - * @param name the name of the variable. - * @throws IllegalArgumentException if {@code name} is not a prepared - * variable, that is if {@code !this.preparedStatement().variables().names().contains(name)}. - */ - public void unset(String name) { - for (int i : wrapper.getAllIndexesOf(name)) { - wrapper.setValue(i, UNSET); - } - } - - /** - * Bound values to the variables of this statement. - *

- * This is a convenience method to bind all the variables of the - * {@code BoundStatement} in one call. - * - * @param values the values to bind to the variables of the newly created - * BoundStatement. The first element of {@code values} will be bound to the - * first bind variable, etc. It is legal to provide fewer values than the - * statement has bound variables. In that case, the remaining variable need - * to be bound before execution. If more values than variables are provided - * however, an IllegalArgumentException wil be raised. - * @return this bound statement. - * @throws IllegalArgumentException if more {@code values} are provided - * than there is of bound variables in this statement. - * @throws InvalidTypeException if any of the provided value is not of - * correct type to be bound to the corresponding bind variable. - * @throws NullPointerException if one of {@code values} is a collection - * (List, Set or Map) containing a null value. Nulls are not supported in - * collections by CQL. - */ - public BoundStatement bind(Object... values) { - - if (values.length > statement.getVariables().size()) - throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.getVariables().size(), values.length)); - - for (int i = 0; i < values.length; i++) { - Object value = values[i]; - if (value == null) { - wrapper.values[i] = null; - } else { - ProtocolVersion protocolVersion = statement.getPreparedId().protocolVersion; - if (value instanceof Token) - // bypass CodecRegistry for token values - wrapper.values[i] = ((Token) value).serialize(protocolVersion); - else - wrapper.values[i] = wrapper.codecFor(i, value).serialize(value, protocolVersion); - } +public class BoundStatement extends Statement + implements SettableData, GettableData { + static final ByteBuffer UNSET = ByteBuffer.allocate(0); + + final PreparedStatement statement; + + // Statement is already an abstract class, so we can't make it extend AbstractData directly. But + // we still want to avoid duplicating too much code so we wrap. + final DataWrapper wrapper; + + private final CodecRegistry codecRegistry; + + private ByteBuffer routingKey; + + /** + * Creates a new {@code BoundStatement} from the provided prepared statement. + * + * @param statement the prepared statement from which to create a {@code BoundStatement}. + */ + public BoundStatement(PreparedStatement statement) { + this.statement = statement; + this.wrapper = new DataWrapper(this, statement.getVariables().size()); + for (int i = 0; i < wrapper.values.length; i++) { + wrapper.values[i] = UNSET; + } + + if (statement.getConsistencyLevel() != null) + this.setConsistencyLevel(statement.getConsistencyLevel()); + if (statement.getSerialConsistencyLevel() != null) + this.setSerialConsistencyLevel(statement.getSerialConsistencyLevel()); + if (statement.isTracing()) this.enableTracing(); + if (statement.getRetryPolicy() != null) this.setRetryPolicy(statement.getRetryPolicy()); + if (statement.getOutgoingPayload() != null) + this.setOutgoingPayload(statement.getOutgoingPayload()); + else + // propagate incoming payload as outgoing payload, if no outgoing payload has been explicitly + // set + this.setOutgoingPayload(statement.getIncomingPayload()); + this.codecRegistry = statement.getCodecRegistry(); + if (statement.isIdempotent() != null) { + this.setIdempotent(statement.isIdempotent()); + } + } + + /** + * Returns the prepared statement on which this BoundStatement is based. 
+ * + * @return the prepared statement on which this BoundStatement is based. + */ + public PreparedStatement preparedStatement() { + return statement; + } + + /** + * Returns whether the {@code i}th variable has been bound. + * + * @param i the index of the variable to check. + * @return whether the {@code i}th variable has been bound. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= + * this.preparedStatement().variables().size()}. + */ + public boolean isSet(int i) { + return wrapper.getValue(i) != UNSET; + } + + /** + * Returns whether the first occurrence of variable {@code name} has been bound. + * + * @param name the name of the variable to check. + * @return whether the first occurrence of variable {@code name} has been bound to a non-null + * value. + * @throws IllegalArgumentException if {@code name} is not a prepared variable, that is if {@code + * !this.preparedStatement().variables().names().contains(name)}. + */ + public boolean isSet(String name) { + return wrapper.getValue(wrapper.getIndexOf(name)) != UNSET; + } + + /** + * Unsets the {@code i}th variable. This will leave the statement in the same state as if no + * setter was ever called for this variable. + * + *

The treatment of unset variables depends on the native protocol version, see {@link + * BoundStatement} for explanations. + * + * @param i the index of the variable. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= + * this.preparedStatement().variables().size()}. + */ + public void unset(int i) { + wrapper.setValue(i, UNSET); + } + + /** + * Unsets all occurrences of variable {@code name}. This will leave the statement in the same + * state as if no setter was ever called for this variable. + * + *

The treatment of unset variables depends on the native protocol version, see {@link + * BoundStatement} for explanations. + * + * @param name the name of the variable. + * @throws IllegalArgumentException if {@code name} is not a prepared variable, that is if {@code + * !this.preparedStatement().variables().names().contains(name)}. + */ + public void unset(String name) { + for (int i : wrapper.getAllIndexesOf(name)) { + wrapper.setValue(i, UNSET); + } + } + + /** + * Bound values to the variables of this statement. + * + *

This is a convenience method to bind all the variables of the {@code BoundStatement} in one + * call. + * + * @param values the values to bind to the variables of the newly created BoundStatement. The + * first element of {@code values} will be bound to the first bind variable, etc. It is legal + * to provide fewer values than the statement has bound variables. In that case, the remaining + * variable need to be bound before execution. If more values than variables are provided + * however, an IllegalArgumentException wil be raised. + * @return this bound statement. + * @throws IllegalArgumentException if more {@code values} are provided than there is of bound + * variables in this statement. + * @throws InvalidTypeException if any of the provided value is not of correct type to be bound to + * the corresponding bind variable. + * @throws NullPointerException if one of {@code values} is a collection (List, Set or Map) + * containing a null value. Nulls are not supported in collections by CQL. + */ + public BoundStatement bind(Object... values) { + + if (values.length > statement.getVariables().size()) + throw new IllegalArgumentException( + String.format( + "Prepared statement has only %d variables, %d values provided", + statement.getVariables().size(), values.length)); + + for (int i = 0; i < values.length; i++) { + Object value = values[i]; + if (value == null) { + wrapper.values[i] = null; + } else { + ProtocolVersion protocolVersion = statement.getPreparedId().protocolVersion; + if (value instanceof Token) + // bypass CodecRegistry for token values + wrapper.values[i] = ((Token) value).serialize(protocolVersion); + else wrapper.values[i] = wrapper.codecFor(i, value).serialize(value, protocolVersion); + } + } + return this; + } + + /** + * The routing key for this bound query. + * + *
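A binding sketch for the bind/setter/unset behaviour described above, again assuming an open `Session`; the table and column names are illustrative only.

```
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

public class BindExample {
  static void updateUsers(Session session) {
    PreparedStatement prepared =
        session.prepare("UPDATE users SET name = ?, email = ? WHERE id = ?");

    // Positional binding of all variables in one call:
    BoundStatement first = prepared.bind("Alice", "alice@example.com", 1);

    // Or explicit setters, by index or by name:
    BoundStatement second = prepared.bind();
    second.setInt("id", 2);
    second.setString("name", "Bob");
    second.setToNull("email"); // explicit null
    // With native protocol V4 or above, a variable can also be left unset:
    // second.unset("email");

    session.execute(first);
    session.execute(second);
  }
}
```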

This method will return a non-{@code null} value if either of the following occur:
+ *
+ * <ul>
+ *   <li>The routing key has been set directly through {@link BoundStatement#setRoutingKey}.
+ *   <li>The routing key has been set through {@link PreparedStatement#setRoutingKey} for the
+ *       {@code PreparedStatement} this statement has been built from.
+ *   <li>All the columns composing the partition key are bound variables of this {@code
+ *       BoundStatement}. The routing key will then be built using the values provided for these
+ *       partition key columns.
+ * </ul>
+ *
+ * Otherwise, {@code null} is returned.
+ *
+ * <p>
Note that if the routing key has been set through {@link BoundStatement#setRoutingKey}, then + * that takes precedence. If the routing key has been set through {@link + * PreparedStatement#setRoutingKey} then that is used next. If neither of those are set then it is + * computed. + * + * @param protocolVersion unused by this implementation (no internal serialization is required to + * compute the key). + * @param codecRegistry unused by this implementation (no internal serialization is required to + * compute the key). + * @return the routing key for this statement or {@code null}. + */ + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + if (this.routingKey != null) { + return this.routingKey; + } + + if (statement.getRoutingKey() != null) { + return statement.getRoutingKey(); + } + + int[] rkIndexes = statement.getPreparedId().routingKeyIndexes; + if (rkIndexes != null) { + if (rkIndexes.length == 1) { + return wrapper.values[rkIndexes[0]]; + } else { + ByteBuffer[] components = new ByteBuffer[rkIndexes.length]; + for (int i = 0; i < components.length; ++i) { + ByteBuffer value = wrapper.values[rkIndexes[i]]; + if (value == null) return null; + components[i] = value; } - return this; - } - - /** - * The routing key for this bound query. - *
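The routing-key rules above can be exercised as in the following sketch, which assumes a hypothetical `events` table with partition key `(tenant, day)` and uses the default codec registry and newest supported protocol version purely for illustration.

```
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;
import java.nio.ByteBuffer;

public class RoutingKeyExample {
  static void showRoutingKey(Session session) {
    PreparedStatement prepared =
        session.prepare("SELECT * FROM events WHERE tenant = ? AND day = ?");
    BoundStatement bound = prepared.bind("acme", 20240101);

    ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED;

    // Both partition key columns are bound, so the key is computed from their values:
    ByteBuffer computed = bound.getRoutingKey(version, CodecRegistry.DEFAULT_INSTANCE);
    System.out.println("computed routing key: " + computed);

    // It can also be forced explicitly, e.g. when only some components are bound:
    ByteBuffer tenant = TypeCodec.varchar().serialize("acme", version);
    ByteBuffer day = TypeCodec.cint().serialize(20240101, version);
    bound.setRoutingKey(tenant, day);
  }
}
```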

- * This method will return a non-{@code null} value if either of the following occur: - *

    - *
- * <ul>
- * <li>The routing key has been set directly through {@link BoundStatement#setRoutingKey}.</li>
- * <li>The routing key has been set through {@link PreparedStatement#setRoutingKey} for the
- * {@code PreparedStatement} this statement has been built from.</li>
- * <li>All the columns composing the partition key are bound variables of this {@code BoundStatement}. The routing
- * key will then be built using the values provided for these partition key columns.</li>
- * </ul>
- * Otherwise, {@code null} is returned. - *

- *

- * Note that if the routing key has been set through {@link BoundStatement#setRoutingKey}, then that takes - * precedence. If the routing key has been set through {@link PreparedStatement#setRoutingKey} then that is used - * next. If neither of those are set then it is computed. - * - * @param protocolVersion unused by this implementation (no internal serialization is required to compute the key). - * @param codecRegistry unused by this implementation (no internal serialization is required to compute the key). - * @return the routing key for this statement or {@code null}. - */ - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - if (this.routingKey != null) { - return this.routingKey; - } - - if (statement.getRoutingKey() != null) { - return statement.getRoutingKey(); - } - - int[] rkIndexes = statement.getPreparedId().routingKeyIndexes; - if (rkIndexes != null) { - if (rkIndexes.length == 1) { - return wrapper.values[rkIndexes[0]]; - } else { - ByteBuffer[] components = new ByteBuffer[rkIndexes.length]; - for (int i = 0; i < components.length; ++i) { - ByteBuffer value = wrapper.values[rkIndexes[i]]; - if (value == null) - return null; - components[i] = value; - } - return SimpleStatement.compose(components); - } - } - return null; - } - - /** - * Sets the routing key for this bound statement. - *

- * This is useful when the routing key can neither be set on the {@code PreparedStatement} this bound statement - * was built from, nor automatically computed from bound variables. In particular, this is the case if the - * partition key is composite and only some of its components are bound. - * - * @param routingKey the raw (binary) value to use as routing key. - * @return this {@code BoundStatement} object. - * @see BoundStatement#getRoutingKey - */ - public BoundStatement setRoutingKey(ByteBuffer routingKey) { - this.routingKey = routingKey; - return this; - } - - /** - * Sets the routing key for this bound statement, when the query partition key is composite and the routing key must - * be built from multiple values. - *

- * This is useful when the routing key can neither be set on the {@code PreparedStatement} this bound statement - * was built from, nor automatically computed from bound variables. In particular, this is the case if the - * partition key is composite and only some of its components are bound. - * - * @param routingKeyComponents the raw (binary) values to compose to obtain - * the routing key. - * @return this {@code BoundStatement} object. - * @see BoundStatement#getRoutingKey - */ - public BoundStatement setRoutingKey(ByteBuffer... routingKeyComponents) { - this.routingKey = SimpleStatement.compose(routingKeyComponents); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public String getKeyspace() { - return statement.getPreparedId().metadata.size() == 0 ? null : statement.getPreparedId().metadata.getKeyspace(0); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBool(int i, boolean v) { - return wrapper.setBool(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBool(String name, boolean v) { - return wrapper.setBool(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setByte(int i, byte v) { - return wrapper.setByte(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setByte(String name, byte v) { - return wrapper.setByte(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setShort(int i, short v) { - return wrapper.setShort(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setShort(String name, short v) { - return wrapper.setShort(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setInt(int i, int v) { - return wrapper.setInt(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setInt(String name, int v) { - return wrapper.setInt(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setLong(int i, long v) { - return wrapper.setLong(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setLong(String name, long v) { - return wrapper.setLong(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTimestamp(int i, Date v) { - return wrapper.setTimestamp(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTimestamp(String name, Date v) { - return wrapper.setTimestamp(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDate(int i, LocalDate v) { - return wrapper.setDate(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDate(String name, LocalDate v) { - return wrapper.setDate(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTime(int i, long v) { - return wrapper.setTime(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTime(String name, long v) { - return wrapper.setTime(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setFloat(int i, float v) { - return wrapper.setFloat(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setFloat(String name, float v) { - return wrapper.setFloat(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDouble(int i, double v) { - return wrapper.setDouble(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDouble(String name, double v) { - return wrapper.setDouble(name, v); - } - - 
/** - * {@inheritDoc} - */ - @Override - public BoundStatement setString(int i, String v) { - return wrapper.setString(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setString(String name, String v) { - return wrapper.setString(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBytes(int i, ByteBuffer v) { - return wrapper.setBytes(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBytes(String name, ByteBuffer v) { - return wrapper.setBytes(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { - return wrapper.setBytesUnsafe(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { - return wrapper.setBytesUnsafe(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setVarint(int i, BigInteger v) { - return wrapper.setVarint(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setVarint(String name, BigInteger v) { - return wrapper.setVarint(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDecimal(int i, BigDecimal v) { - return wrapper.setDecimal(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setDecimal(String name, BigDecimal v) { - return wrapper.setDecimal(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setUUID(int i, UUID v) { - return wrapper.setUUID(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setUUID(String name, UUID v) { - return wrapper.setUUID(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setInet(int i, InetAddress v) { - return wrapper.setInet(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setInet(String name, InetAddress v) { - return wrapper.setInet(name, v); - } - - /** - * Sets the {@code i}th value to the provided {@link Token}. - *

- * {@link #setPartitionKeyToken(Token)} should generally be preferred if you - * have a single token variable. - * - * @param i the index of the variable to set. - * @param v the value to set. - * @return this BoundStatement. - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is not of the type of the token's value. - */ - public BoundStatement setToken(int i, Token v) { - return wrapper.setToken(i, v); - } - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided token. - *

- * {@link #setPartitionKeyToken(Token)} should generally be preferred if you - * have a single token variable. - *

- * If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, - * or named bind markers: - *

-     * <pre>
-     * {@code
-     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
-     * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
-     * }
-     * </pre>
- * - * @param name the name of the variable to set; if multiple variables - * {@code name} are prepared, all of them are set. - * @param v the value to set. - * @return this BoundStatement. - * @throws IllegalArgumentException if {@code name} is not a prepared - * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if (any occurrence of) {@code name} is - * not of the type of the token's value. - */ - public BoundStatement setToken(String name, Token v) { - return wrapper.setToken(name, v); - } - - /** - * Sets the value for (all occurrences of) variable "{@code partition key token}" - * to the provided token (this is the name generated by Cassandra for markers - * corresponding to a {@code token(...)} call). - *

- * This method is a shorthand for statements with a single token variable: - *

-     * <pre>
-     * {@code
-     * Token token = ...
-     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) = ?");
-     * BoundStatement b = pst.bind().setPartitionKeyToken(token);
-     * }
-     * </pre>
- * If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, - * or named bind markers: - *
-     * <pre>
-     * {@code
-     * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
-     * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
-     * }
-     * </pre>
- * - * @param v the value to set. - * @return this BoundStatement. - * @throws IllegalArgumentException if {@code name} is not a prepared - * variable, that is, if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if (any occurrence of) {@code name} is - * not of the type of the token's value. - */ - public BoundStatement setPartitionKeyToken(Token v) { - return setToken("partition key token", v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(int i, List v) { - return wrapper.setList(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(int i, List v, Class elementsClass) { - return wrapper.setList(i, v, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(int i, List v, TypeToken elementsType) { - return wrapper.setList(i, v, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(String name, List v) { - return wrapper.setList(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(String name, List v, Class elementsClass) { - return wrapper.setList(name, v, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setList(String name, List v, TypeToken elementsType) { - return wrapper.setList(name, v, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(int i, Map v) { - return wrapper.setMap(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(int i, Map v, Class keysClass, Class valuesClass) { - return wrapper.setMap(i, v, keysClass, valuesClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(int i, Map v, TypeToken keysType, TypeToken valuesType) { - return wrapper.setMap(i, v, keysType, valuesType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(String name, Map v) { - return wrapper.setMap(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(String name, Map v, Class keysClass, Class valuesClass) { - return wrapper.setMap(name, v, keysClass, valuesClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setMap(String name, Map v, TypeToken keysType, TypeToken valuesType) { - return wrapper.setMap(name, v, keysType, valuesType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(int i, Set v) { - return wrapper.setSet(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(int i, Set v, Class elementsClass) { - return wrapper.setSet(i, v, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(int i, Set v, TypeToken elementsType) { - return wrapper.setSet(i, v, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(String name, Set v) { - return wrapper.setSet(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(String name, Set v, Class elementsClass) { - return wrapper.setSet(name, v, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setSet(String name, Set v, TypeToken elementsType) { - return wrapper.setSet(name, v, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setUDTValue(int i, UDTValue v) { - return wrapper.setUDTValue(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public 
BoundStatement setUDTValue(String name, UDTValue v) { - return wrapper.setUDTValue(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTupleValue(int i, TupleValue v) { - return wrapper.setTupleValue(i, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setTupleValue(String name, TupleValue v) { - return wrapper.setTupleValue(name, v); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(int i, V v, Class targetClass) { - return wrapper.set(i, v, targetClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(String name, V v, Class targetClass) { - return wrapper.set(name, v, targetClass); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(int i, V v, TypeToken targetType) { - return wrapper.set(i, v, targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(String name, V v, TypeToken targetType) { - return wrapper.set(name, v, targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(int i, V v, TypeCodec codec) { - return wrapper.set(i, v, codec); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement set(String name, V v, TypeCodec codec) { - return wrapper.set(name, v, codec); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setToNull(int i) { - return wrapper.setToNull(i); - } - - /** - * {@inheritDoc} - */ - @Override - public BoundStatement setToNull(String name) { - return wrapper.setToNull(name); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isNull(int i) { - return wrapper.isNull(i); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isNull(String name) { - return wrapper.isNull(name); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean getBool(int i) { - return wrapper.getBool(i); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean getBool(String name) { - return wrapper.getBool(name); - } - - /** - * {@inheritDoc} - */ - @Override - public byte getByte(int i) { - return wrapper.getByte(i); - } - - /** - * {@inheritDoc} - */ - @Override - public byte getByte(String name) { - return wrapper.getByte(name); - } - - /** - * {@inheritDoc} - */ - @Override - public short getShort(int i) { - return wrapper.getShort(i); - } - - /** - * {@inheritDoc} - */ - @Override - public short getShort(String name) { - return wrapper.getShort(name); - } - - /** - * {@inheritDoc} - */ - @Override - public int getInt(int i) { - return wrapper.getInt(i); - } - - /** - * {@inheritDoc} - */ - @Override - public int getInt(String name) { - return wrapper.getInt(name); - } - - /** - * {@inheritDoc} - */ - @Override - public long getLong(int i) { - return wrapper.getLong(i); - } - - /** - * {@inheritDoc} - */ - @Override - public long getLong(String name) { - return wrapper.getLong(name); - } - - /** - * {@inheritDoc} - */ - @Override - public Date getTimestamp(int i) { - return wrapper.getTimestamp(i); - } - - /** - * {@inheritDoc} - */ - @Override - public Date getTimestamp(String name) { - return wrapper.getTimestamp(name); - } - - /** - * {@inheritDoc} - */ - @Override - public LocalDate getDate(int i) { - return wrapper.getDate(i); - } - - /** - * {@inheritDoc} - */ - @Override - public LocalDate getDate(String name) { - return wrapper.getDate(name); - } - - /** - * {@inheritDoc} - */ - @Override - public long getTime(int i) { - return wrapper.getTime(i); - } - - /** - * {@inheritDoc} - */ - @Override - public long 
getTime(String name) { - return wrapper.getTime(name); - } - - /** - * {@inheritDoc} - */ - @Override - public float getFloat(int i) { - return wrapper.getFloat(i); - } - - /** - * {@inheritDoc} - */ - @Override - public float getFloat(String name) { - return wrapper.getFloat(name); - } - - /** - * {@inheritDoc} - */ - @Override - public double getDouble(int i) { - return wrapper.getDouble(i); - } - - /** - * {@inheritDoc} - */ - @Override - public double getDouble(String name) { - return wrapper.getDouble(name); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytesUnsafe(int i) { - return wrapper.getBytesUnsafe(i); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytesUnsafe(String name) { - return wrapper.getBytesUnsafe(name); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytes(int i) { - return wrapper.getBytes(i); - } - - /** - * {@inheritDoc} - */ - @Override - public ByteBuffer getBytes(String name) { - return wrapper.getBytes(name); - } - - /** - * {@inheritDoc} - */ - @Override - public String getString(int i) { - return wrapper.getString(i); - } - - /** - * {@inheritDoc} - */ - @Override - public String getString(String name) { - return wrapper.getString(name); - } - - /** - * {@inheritDoc} - */ - @Override - public BigInteger getVarint(int i) { - return wrapper.getVarint(i); - } - - /** - * {@inheritDoc} - */ - @Override - public BigInteger getVarint(String name) { - return wrapper.getVarint(name); - } - - /** - * {@inheritDoc} - */ - @Override - public BigDecimal getDecimal(int i) { - return wrapper.getDecimal(i); - } - - /** - * {@inheritDoc} - */ - @Override - public BigDecimal getDecimal(String name) { - return wrapper.getDecimal(name); - } - - /** - * {@inheritDoc} - */ - @Override - public UUID getUUID(int i) { - return wrapper.getUUID(i); - } - - /** - * {@inheritDoc} - */ - @Override - public UUID getUUID(String name) { - return wrapper.getUUID(name); - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getInet(int i) { - return wrapper.getInet(i); - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getInet(String name) { - return wrapper.getInet(name); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(int i, Class elementsClass) { - return wrapper.getList(i, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(int i, TypeToken elementsType) { - return wrapper.getList(i, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(String name, Class elementsClass) { - return wrapper.getList(name, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public List getList(String name, TypeToken elementsType) { - return wrapper.getList(name, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(int i, Class elementsClass) { - return wrapper.getSet(i, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(int i, TypeToken elementsType) { - return wrapper.getSet(i, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(String name, Class elementsClass) { - return wrapper.getSet(name, elementsClass); - } - - /** - * {@inheritDoc} - */ - @Override - public Set getSet(String name, TypeToken elementsType) { - return wrapper.getSet(name, elementsType); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getMap(int i, Class keysClass, Class valuesClass) { - return wrapper.getMap(i, keysClass, valuesClass); - } - - 
/** - * {@inheritDoc} - */ - @Override - public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { - return wrapper.getMap(i, keysType, valuesType); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getMap(String name, Class keysClass, Class valuesClass) { - return wrapper.getMap(name, keysClass, valuesClass); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { - return wrapper.getMap(name, keysType, valuesType); - } - - /** - * {@inheritDoc} - */ - @Override - public UDTValue getUDTValue(int i) { - return wrapper.getUDTValue(i); - } - - /** - * {@inheritDoc} - */ - @Override - public UDTValue getUDTValue(String name) { - return wrapper.getUDTValue(name); - } - - /** - * {@inheritDoc} - */ - @Override - public TupleValue getTupleValue(int i) { - return wrapper.getTupleValue(i); - } - - /** - * {@inheritDoc} - */ - @Override - public TupleValue getTupleValue(String name) { - return wrapper.getTupleValue(name); - } - - /** - * {@inheritDoc} - */ - @Override - public Object getObject(int i) { - return wrapper.getObject(i); - } - - /** - * {@inheritDoc} - */ - @Override - public Object getObject(String name) { - return wrapper.getObject(name); - } - - - /** - * {@inheritDoc} - */ - @Override - public T get(int i, Class targetClass) { - return wrapper.get(i, targetClass); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, Class targetClass) { - return wrapper.get(name, targetClass); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(int i, TypeToken targetType) { - return wrapper.get(i, targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, TypeToken targetType) { - return wrapper.get(name, targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(int i, TypeCodec codec) { - return wrapper.get(i, codec); - } - - /** - * {@inheritDoc} - */ - @Override - public T get(String name, TypeCodec codec) { - return wrapper.get(name, codec); - } - - void ensureAllSet() { - int index = 0; - for (ByteBuffer value : wrapper.values) { - if (value == BoundStatement.UNSET) - throw new IllegalStateException("Unset value at index " + index + ". " - + "If you want this value to be null, please set it to null explicitly."); - index += 1; - } - } - - static class DataWrapper extends AbstractData { - - DataWrapper(BoundStatement wrapped, int size) { - super(wrapped.statement.getPreparedId().protocolVersion, wrapped, size); - } - - protected int[] getAllIndexesOf(String name) { - return wrapped.statement.getVariables().getAllIdx(name); - } - - protected DataType getType(int i) { - return wrapped.statement.getVariables().getType(i); - } - - protected String getName(int i) { - return wrapped.statement.getVariables().getName(i); - } - - @Override - protected CodecRegistry getCodecRegistry() { - return wrapped.codecRegistry; - } - } + return SimpleStatement.compose(components); + } + } + return null; + } + + /** + * Sets the routing key for this bound statement. + * + *
<p>
This is useful when the routing key can neither be set on the {@code PreparedStatement} this + * bound statement was built from, nor automatically computed from bound variables. In particular, + * this is the case if the partition key is composite and only some of its components are bound. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code BoundStatement} object. + * @see BoundStatement#getRoutingKey + */ + public BoundStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + /** + * Sets the routing key for this bound statement, when the query partition key is composite and + * the routing key must be built from multiple values. + * + *
<p>
This is useful when the routing key can neither be set on the {@code PreparedStatement} this + * bound statement was built from, nor automatically computed from bound variables. In particular, + * this is the case if the partition key is composite and only some of its components are bound. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain the routing key. + * @return this {@code BoundStatement} object. + * @see BoundStatement#getRoutingKey + */ + public BoundStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = SimpleStatement.compose(routingKeyComponents); + return this; + } + + /** {@inheritDoc} */ + @Override + public String getKeyspace() { + ColumnDefinitions defs = statement.getPreparedId().boundValuesMetadata.variables; + return defs.size() == 0 ? null : defs.getKeyspace(0); + } + + /** {@inheritDoc} */ + @Override + public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + int size = Header.lengthFor(protocolVersion); + try { + size += + CBUtil.sizeOfShortBytes(preparedStatement().getPreparedId().boundValuesMetadata.id.bytes); + if (ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(protocolVersion)) { + size += + CBUtil.sizeOfShortBytes(preparedStatement().getPreparedId().resultSetMetadata.id.bytes); + } + switch (protocolVersion) { + case V1: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + break; + case V2: + case V3: + case V4: + case V5: + case V6: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + size += QueryFlag.serializedSize(protocolVersion); + if (wrapper.values.length > 0) { + size += CBUtil.sizeOfValueList(wrapper.values); + } + // Fetch size, serial CL and default timestamp also depend on session-level defaults + // (QueryOptions). + // We always count them to avoid having to inject QueryOptions here, at worst we + // overestimate by a + // few bytes. 
+ size += 4; // fetch size + if (getPagingState() != null) { + size += CBUtil.sizeOfValue(getPagingState()); + } + size += CBUtil.sizeOfConsistencyLevel(getSerialConsistencyLevel()); + if (ProtocolFeature.CLIENT_TIMESTAMPS.isSupportedBy(protocolVersion)) { + size += 8; // timestamp + } + if (ProtocolFeature.CUSTOM_PAYLOADS.isSupportedBy(protocolVersion) + && getOutgoingPayload() != null) { + size += CBUtil.sizeOfBytesMap(getOutgoingPayload()); + } + break; + default: + throw protocolVersion.unsupported(); + } + } catch (Exception e) { + size = -1; + } + return size; + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setBool(int i, boolean v) { + return wrapper.setBool(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setBool(String name, boolean v) { + return wrapper.setBool(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setByte(int i, byte v) { + return wrapper.setByte(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setByte(String name, byte v) { + return wrapper.setByte(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setShort(int i, short v) { + return wrapper.setShort(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setShort(String name, short v) { + return wrapper.setShort(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setInt(int i, int v) { + return wrapper.setInt(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setInt(String name, int v) { + return wrapper.setInt(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setLong(int i, long v) { + return wrapper.setLong(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setLong(String name, long v) { + return wrapper.setLong(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setTimestamp(int i, Date v) { + return wrapper.setTimestamp(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setTimestamp(String name, Date v) { + return wrapper.setTimestamp(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDate(int i, LocalDate v) { + return wrapper.setDate(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDate(String name, LocalDate v) { + return wrapper.setDate(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setTime(int i, long v) { + return wrapper.setTime(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setTime(String name, long v) { + return wrapper.setTime(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setFloat(int i, float v) { + return wrapper.setFloat(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setFloat(String name, float v) { + return wrapper.setFloat(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDouble(int i, double v) { + return wrapper.setDouble(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDouble(String name, double v) { + return wrapper.setDouble(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setString(int i, String v) { + return wrapper.setString(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setString(String name, String v) { + return wrapper.setString(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setBytes(int i, ByteBuffer v) { + return wrapper.setBytes(i, v); + 
} + + /** {@inheritDoc} */ + @Override + public BoundStatement setBytes(String name, ByteBuffer v) { + return wrapper.setBytes(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { + return wrapper.setBytesUnsafe(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { + return wrapper.setBytesUnsafe(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setVarint(int i, BigInteger v) { + return wrapper.setVarint(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setVarint(String name, BigInteger v) { + return wrapper.setVarint(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDecimal(int i, BigDecimal v) { + return wrapper.setDecimal(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setDecimal(String name, BigDecimal v) { + return wrapper.setDecimal(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setUUID(int i, UUID v) { + return wrapper.setUUID(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setUUID(String name, UUID v) { + return wrapper.setUUID(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setInet(int i, InetAddress v) { + return wrapper.setInet(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setInet(String name, InetAddress v) { + return wrapper.setInet(name, v); + } + + /** + * Sets the {@code i}th value to the provided {@link Token}. + * + *
<p>
{@link #setPartitionKeyToken(Token)} should generally be preferred if you have a single + * token variable. + * + * @param i the index of the variable to set. + * @param v the value to set. + * @return this BoundStatement. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= + * this.preparedStatement().variables().size()}. + * @throws InvalidTypeException if column {@code i} is not of the type of the token's value. + */ + public BoundStatement setToken(int i, Token v) { + return wrapper.setToken(i, v); + } + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided token. + * + *
<p>
{@link #setPartitionKeyToken(Token)} should generally be preferred if you have a single + * token variable. + * + *
<p>
If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, + * or named bind markers: + * + *
<pre>{@code
+   * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
+   * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
+   * }</pre>
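// A minimal usage sketch of the named-marker form shown above, assuming an already-connected
// Session and a table `my_table` with partition key `k` (table, column and class names are
// illustrative only, not part of the driver API):
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TokenRange;

final class TokenRangeScanExample {
  static void scanAllRanges(Session session) {
    PreparedStatement rangeQuery =
        session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
    for (TokenRange range : session.getCluster().getMetadata().getTokenRanges()) {
      // A range that wraps around the end of the ring must be split before it can be queried:
      for (TokenRange unwrapped : range.unwrap()) {
        BoundStatement bs =
            rangeQuery
                .bind()
                .setToken("min", unwrapped.getStart())
                .setToken("max", unwrapped.getEnd());
        ResultSet rows = session.execute(bs);
        // consume rows here
      }
    }
  }
}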
+ * + * @param name the name of the variable to set; if multiple variables {@code name} are prepared, + * all of them are set. + * @param v the value to set. + * @return this BoundStatement. + * @throws IllegalArgumentException if {@code name} is not a prepared variable, that is, if {@code + * !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is not of the type of the + * token's value. + */ + public BoundStatement setToken(String name, Token v) { + return wrapper.setToken(name, v); + } + + /** + * Sets the value for (all occurrences of) variable "{@code partition key token}" to the provided + * token (this is the name generated by Cassandra for markers corresponding to a {@code + * token(...)} call). + * + *
<p>
This method is a shorthand for statements with a single token variable: + * + *
<pre>{@code
+   * Token token = ...
+   * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) = ?");
+   * BoundStatement b = pst.bind().setPartitionKeyToken(token);
+   * }</pre>
+   *
+   * <p>If you have multiple token variables, use positional binding ({@link #setToken(int, Token)}, or
+   * named bind markers:
+   *
+   * <pre>{@code
+   * PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) > :min AND token(k) <= :max");
+   * BoundStatement b = pst.bind().setToken("min", minToken).setToken("max", maxToken);
+   * }</pre>
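// A minimal sketch of the shorthand above, assuming an already-connected Session and a table
// `my_table` with partition key `k` (names are illustrative only): a token read back from a row
// can be bound again through setPartitionKeyToken.
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Token;

final class PartitionKeyTokenExample {
  static Row reReadByToken(Session session) {
    // token(k) must be selected for Row.getPartitionKeyToken() to find a token column:
    Row first = session.execute("SELECT k, token(k) FROM my_table LIMIT 1").one();
    Token token = first.getPartitionKeyToken();
    PreparedStatement pst = session.prepare("SELECT * FROM my_table WHERE token(k) = ?");
    return session.execute(pst.bind().setPartitionKeyToken(token)).one();
  }
}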
+ * + * @param v the value to set. + * @return this BoundStatement. + * @throws IllegalArgumentException if {@code name} is not a prepared variable, that is, if {@code + * !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if (any occurrence of) {@code name} is not of the type of the + * token's value. + */ + public BoundStatement setPartitionKeyToken(Token v) { + return setToken("partition key token", v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(int i, List v) { + return wrapper.setList(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(int i, List v, Class elementsClass) { + return wrapper.setList(i, v, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(int i, List v, TypeToken elementsType) { + return wrapper.setList(i, v, elementsType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(String name, List v) { + return wrapper.setList(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(String name, List v, Class elementsClass) { + return wrapper.setList(name, v, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setList(String name, List v, TypeToken elementsType) { + return wrapper.setList(name, v, elementsType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap(int i, Map v) { + return wrapper.setMap(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap( + int i, Map v, Class keysClass, Class valuesClass) { + return wrapper.setMap(i, v, keysClass, valuesClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap( + int i, Map v, TypeToken keysType, TypeToken valuesType) { + return wrapper.setMap(i, v, keysType, valuesType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap(String name, Map v) { + return wrapper.setMap(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap( + String name, Map v, Class keysClass, Class valuesClass) { + return wrapper.setMap(name, v, keysClass, valuesClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setMap( + String name, Map v, TypeToken keysType, TypeToken valuesType) { + return wrapper.setMap(name, v, keysType, valuesType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(int i, Set v) { + return wrapper.setSet(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(int i, Set v, Class elementsClass) { + return wrapper.setSet(i, v, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(int i, Set v, TypeToken elementsType) { + return wrapper.setSet(i, v, elementsType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(String name, Set v) { + return wrapper.setSet(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(String name, Set v, Class elementsClass) { + return wrapper.setSet(name, v, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setSet(String name, Set v, TypeToken elementsType) { + return wrapper.setSet(name, v, elementsType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setUDTValue(int i, UDTValue v) { + return wrapper.setUDTValue(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setUDTValue(String name, UDTValue v) { + return wrapper.setUDTValue(name, v); + } + + /** {@inheritDoc} 
*/ + @Override + public BoundStatement setTupleValue(int i, TupleValue v) { + return wrapper.setTupleValue(i, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setTupleValue(String name, TupleValue v) { + return wrapper.setTupleValue(name, v); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(int i, V v, Class targetClass) { + return wrapper.set(i, v, targetClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(String name, V v, Class targetClass) { + return wrapper.set(name, v, targetClass); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(int i, V v, TypeToken targetType) { + return wrapper.set(i, v, targetType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(String name, V v, TypeToken targetType) { + return wrapper.set(name, v, targetType); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(int i, V v, TypeCodec codec) { + return wrapper.set(i, v, codec); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement set(String name, V v, TypeCodec codec) { + return wrapper.set(name, v, codec); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setToNull(int i) { + return wrapper.setToNull(i); + } + + /** {@inheritDoc} */ + @Override + public BoundStatement setToNull(String name) { + return wrapper.setToNull(name); + } + + /** {@inheritDoc} */ + @Override + public boolean isNull(int i) { + return wrapper.isNull(i); + } + + /** {@inheritDoc} */ + @Override + public boolean isNull(String name) { + return wrapper.isNull(name); + } + + /** {@inheritDoc} */ + @Override + public boolean getBool(int i) { + return wrapper.getBool(i); + } + + /** {@inheritDoc} */ + @Override + public boolean getBool(String name) { + return wrapper.getBool(name); + } + + /** {@inheritDoc} */ + @Override + public byte getByte(int i) { + return wrapper.getByte(i); + } + + /** {@inheritDoc} */ + @Override + public byte getByte(String name) { + return wrapper.getByte(name); + } + + /** {@inheritDoc} */ + @Override + public short getShort(int i) { + return wrapper.getShort(i); + } + + /** {@inheritDoc} */ + @Override + public short getShort(String name) { + return wrapper.getShort(name); + } + + /** {@inheritDoc} */ + @Override + public int getInt(int i) { + return wrapper.getInt(i); + } + + /** {@inheritDoc} */ + @Override + public int getInt(String name) { + return wrapper.getInt(name); + } + + /** {@inheritDoc} */ + @Override + public long getLong(int i) { + return wrapper.getLong(i); + } + + /** {@inheritDoc} */ + @Override + public long getLong(String name) { + return wrapper.getLong(name); + } + + /** {@inheritDoc} */ + @Override + public Date getTimestamp(int i) { + return wrapper.getTimestamp(i); + } + + /** {@inheritDoc} */ + @Override + public Date getTimestamp(String name) { + return wrapper.getTimestamp(name); + } + + /** {@inheritDoc} */ + @Override + public LocalDate getDate(int i) { + return wrapper.getDate(i); + } + + /** {@inheritDoc} */ + @Override + public LocalDate getDate(String name) { + return wrapper.getDate(name); + } + + /** {@inheritDoc} */ + @Override + public long getTime(int i) { + return wrapper.getTime(i); + } + + /** {@inheritDoc} */ + @Override + public long getTime(String name) { + return wrapper.getTime(name); + } + + /** {@inheritDoc} */ + @Override + public float getFloat(int i) { + return wrapper.getFloat(i); + } + + /** {@inheritDoc} */ + @Override + public float getFloat(String name) { + return wrapper.getFloat(name); + } + + /** {@inheritDoc} 
*/ + @Override + public double getDouble(int i) { + return wrapper.getDouble(i); + } + + /** {@inheritDoc} */ + @Override + public double getDouble(String name) { + return wrapper.getDouble(name); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytesUnsafe(int i) { + return wrapper.getBytesUnsafe(i); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytesUnsafe(String name) { + return wrapper.getBytesUnsafe(name); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytes(int i) { + return wrapper.getBytes(i); + } + + /** {@inheritDoc} */ + @Override + public ByteBuffer getBytes(String name) { + return wrapper.getBytes(name); + } + + /** {@inheritDoc} */ + @Override + public String getString(int i) { + return wrapper.getString(i); + } + + /** {@inheritDoc} */ + @Override + public String getString(String name) { + return wrapper.getString(name); + } + + /** {@inheritDoc} */ + @Override + public BigInteger getVarint(int i) { + return wrapper.getVarint(i); + } + + /** {@inheritDoc} */ + @Override + public BigInteger getVarint(String name) { + return wrapper.getVarint(name); + } + + /** {@inheritDoc} */ + @Override + public BigDecimal getDecimal(int i) { + return wrapper.getDecimal(i); + } + + /** {@inheritDoc} */ + @Override + public BigDecimal getDecimal(String name) { + return wrapper.getDecimal(name); + } + + /** {@inheritDoc} */ + @Override + public UUID getUUID(int i) { + return wrapper.getUUID(i); + } + + /** {@inheritDoc} */ + @Override + public UUID getUUID(String name) { + return wrapper.getUUID(name); + } + + /** {@inheritDoc} */ + @Override + public InetAddress getInet(int i) { + return wrapper.getInet(i); + } + + /** {@inheritDoc} */ + @Override + public InetAddress getInet(String name) { + return wrapper.getInet(name); + } + + /** {@inheritDoc} */ + @Override + public List getList(int i, Class elementsClass) { + return wrapper.getList(i, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public List getList(int i, TypeToken elementsType) { + return wrapper.getList(i, elementsType); + } + + /** {@inheritDoc} */ + @Override + public List getList(String name, Class elementsClass) { + return wrapper.getList(name, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public List getList(String name, TypeToken elementsType) { + return wrapper.getList(name, elementsType); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(int i, Class elementsClass) { + return wrapper.getSet(i, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(int i, TypeToken elementsType) { + return wrapper.getSet(i, elementsType); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(String name, Class elementsClass) { + return wrapper.getSet(name, elementsClass); + } + + /** {@inheritDoc} */ + @Override + public Set getSet(String name, TypeToken elementsType) { + return wrapper.getSet(name, elementsType); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(int i, Class keysClass, Class valuesClass) { + return wrapper.getMap(i, keysClass, valuesClass); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(int i, TypeToken keysType, TypeToken valuesType) { + return wrapper.getMap(i, keysType, valuesType); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(String name, Class keysClass, Class valuesClass) { + return wrapper.getMap(name, keysClass, valuesClass); + } + + /** {@inheritDoc} */ + @Override + public Map getMap(String name, TypeToken keysType, TypeToken valuesType) { + return 
wrapper.getMap(name, keysType, valuesType); + } + + /** {@inheritDoc} */ + @Override + public UDTValue getUDTValue(int i) { + return wrapper.getUDTValue(i); + } + + /** {@inheritDoc} */ + @Override + public UDTValue getUDTValue(String name) { + return wrapper.getUDTValue(name); + } + + /** {@inheritDoc} */ + @Override + public TupleValue getTupleValue(int i) { + return wrapper.getTupleValue(i); + } + + /** {@inheritDoc} */ + @Override + public TupleValue getTupleValue(String name) { + return wrapper.getTupleValue(name); + } + + /** {@inheritDoc} */ + @Override + public Object getObject(int i) { + return wrapper.getObject(i); + } + + /** {@inheritDoc} */ + @Override + public Object getObject(String name) { + return wrapper.getObject(name); + } + + /** {@inheritDoc} */ + @Override + public T get(int i, Class targetClass) { + return wrapper.get(i, targetClass); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, Class targetClass) { + return wrapper.get(name, targetClass); + } + + /** {@inheritDoc} */ + @Override + public T get(int i, TypeToken targetType) { + return wrapper.get(i, targetType); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, TypeToken targetType) { + return wrapper.get(name, targetType); + } + + /** {@inheritDoc} */ + @Override + public T get(int i, TypeCodec codec) { + return wrapper.get(i, codec); + } + + /** {@inheritDoc} */ + @Override + public T get(String name, TypeCodec codec) { + return wrapper.get(name, codec); + } + + void ensureAllSet() { + int index = 0; + for (ByteBuffer value : wrapper.values) { + if (value == BoundStatement.UNSET) + throw new IllegalStateException( + "Unset value at index " + + index + + ". " + + "If you want this value to be null, please set it to null explicitly."); + index += 1; + } + } + + static class DataWrapper extends AbstractData { + + DataWrapper(BoundStatement wrapped, int size) { + super(wrapped.statement.getPreparedId().protocolVersion, wrapped, size); + } + + protected int[] getAllIndexesOf(String name) { + return wrapped.statement.getVariables().getAllIdx(name); + } + + protected DataType getType(int i) { + return wrapped.statement.getVariables().getType(i); + } + + protected String getName(int i) { + return wrapped.statement.getVariables().getName(i); + } + + @Override + protected CodecRegistry getCodecRegistry() { + return wrapped.codecRegistry; + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/BytesToSegmentDecoder.java b/driver-core/src/main/java/com/datastax/driver/core/BytesToSegmentDecoder.java new file mode 100644 index 00000000000..a20f2dfe61d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/BytesToSegmentDecoder.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import java.nio.ByteOrder; + +/** + * Decodes {@link Segment}s from a stream of bytes. + * + *
<p>
This works like a regular length-field-based decoder, but we override {@link + * #getUnadjustedFrameLength} to handle two peculiarities: the length is encoded on 17 bits, and we + * also want to check the header CRC before we use it. So we parse the whole segment header ahead of + * time, and store it until we're ready to build the segment. + */ +class BytesToSegmentDecoder extends LengthFieldBasedFrameDecoder { + + private final SegmentCodec segmentCodec; + private SegmentCodec.Header header; + + BytesToSegmentDecoder(SegmentCodec segmentCodec) { + super( + // max length (Netty wants this to be the overall length including everything): + segmentCodec.headerLength() + + SegmentCodec.CRC24_LENGTH + + Segment.MAX_PAYLOAD_LENGTH + + SegmentCodec.CRC32_LENGTH, + // offset and size of the "length" field: that's the whole header + 0, + segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH, + // length adjustment: add the trailing CRC to the declared length + SegmentCodec.CRC32_LENGTH, + // bytes to skip: the header (we've already parsed it while reading the length) + segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH); + this.segmentCodec = segmentCodec; + } + + @Override + protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + try { + ByteBuf payloadAndCrc = (ByteBuf) super.decode(ctx, in); + if (payloadAndCrc == null) { + return null; + } else { + assert header != null; + Segment segment = segmentCodec.decode(header, payloadAndCrc); + header = null; + return segment; + } + } catch (Exception e) { + // Don't hold on to a stale header if we failed to decode the rest of the segment + header = null; + throw e; + } + } + + @Override + protected long getUnadjustedFrameLength(ByteBuf buffer, int offset, int length, ByteOrder order) { + // The parent class calls this repeatedly for the same "frame" if there weren't enough + // accumulated bytes the first time. Only decode the header the first time: + if (header == null) { + header = segmentCodec.decodeHeader(buffer.slice(offset, length)); + } + return header.payloadLength; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java b/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java index 0db58022b8d..98aaccf373e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CBUtil.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,405 +21,328 @@ import com.google.common.collect.ImmutableMap; import io.netty.buffer.ByteBuf; import io.netty.util.CharsetUtil; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; -import java.util.*; - -/** - * ByteBuf utility methods. - */ -abstract class CBUtil { // TODO rename - - private CBUtil() { - } - - private static String readString(ByteBuf cb, int length) { - try { - String str = cb.toString(cb.readerIndex(), length, CharsetUtil.UTF_8); - cb.readerIndex(cb.readerIndex() + length); - return str; - } catch (IllegalStateException e) { - // That's the way netty encapsulate a CCE - if (e.getCause() instanceof CharacterCodingException) - throw new DriverInternalError("Cannot decode string as UTF8"); - else - throw e; - } - } - - public static String readString(ByteBuf cb) { - try { - int length = cb.readUnsignedShort(); - return readString(cb, length); - } catch (IndexOutOfBoundsException e) { - throw new DriverInternalError("Not enough bytes to read an UTF8 serialized string preceded by it's 2 bytes length"); - } - } - - public static void writeString(String str, ByteBuf cb) { - byte[] bytes = str.getBytes(CharsetUtil.UTF_8); - cb.writeShort(bytes.length); - cb.writeBytes(bytes); - } - - public static int sizeOfString(String str) { - return 2 + encodedUTF8Length(str); - } - - private static int encodedUTF8Length(String st) { - int strlen = st.length(); - int utflen = 0; - for (int i = 0; i < strlen; i++) { - int c = st.charAt(i); - if ((c >= 0x0001) && (c <= 0x007F)) - utflen++; - else if (c > 0x07FF) - utflen += 3; - else - utflen += 2; - } - return utflen; - } - - public static String readLongString(ByteBuf cb) { - try { - int length = cb.readInt(); - return readString(cb, length); - } catch (IndexOutOfBoundsException e) { - throw new DriverInternalError("Not enough bytes to read an UTF8 serialized string preceded by it's 4 bytes length"); - } - } - - public static void writeLongString(String str, ByteBuf cb) { - byte[] bytes = str.getBytes(CharsetUtil.UTF_8); - cb.writeInt(bytes.length); - cb.writeBytes(bytes); - } +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; - public static int sizeOfLongString(String str) { - return 4 + str.getBytes(CharsetUtil.UTF_8).length; - } - - public static byte[] readBytes(ByteBuf cb) { - try { - int length = cb.readUnsignedShort(); - byte[] bytes = new byte[length]; - cb.readBytes(bytes); - return bytes; - } catch (IndexOutOfBoundsException e) { - throw new DriverInternalError("Not enough bytes to read a byte array preceded by it's 2 bytes length"); - } - } - - public static void writeBytes(byte[] bytes, ByteBuf cb) { - cb.writeShort(bytes.length); - cb.writeBytes(bytes); - } - - public static void writeBytes(ByteBuffer bytes, ByteBuf cb) { - cb.writeShort(bytes.remaining()); - cb.writeBytes(bytes.duplicate()); - } +/** ByteBuf utility methods. 
*/ - public static int sizeOfBytes(byte[] bytes) { - return 2 + bytes.length; - } - - public static int sizeOfBytes(ByteBuffer bytes) { - return 2 + bytes.remaining(); - } - - public static Map readBytesMap(ByteBuf cb) { - int length = cb.readUnsignedShort(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (int i = 0; i < length; i++) { - String key = readString(cb); - ByteBuffer value = readValue(cb); - if (value == null) - value = Statement.NULL_PAYLOAD_VALUE; - builder.put(key, value); - } - return builder.build(); - } - - public static void writeBytesMap(Map m, ByteBuf cb) { - cb.writeShort(m.size()); - for (Map.Entry entry : m.entrySet()) { - writeString(entry.getKey(), cb); - ByteBuffer value = entry.getValue(); - if (value == Statement.NULL_PAYLOAD_VALUE) - value = null; - writeValue(value, cb); - } - } - - public static int sizeOfBytesMap(Map m) { - int size = 2; - for (Map.Entry entry : m.entrySet()) { - size += sizeOfString(entry.getKey()); - size += sizeOfBytes(entry.getValue()); - } - return size; - } - - public static ConsistencyLevel readConsistencyLevel(ByteBuf cb) { - return ConsistencyLevel.fromCode(cb.readUnsignedShort()); - } - - public static void writeConsistencyLevel(ConsistencyLevel consistency, ByteBuf cb) { - cb.writeShort(consistency.code); - } - - public static int sizeOfConsistencyLevel(ConsistencyLevel consistency) { - return 2; - } - - public static > T readEnumValue(Class enumType, ByteBuf cb) { - String value = CBUtil.readString(cb); - try { - return Enum.valueOf(enumType, value.toUpperCase()); - } catch (IllegalArgumentException e) { - throw new DriverInternalError(String.format("Invalid value '%s' for %s", value, enumType.getSimpleName())); - } - } - - public static > void writeEnumValue(T enumValue, ByteBuf cb) { - writeString(enumValue.toString(), cb); - } - - public static > int sizeOfEnumValue(T enumValue) { - return sizeOfString(enumValue.toString()); - } - - public static UUID readUUID(ByteBuf cb) { - long msb = cb.readLong(); - long lsb = cb.readLong(); - return new UUID(msb, lsb); - } - - public static void writeUUID(UUID uuid, ByteBuf cb) { - cb.writeLong(uuid.getMostSignificantBits()); - cb.writeLong(uuid.getLeastSignificantBits()); - } - - public static int sizeOfUUID(UUID uuid) { - return 16; - } - - public static List readStringList(ByteBuf cb) { - int length = cb.readUnsignedShort(); - List l = new ArrayList(length); - for (int i = 0; i < length; i++) - l.add(readString(cb)); - return l; - } - - public static void writeStringList(List l, ByteBuf cb) { - cb.writeShort(l.size()); - for (String str : l) - writeString(str, cb); - } - - public static int sizeOfStringList(List l) { - int size = 2; - for (String str : l) - size += sizeOfString(str); - return size; - } - - public static Map readStringMap(ByteBuf cb) { - int length = cb.readUnsignedShort(); - Map m = new HashMap(length); - for (int i = 0; i < length; i++) { - String k = readString(cb).toUpperCase(); - String v = readString(cb); - m.put(k, v); - } - return m; - } - - public static void writeStringMap(Map m, ByteBuf cb) { - cb.writeShort(m.size()); - for (Map.Entry entry : m.entrySet()) { - writeString(entry.getKey(), cb); - writeString(entry.getValue(), cb); - } - } - - public static int sizeOfStringMap(Map m) { - int size = 2; - for (Map.Entry entry : m.entrySet()) { - size += sizeOfString(entry.getKey()); - size += sizeOfString(entry.getValue()); - } - return size; - } - - public static Map> readStringToStringListMap(ByteBuf cb) { - int length = cb.readUnsignedShort(); 
- Map> m = new HashMap>(length); - for (int i = 0; i < length; i++) { - String k = readString(cb).toUpperCase(); - List v = readStringList(cb); - m.put(k, v); - } - return m; - } - - public static void writeStringToStringListMap(Map> m, ByteBuf cb) { - cb.writeShort(m.size()); - for (Map.Entry> entry : m.entrySet()) { - writeString(entry.getKey(), cb); - writeStringList(entry.getValue(), cb); - } - } - - public static int sizeOfStringToStringListMap(Map> m) { - int size = 2; - for (Map.Entry> entry : m.entrySet()) { - size += sizeOfString(entry.getKey()); - size += sizeOfStringList(entry.getValue()); - } - return size; - } - - public static ByteBuffer readValue(ByteBuf cb) { - int length = cb.readInt(); - if (length < 0) - return null; - ByteBuf slice = cb.readSlice(length); - - return ByteBuffer.wrap(readRawBytes(slice)); - } - - public static void writeValue(byte[] bytes, ByteBuf cb) { - if (bytes == null) { - cb.writeInt(-1); - return; - } - - cb.writeInt(bytes.length); - cb.writeBytes(bytes); - } - - public static void writeValue(ByteBuffer bytes, ByteBuf cb) { - if (bytes == null) { - cb.writeInt(-1); - return; - } - - if (bytes == BoundStatement.UNSET) { - cb.writeInt(-2); - return; - } - - cb.writeInt(bytes.remaining()); - cb.writeBytes(bytes.duplicate()); - } - - public static int sizeOfValue(byte[] bytes) { - return 4 + (bytes == null ? 0 : bytes.length); - } - - public static int sizeOfValue(ByteBuffer bytes) { - return 4 + (bytes == null ? 0 : bytes.remaining()); - } - - public static List readValueList(ByteBuf cb) { - int size = cb.readUnsignedShort(); - if (size == 0) - return Collections.emptyList(); - - List l = new ArrayList(size); - for (int i = 0; i < size; i++) - l.add(readValue(cb)); - return l; - } - - public static void writeValueList(List values, ByteBuf cb) { - cb.writeShort(values.size()); - for (ByteBuffer value : values) - CBUtil.writeValue(value, cb); - } - - public static int sizeOfValueList(List values) { - int size = 2; - for (ByteBuffer value : values) - size += CBUtil.sizeOfValue(value); - return size; - } - - public static void writeNamedValueList(Map namedValues, ByteBuf cb) { - cb.writeShort(namedValues.size()); - for (Map.Entry entry : namedValues.entrySet()) { - CBUtil.writeString(entry.getKey(), cb); - CBUtil.writeValue(entry.getValue(), cb); - } - } - - public static int sizeOfNamedValueList(Map namedValues) { - int size = 2; - for (Map.Entry entry : namedValues.entrySet()) { - size += CBUtil.sizeOfString(entry.getKey()); - size += CBUtil.sizeOfValue(entry.getValue()); - } - return size; - } - - public static InetSocketAddress readInet(ByteBuf cb) { - int addrSize = cb.readByte() & 0xFF; - byte[] address = new byte[addrSize]; - cb.readBytes(address); - int port = cb.readInt(); - try { - return new InetSocketAddress(InetAddress.getByAddress(address), port); - } catch (UnknownHostException e) { - throw new DriverInternalError(String.format("Invalid IP address (%d.%d.%d.%d) while deserializing inet address", address[0], address[1], address[2], address[3])); - } - } - - public static InetAddress readInetWithoutPort(ByteBuf cb) { - int addrSize = cb.readByte() & 0xFF; - byte[] address = new byte[addrSize]; - cb.readBytes(address); - try { - return InetAddress.getByAddress(address); - } catch (UnknownHostException e) { - throw new DriverInternalError(String.format("Invalid IP address (%d.%d.%d.%d) while deserializing inet address", address[0], address[1], address[2], address[3])); - } - } - - public static void writeInet(InetSocketAddress inet, ByteBuf cb) 
{ - byte[] address = inet.getAddress().getAddress(); - - cb.writeByte(address.length); - cb.writeBytes(address); - cb.writeInt(inet.getPort()); - } - - public static int sizeOfInet(InetSocketAddress inet) { - byte[] address = inet.getAddress().getAddress(); - return 1 + address.length + 4; - } +// Implementation note: in order to facilitate loop optimizations by the JIT compiler, this class +// favors indexed loops over "foreach" loops. +@SuppressWarnings("ForLoopReplaceableByForEach") +abstract class CBUtil { // TODO rename - /* - * Reads *all* readable bytes from {@code cb} and return them. - * If {@code cb} is backed by an array, this will return the underlying array directly, without copy. - */ - public static byte[] readRawBytes(ByteBuf cb) { - if (cb.hasArray() && cb.readableBytes() == cb.array().length) { - // Move the readerIndex just so we consistently consume the input - cb.readerIndex(cb.writerIndex()); - return cb.array(); - } - - // Otherwise, just read the bytes in a new array - byte[] bytes = new byte[cb.readableBytes()]; - cb.readBytes(bytes); - return bytes; - } + private CBUtil() {} + + private static String readString(ByteBuf cb, int length) { + try { + String str = cb.toString(cb.readerIndex(), length, CharsetUtil.UTF_8); + cb.readerIndex(cb.readerIndex() + length); + return str; + } catch (IllegalStateException e) { + // That's the way netty encapsulate a CCE + if (e.getCause() instanceof CharacterCodingException) + throw new DriverInternalError("Cannot decode string as UTF8"); + else throw e; + } + } + + static String readString(ByteBuf cb) { + try { + int length = cb.readUnsignedShort(); + return readString(cb, length); + } catch (IndexOutOfBoundsException e) { + throw new DriverInternalError( + "Not enough bytes to read an UTF8 serialized string preceded by it's 2 bytes length"); + } + } + + private static void writeString(String str, ByteBuf cb) { + byte[] bytes = str.getBytes(CharsetUtil.UTF_8); + cb.writeShort(bytes.length); + cb.writeBytes(bytes); + } + + static int sizeOfString(String str) { + return 2 + encodedUTF8Length(str); + } + + private static int encodedUTF8Length(String st) { + int strlen = st.length(); + int utflen = 0; + for (int i = 0; i < strlen; i++) { + int c = st.charAt(i); + if ((c >= 0x0001) && (c <= 0x007F)) utflen++; + else if (c > 0x07FF) utflen += 3; + else utflen += 2; + } + return utflen; + } + + static void writeLongString(String str, ByteBuf cb) { + byte[] bytes = str.getBytes(CharsetUtil.UTF_8); + cb.writeInt(bytes.length); + cb.writeBytes(bytes); + } + + static int sizeOfLongString(String str) { + return 4 + str.getBytes(CharsetUtil.UTF_8).length; + } + + static byte[] readBytes(ByteBuf cb) { + try { + int length = cb.readUnsignedShort(); + byte[] bytes = new byte[length]; + cb.readBytes(bytes); + return bytes; + } catch (IndexOutOfBoundsException e) { + throw new DriverInternalError( + "Not enough bytes to read a byte array preceded by it's 2 bytes length"); + } + } + + static void writeShortBytes(byte[] bytes, ByteBuf cb) { + cb.writeShort(bytes.length); + cb.writeBytes(bytes); + } + + static int sizeOfShortBytes(byte[] bytes) { + return 2 + bytes.length; + } + + private static int sizeOfBytes(ByteBuffer bytes) { + return 4 + bytes.remaining(); + } + + static Map readBytesMap(ByteBuf cb) { + int length = cb.readUnsignedShort(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (int i = 0; i < length; i++) { + String key = readString(cb); + ByteBuffer value = readValue(cb); + if (value == null) value = 
Statement.NULL_PAYLOAD_VALUE; + builder.put(key, value); + } + return builder.build(); + } + + static void writeBytesMap(Map m, ByteBuf cb) { + cb.writeShort(m.size()); + for (Map.Entry entry : m.entrySet()) { + writeString(entry.getKey(), cb); + ByteBuffer value = entry.getValue(); + if (value == Statement.NULL_PAYLOAD_VALUE) value = null; + writeValue(value, cb); + } + } + + static int sizeOfBytesMap(Map m) { + int size = 2; + for (Map.Entry entry : m.entrySet()) { + size += sizeOfString(entry.getKey()); + size += sizeOfBytes(entry.getValue()); + } + return size; + } + + static ConsistencyLevel readConsistencyLevel(ByteBuf cb) { + return ConsistencyLevel.fromCode(cb.readUnsignedShort()); + } + + static void writeConsistencyLevel(ConsistencyLevel consistency, ByteBuf cb) { + cb.writeShort(consistency.code); + } + + static int sizeOfConsistencyLevel(@SuppressWarnings("unused") ConsistencyLevel consistency) { + return 2; + } + + static > T readEnumValue(Class enumType, ByteBuf cb) { + String value = CBUtil.readString(cb); + try { + return Enum.valueOf(enumType, value.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new DriverInternalError( + String.format("Invalid value '%s' for %s", value, enumType.getSimpleName())); + } + } + + static > void writeEnumValue(T enumValue, ByteBuf cb) { + writeString(enumValue.toString(), cb); + } + + static > int sizeOfEnumValue(T enumValue) { + return sizeOfString(enumValue.toString()); + } + + static UUID readUUID(ByteBuf cb) { + long msb = cb.readLong(); + long lsb = cb.readLong(); + return new UUID(msb, lsb); + } + + static List readStringList(ByteBuf cb) { + int length = cb.readUnsignedShort(); + List l = new ArrayList(length); + for (int i = 0; i < length; i++) { + l.add(readString(cb)); + } + return l; + } + + static void writeStringMap(Map m, ByteBuf cb) { + cb.writeShort(m.size()); + for (Map.Entry entry : m.entrySet()) { + writeString(entry.getKey(), cb); + writeString(entry.getValue(), cb); + } + } + + static int sizeOfStringMap(Map m) { + int size = 2; + for (Map.Entry entry : m.entrySet()) { + size += sizeOfString(entry.getKey()); + size += sizeOfString(entry.getValue()); + } + return size; + } + + static Map> readStringToStringListMap(ByteBuf cb) { + int length = cb.readUnsignedShort(); + Map> m = new HashMap>(length); + for (int i = 0; i < length; i++) { + String k = readString(cb).toUpperCase(); + List v = readStringList(cb); + m.put(k, v); + } + return m; + } + + static ByteBuffer readValue(ByteBuf cb) { + int length = cb.readInt(); + if (length < 0) return null; + ByteBuf slice = cb.readSlice(length); + + return ByteBuffer.wrap(readRawBytes(slice)); + } + + static void writeValue(byte[] bytes, ByteBuf cb) { + if (bytes == null) { + cb.writeInt(-1); + return; + } + + cb.writeInt(bytes.length); + cb.writeBytes(bytes); + } + + static void writeValue(ByteBuffer bytes, ByteBuf cb) { + if (bytes == null) { + cb.writeInt(-1); + return; + } + + if (bytes == BoundStatement.UNSET) { + cb.writeInt(-2); + return; + } + + cb.writeInt(bytes.remaining()); + cb.writeBytes(bytes.duplicate()); + } + + static int sizeOfValue(byte[] bytes) { + return 4 + (bytes == null ? 0 : bytes.length); + } + + static int sizeOfValue(ByteBuffer bytes) { + return 4 + (bytes == null ? 
0 : bytes.remaining()); + } + + static void writeValueList(ByteBuffer[] values, ByteBuf cb) { + cb.writeShort(values.length); + for (int i = 0; i < values.length; i++) { + ByteBuffer value = values[i]; + CBUtil.writeValue(value, cb); + } + } + + static int sizeOfValueList(ByteBuffer[] values) { + int size = 2; + for (int i = 0; i < values.length; i++) { + ByteBuffer value = values[i]; + size += CBUtil.sizeOfValue(value); + } + return size; + } + + static void writeNamedValueList(Map namedValues, ByteBuf cb) { + cb.writeShort(namedValues.size()); + for (Map.Entry entry : namedValues.entrySet()) { + CBUtil.writeString(entry.getKey(), cb); + CBUtil.writeValue(entry.getValue(), cb); + } + } + + static int sizeOfNamedValueList(Map namedValues) { + int size = 2; + for (Map.Entry entry : namedValues.entrySet()) { + size += CBUtil.sizeOfString(entry.getKey()); + size += CBUtil.sizeOfValue(entry.getValue()); + } + return size; + } + + static InetSocketAddress readInet(ByteBuf cb) { + int addrSize = cb.readByte() & 0xFF; + byte[] address = new byte[addrSize]; + cb.readBytes(address); + int port = cb.readInt(); + try { + return new InetSocketAddress(InetAddress.getByAddress(address), port); + } catch (UnknownHostException e) { + throw new DriverInternalError( + String.format( + "Invalid IP address (%d.%d.%d.%d) while deserializing inet address", + address[0], address[1], address[2], address[3])); + } + } + + static InetAddress readInetWithoutPort(ByteBuf cb) { + int addrSize = cb.readByte() & 0xFF; + byte[] address = new byte[addrSize]; + cb.readBytes(address); + try { + return InetAddress.getByAddress(address); + } catch (UnknownHostException e) { + throw new DriverInternalError( + String.format( + "Invalid IP address (%d.%d.%d.%d) while deserializing inet address", + address[0], address[1], address[2], address[3])); + } + } + + /* + * Reads *all* readable bytes from {@code cb} and return them. + * If {@code cb} is backed by an array, this will return the underlying array directly, without copy. + */ + private static byte[] readRawBytes(ByteBuf cb) { + if (cb.hasArray() && cb.readableBytes() == cb.array().length) { + // Move the readerIndex just so we consistently consume the input + cb.readerIndex(cb.writerIndex()); + return cb.array(); + } + + // Otherwise, just read the bytes in a new array + byte[] bytes = new byte[cb.readableBytes()]; + cb.readBytes(bytes); + return bytes; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CancelledSpeculativeExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/CancelledSpeculativeExecutionException.java new file mode 100644 index 00000000000..6cb77040b85 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CancelledSpeculativeExecutionException.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Special exception that gets emitted to {@link LatencyTracker}s with the latencies of cancelled + * speculative executions. This allows those trackers to choose whether to ignore those latencies or + * not. + */ +class CancelledSpeculativeExecutionException extends Exception { + + static CancelledSpeculativeExecutionException INSTANCE = + new CancelledSpeculativeExecutionException(); + + private CancelledSpeculativeExecutionException() { + super(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ChainedResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ChainedResultSetFuture.java index 872604312be..8876cfb0ab1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ChainedResultSetFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ChainedResultSetFuture.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,59 +19,55 @@ import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.Uninterruptibles; - import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -/** - * A {@code ResultSetFuture} that will complete when its source future completes. - */ +/** A {@code ResultSetFuture} that will complete when its source future completes. 
*/ class ChainedResultSetFuture extends AbstractFuture implements ResultSetFuture { - private volatile ResultSetFuture source; + private volatile ResultSetFuture source; - void setSource(ResultSetFuture source) { - if (this.isCancelled()) - source.cancel(false); - this.source = source; - Futures.addCallback(source, new FutureCallback() { - @Override - public void onSuccess(ResultSet result) { - ChainedResultSetFuture.this.set(result); - } + void setSource(ResultSetFuture source) { + if (this.isCancelled()) source.cancel(false); + this.source = source; + GuavaCompatibility.INSTANCE.addCallback( + source, + new FutureCallback() { + @Override + public void onSuccess(ResultSet result) { + ChainedResultSetFuture.this.set(result); + } - @Override - public void onFailure(Throwable t) { - ChainedResultSetFuture.this.setException(t); - } + @Override + public void onFailure(Throwable t) { + ChainedResultSetFuture.this.setException(t); + } }); - } + } - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return (source == null || source.cancel(mayInterruptIfRunning)) - && super.cancel(mayInterruptIfRunning); - } + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return (source == null || source.cancel(mayInterruptIfRunning)) + && super.cancel(mayInterruptIfRunning); + } - @Override - public ResultSet getUninterruptibly() { - try { - return Uninterruptibles.getUninterruptibly(this); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + @Override + public ResultSet getUninterruptibly() { + try { + return Uninterruptibles.getUninterruptibly(this); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } + } - @Override - public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException { - try { - return Uninterruptibles.getUninterruptibly(this, timeout, unit); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + @Override + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException { + try { + return Uninterruptibles.getUninterruptibly(this, timeout, unit); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } + } } - diff --git a/driver-core/src/main/java/com/datastax/driver/core/Clock.java b/driver-core/src/main/java/com/datastax/driver/core/Clock.java index a2d31b5b0c5..aad701d5887 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Clock.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Clock.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,48 +17,52 @@ */ package com.datastax.driver.core; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; import java.util.concurrent.atomic.AtomicReference; - -import static java.util.concurrent.TimeUnit.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A small abstraction around system clock that aims to provide microsecond precision with the best accuracy possible. + * A small abstraction around system clock that aims to provide microsecond precision with the best + * accuracy possible. */ interface Clock { - /** - * Returns the current time in microseconds. - * - * @return the difference, measured in microseconds, between the current time and and the Epoch - * (that is, midnight, January 1, 1970 UTC). - */ - long currentTimeMicros(); + /** + * Returns the current time in microseconds. + * + * @return the difference, measured in microseconds, between the current time and and the Epoch + * (that is, midnight, January 1, 1970 UTC). + */ + long currentTimeMicros(); } /** - * Factory that returns the best Clock implementation depending on what native libraries are available in the system. - * If LibC is available through JNR, and if the system property {@code com.datastax.driver.USE_NATIVE_CLOCK} is set to {@code true} - * (which is the default value), then {@link NativeClock} is returned, otherwise {@link SystemClock} is returned. + * Factory that returns the best Clock implementation depending on what native libraries are + * available in the system. If LibC is available through JNR, and if the system property {@code + * com.datastax.driver.USE_NATIVE_CLOCK} is set to {@code true} (which is the default value), then + * {@link NativeClock} is returned, otherwise {@link SystemClock} is returned. 
*/ class ClockFactory { - private static final Logger LOGGER = LoggerFactory.getLogger(ClockFactory.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ClockFactory.class); - private static final String USE_NATIVE_CLOCK_SYSTEM_PROPERTY = "com.datastax.driver.USE_NATIVE_CLOCK"; + private static final String USE_NATIVE_CLOCK_SYSTEM_PROPERTY = + "com.datastax.driver.USE_NATIVE_CLOCK"; - static Clock newInstance() { - if (SystemProperties.getBoolean(USE_NATIVE_CLOCK_SYSTEM_PROPERTY, true) && Native.isGettimeofdayAvailable()) { - LOGGER.info("Using native clock to generate timestamps."); - return new NativeClock(); - } else { - LOGGER.info("Using java.lang.System clock to generate timestamps."); - return new SystemClock(); - } + static Clock newInstance() { + if (SystemProperties.getBoolean(USE_NATIVE_CLOCK_SYSTEM_PROPERTY, true) + && Native.isGettimeofdayAvailable()) { + LOGGER.info("Using native clock to generate timestamps."); + return new NativeClock(); + } else { + LOGGER.info("Using java.lang.System clock to generate timestamps."); + return new SystemClock(); } - + } } /** @@ -66,69 +72,68 @@ static Clock newInstance() { */ class SystemClock implements Clock { - @Override - public long currentTimeMicros() { - return System.currentTimeMillis() * 1000; - } - + @Override + public long currentTimeMicros() { + return System.currentTimeMillis() * 1000; + } } /** - * Provides the current time with microseconds precision with some reasonable accuracy through - * the use of {@link Native#currentTimeMicros()}. - *

- * Because calling JNR methods is slightly expensive, - * we only call it once per second and add the number of nanoseconds since the last call - * to get the current time, which is good enough an accuracy for our purpose (see CASSANDRA-6106). - *

- * This reduces the cost of the call to {@link NativeClock#currentTimeMicros()} to levels comparable - * to those of a call to {@link System#nanoTime()}. + * Provides the current time with microseconds precision with some reasonable accuracy through the + * use of {@link Native#currentTimeMicros()}. + * + *

Because calling JNR methods is slightly expensive, we only call it once per second and add the + * number of nanoseconds since the last call to get the current time, which is accurate enough + * for our purposes (see CASSANDRA-6106). + * + *

This reduces the cost of the call to {@link NativeClock#currentTimeMicros()} to levels + * comparable to those of a call to {@link System#nanoTime()}. */ class NativeClock implements Clock { - private static final long ONE_SECOND_NS = NANOSECONDS.convert(1, SECONDS); - private static final long ONE_MILLISECOND_NS = NANOSECONDS.convert(1, MILLISECONDS); + private static final long ONE_SECOND_NS = NANOSECONDS.convert(1, SECONDS); + private static final long ONE_MILLISECOND_NS = NANOSECONDS.convert(1, MILLISECONDS); - /** - * Records a time in micros along with the System.nanoTime() value at the time the - * time is fetched. - */ - private static class FetchedTime { + /** + * Records a time in micros along with the System.nanoTime() value at the time the time is + * fetched. + */ + private static class FetchedTime { - private final long timeInMicros; - private final long nanoTimeAtCheck; + private final long timeInMicros; + private final long nanoTimeAtCheck; - private FetchedTime(long timeInMicros, long nanoTimeAtCheck) { - this.timeInMicros = timeInMicros; - this.nanoTimeAtCheck = nanoTimeAtCheck; - } + private FetchedTime(long timeInMicros, long nanoTimeAtCheck) { + this.timeInMicros = timeInMicros; + this.nanoTimeAtCheck = nanoTimeAtCheck; } + } - private final AtomicReference lastFetchedTime = new AtomicReference(fetchTimeMicros()); + private final AtomicReference lastFetchedTime = + new AtomicReference(fetchTimeMicros()); - @Override - public long currentTimeMicros() { - FetchedTime spec = lastFetchedTime.get(); - long curNano = System.nanoTime(); - if (curNano > spec.nanoTimeAtCheck + ONE_SECOND_NS) { - lastFetchedTime.compareAndSet(spec, spec = fetchTimeMicros()); - } - return spec.timeInMicros + ((curNano - spec.nanoTimeAtCheck) / 1000); + @Override + public long currentTimeMicros() { + FetchedTime spec = lastFetchedTime.get(); + long curNano = System.nanoTime(); + if (curNano > spec.nanoTimeAtCheck + ONE_SECOND_NS) { + lastFetchedTime.compareAndSet(spec, spec = fetchTimeMicros()); } - - private static FetchedTime fetchTimeMicros() { - // To compensate for the fact that the Native.currentTimeMicros call could take - // some time, instead of picking the nano time before the call or after the - // call, we take the average of both. - long start = System.nanoTime(); - long micros = Native.currentTimeMicros(); - long end = System.nanoTime(); - // If it turns out the call took us more than 1 millisecond (can happen while - // the JVM warms up, unlikely otherwise, but no reasons to take risks), fall back - // to System.currentTimeMillis() temporarily - if ((end - start) > ONE_MILLISECOND_NS) - return new FetchedTime(System.currentTimeMillis() * 1000, System.nanoTime()); - return new FetchedTime(micros, (end + start) / 2); - } - + return spec.timeInMicros + ((curNano - spec.nanoTimeAtCheck) / 1000); + } + + private static FetchedTime fetchTimeMicros() { + // To compensate for the fact that the Native.currentTimeMicros call could take + // some time, instead of picking the nano time before the call or after the + // call, we take the average of both. 
+ long start = System.nanoTime(); + long micros = Native.currentTimeMicros(); + long end = System.nanoTime(); + // If it turns out the call took us more than 1 millisecond (can happen while + // the JVM warms up, unlikely otherwise, but no reasons to take risks), fall back + // to System.currentTimeMillis() temporarily + if ((end - start) > ONE_MILLISECOND_NS) + return new FetchedTime(System.currentTimeMillis() * 1000, System.nanoTime()); + return new FetchedTime(micros, (end + start) / 2); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java b/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java index a1fbb4c5524..de5961965f8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CloseFuture.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,81 +20,79 @@ import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; - import java.util.List; /** * A future on the shutdown of a Cluster or Session instance. - *

- * This is a standard future except for the fact that this class has an - * additional {@link #force} method that can be used to expedite the shutdown - * process (see below). - *

- * Note that this class implements Guava's {@code + * + *

This is a standard future except for the fact that this class has an additional {@link #force} + * method that can be used to expedite the shutdown process (see below). + * + *

Note that this class implements Guava's {@code * ListenableFuture} and can so be used with Guava's future utilities. */ public abstract class CloseFuture extends AbstractFuture { - CloseFuture() { - } + CloseFuture() {} - static CloseFuture immediateFuture() { - CloseFuture future = new CloseFuture() { - @Override - public CloseFuture force() { - return this; - } + static CloseFuture immediateFuture() { + CloseFuture future = + new CloseFuture() { + @Override + public CloseFuture force() { + return this; + } }; - future.set(null); - return future; - } + future.set(null); + return future; + } - /** - * Try to force the completion of the shutdown this is a future of. - *

- * This method will do its best to expedite the shutdown process. In - * particular, all connections will be closed right away, even if there are - * ongoing queries at the time this method is called. - *

- * Note that this method does not block. The completion of this method does - * not imply the shutdown process is done, you still need to wait on this - * future to ensure that, but calling this method will ensure said - * future will return in a timely way. - * - * @return this {@code CloseFuture}. - */ - public abstract CloseFuture force(); + /** + * Try to force the completion of the shutdown this is a future of. + * + *

This method will do its best to expedite the shutdown process. In particular, all + * connections will be closed right away, even if there are ongoing queries at the time this + * method is called. + * + *

Note that this method does not block. The completion of this method does not imply the + * shutdown process is done, you still need to wait on this future to ensure that, but calling + * this method will ensure said future will return in a timely way. + * + * @return this {@code CloseFuture}. + */ + public abstract CloseFuture force(); - // Internal utility for cases where we want to build a future that wait on other ones - static class Forwarding extends CloseFuture { + // Internal utility for cases where we want to build a future that wait on other ones + static class Forwarding extends CloseFuture { - private final List futures; + private final List futures; - Forwarding(List futures) { - this.futures = futures; + Forwarding(List futures) { + this.futures = futures; - Futures.addCallback(Futures.allAsList(futures), new FutureCallback>() { - @Override - public void onFailure(Throwable t) { - Forwarding.this.setException(t); - } + GuavaCompatibility.INSTANCE.addCallback( + Futures.allAsList(futures), + new FutureCallback>() { + @Override + public void onFailure(Throwable t) { + Forwarding.this.setException(t); + } - @Override - public void onSuccess(List v) { - Forwarding.this.onFuturesDone(); - } - }); - } + @Override + public void onSuccess(List v) { + Forwarding.this.onFuturesDone(); + } + }); + } - @Override - public CloseFuture force() { - for (CloseFuture future : futures) - future.force(); - return this; - } + @Override + public CloseFuture force() { + for (CloseFuture future : futures) future.force(); + return this; + } - protected void onFuturesDone() { - set(null); - } + protected void onFuturesDone() { + set(null); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CloudConfig.java b/driver-core/src/main/java/com/datastax/driver/core/CloudConfig.java new file mode 100644 index 00000000000..332ed7fd62a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CloudConfig.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import com.google.common.collect.ImmutableList; +import java.net.InetSocketAddress; +import java.util.List; + +class CloudConfig { + + private final InetSocketAddress proxyAddress; + private final List endPoints; + private final String localDatacenter; + private final SSLOptions sslOptions; + private final AuthProvider authProvider; + + CloudConfig( + InetSocketAddress proxyAddress, + List endPoints, + String localDatacenter, + SSLOptions sslOptions, + AuthProvider authProvider) { + this.proxyAddress = proxyAddress; + this.endPoints = ImmutableList.copyOf(endPoints); + this.localDatacenter = localDatacenter; + this.sslOptions = sslOptions; + this.authProvider = authProvider; + } + + /** @return not null proxy Address */ + InetSocketAddress getProxyAddress() { + return proxyAddress; + } + + /** @return not null endpoints */ + List getEndPoints() { + return endPoints; + } + + /** @return not null local data center */ + String getLocalDatacenter() { + return localDatacenter; + } + + /** @return not null ssl options that can be used to connect to SniProxy */ + SSLOptions getSslOptions() { + return sslOptions; + } + + /** + * @return nullable AuthProvider that can be used to connect to proxy or null if there was not + * username/password provided in the secure bundle + */ + AuthProvider getAuthProvider() { + return authProvider; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java b/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java new file mode 100644 index 00000000000..d2181cc3e77 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static com.google.common.base.Charsets.UTF_8; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Throwables; +import com.google.common.io.ByteStreams; +import com.google.common.net.HostAndPort; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; + +class CloudConfigFactory { + + /** + * Creates a {@link CloudConfig} with information fetched from the specified {@link InputStream}. + * + *
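For illustration, a hedged sketch of how this factory could be invoked. Note that CloudConfig and CloudConfigFactory are package-private, so code like this would live inside com.datastax.driver.core; the bundle path is a placeholder, and the calling method is assumed to declare (or handle) IOException and GeneralSecurityException:

```
// Sketch only: reads a secure connect bundle from the local filesystem.
InputStream bundle = new FileInputStream("/path/to/secure-connect-bundle.zip");
CloudConfig config = new CloudConfigFactory().createCloudConfig(bundle);
String localDc = config.getLocalDatacenter();        // local data center to prefer
SSLOptions sslOptions = config.getSslOptions();       // SSL context for the SNI proxy
List<EndPoint> contactPoints = config.getEndPoints(); // one SniEndPoint per node
```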

The stream must contain a valid secure connect bundle archive in ZIP format. Note that the + * stream will be closed after a call to that method and cannot be used anymore. + * + * @param cloudConfig the stream to read the Cloud configuration from; cannot be null. + * @throws IOException If the Cloud configuration cannot be read. + * @throws GeneralSecurityException If the Cloud SSL context cannot be created. + */ + CloudConfig createCloudConfig(InputStream cloudConfig) + throws IOException, GeneralSecurityException { + checkNotNull(cloudConfig, "cloudConfig cannot be null"); + JsonNode configJson = null; + ByteArrayOutputStream keyStoreOutputStream = null; + ByteArrayOutputStream trustStoreOutputStream = null; + ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); + ZipInputStream zipInputStream = null; + try { + zipInputStream = new ZipInputStream(cloudConfig); + ZipEntry entry; + while ((entry = zipInputStream.getNextEntry()) != null) { + String fileName = entry.getName(); + if (fileName.equals("config.json")) { + configJson = mapper.readTree(zipInputStream); + } else if (fileName.equals("identity.jks")) { + keyStoreOutputStream = new ByteArrayOutputStream(); + ByteStreams.copy(zipInputStream, keyStoreOutputStream); + } else if (fileName.equals("trustStore.jks")) { + trustStoreOutputStream = new ByteArrayOutputStream(); + ByteStreams.copy(zipInputStream, trustStoreOutputStream); + } + } + } finally { + if (zipInputStream != null) { + zipInputStream.close(); + } + } + + if (configJson == null) { + throw new IllegalStateException("Invalid bundle: missing file config.json"); + } + if (keyStoreOutputStream == null) { + throw new IllegalStateException("Invalid bundle: missing file identity.jks"); + } + if (trustStoreOutputStream == null) { + throw new IllegalStateException("Invalid bundle: missing file trustStore.jks"); + } + char[] keyStorePassword = getKeyStorePassword(configJson); + char[] trustStorePassword = getTrustStorePassword(configJson); + ByteArrayInputStream keyStoreInputStream = + new ByteArrayInputStream(keyStoreOutputStream.toByteArray()); + ByteArrayInputStream trustStoreInputStream = + new ByteArrayInputStream(trustStoreOutputStream.toByteArray()); + SSLContext sslContext = + createSslContext( + keyStoreInputStream, keyStorePassword, trustStoreInputStream, trustStorePassword); + URL metadataServiceUrl = getMetadataServiceUrl(configJson); + JsonNode proxyMetadataJson; + BufferedReader proxyMetadata = null; + try { + proxyMetadata = fetchProxyMetadata(metadataServiceUrl, sslContext); + proxyMetadataJson = mapper.readTree(proxyMetadata); + } finally { + if (proxyMetadata != null) { + proxyMetadata.close(); + } + } + InetSocketAddress sniProxyAddress = getSniProxyAddress(proxyMetadataJson); + List endPoints = getEndPoints(proxyMetadataJson, sniProxyAddress); + String localDatacenter = getLocalDatacenter(proxyMetadataJson); + SSLOptions sslOptions = getSSLOptions(sslContext); + AuthProvider authProvider = getAuthProvider(configJson); + return new CloudConfig(sniProxyAddress, endPoints, localDatacenter, sslOptions, authProvider); + } + + protected char[] getKeyStorePassword(JsonNode configFile) { + if (configFile.has("keyStorePassword")) { + return configFile.get("keyStorePassword").asText().toCharArray(); + } else { + throw new IllegalStateException("Invalid config.json: missing field keyStorePassword"); + } + } + + protected char[] getTrustStorePassword(JsonNode configFile) { + if (configFile.has("trustStorePassword")) { + return 
configFile.get("trustStorePassword").asText().toCharArray(); + } else { + throw new IllegalStateException("Invalid config.json: missing field trustStorePassword"); + } + } + + protected URL getMetadataServiceUrl(JsonNode configFile) throws MalformedURLException { + if (configFile.has("host")) { + String metadataServiceHost = configFile.get("host").asText(); + if (configFile.has("port")) { + int metadataServicePort = configFile.get("port").asInt(); + return new URL("https", metadataServiceHost, metadataServicePort, "/metadata"); + } else { + throw new IllegalStateException("Invalid config.json: missing field port"); + } + } else { + throw new IllegalStateException("Invalid config.json: missing field host"); + } + } + + protected AuthProvider getAuthProvider(JsonNode configFile) { + if (configFile.has("username")) { + String username = configFile.get("username").asText(); + if (configFile.has("password")) { + String password = configFile.get("password").asText(); + return new PlainTextAuthProvider(username, password); + } + } + return null; + } + + protected SSLContext createSslContext( + ByteArrayInputStream keyStoreInputStream, + char[] keyStorePassword, + ByteArrayInputStream trustStoreInputStream, + char[] trustStorePassword) + throws IOException, GeneralSecurityException { + KeyManagerFactory kmf = createKeyManagerFactory(keyStoreInputStream, keyStorePassword); + TrustManagerFactory tmf = createTrustManagerFactory(trustStoreInputStream, trustStorePassword); + SSLContext sslContext = SSLContext.getInstance("SSL"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return sslContext; + } + + protected KeyManagerFactory createKeyManagerFactory( + InputStream keyStoreInputStream, char[] keyStorePassword) + throws IOException, GeneralSecurityException { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(keyStoreInputStream, keyStorePassword); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(ks, keyStorePassword); + Arrays.fill(keyStorePassword, (char) 0); + return kmf; + } + + protected TrustManagerFactory createTrustManagerFactory( + InputStream trustStoreInputStream, char[] trustStorePassword) + throws IOException, GeneralSecurityException { + KeyStore ts = KeyStore.getInstance("JKS"); + ts.load(trustStoreInputStream, trustStorePassword); + TrustManagerFactory tmf = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + Arrays.fill(trustStorePassword, (char) 0); + return tmf; + } + + protected BufferedReader fetchProxyMetadata(URL metadataServiceUrl, SSLContext sslContext) + throws IOException { + HttpsURLConnection connection = (HttpsURLConnection) metadataServiceUrl.openConnection(); + connection.setSSLSocketFactory(sslContext.getSocketFactory()); + connection.setRequestMethod("GET"); + return new BufferedReader(new InputStreamReader(connection.getInputStream(), UTF_8)); + } + + protected String getLocalDatacenter(JsonNode proxyMetadata) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("local_dc")) { + return contactInfo.get("local_dc").asText(); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field local_dc"); + } + } + + protected InetSocketAddress getSniProxyAddress(JsonNode proxyMetadata) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("sni_proxy_address")) { + HostAndPort sniProxyHostAndPort = + 
HostAndPort.fromString(contactInfo.get("sni_proxy_address").asText()); + if (!sniProxyHostAndPort.hasPort()) { + throw new IllegalStateException( + "Invalid proxy metadata: missing port from field sni_proxy_address"); + } + String host = GuavaCompatibility.INSTANCE.getHost(sniProxyHostAndPort); + return InetSocketAddress.createUnresolved(host, sniProxyHostAndPort.getPort()); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field sni_proxy_address"); + } + } + + protected List getEndPoints(JsonNode proxyMetadata, InetSocketAddress sniProxyAddress) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("contact_points")) { + List endPoints = new ArrayList(); + JsonNode hostIdsJson = contactInfo.get("contact_points"); + for (int i = 0; i < hostIdsJson.size(); i++) { + endPoints.add(new SniEndPoint(sniProxyAddress, hostIdsJson.get(i).asText())); + } + return endPoints; + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field contact_points"); + } + } + + protected JsonNode getContactInfo(JsonNode proxyMetadata) { + if (proxyMetadata.has("contact_info")) { + return proxyMetadata.get("contact_info"); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field contact_info"); + } + } + + protected SSLOptions getSSLOptions(SSLContext sslContext) { + try { + return SniSSLOptions.builder().withSSLContext(sslContext).build(); + } catch (Exception e) { + throw Throwables.propagate(e); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 56b17601649..9c95f391ed5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,36 +17,87 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; -import com.datastax.driver.core.policies.*; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.BusyConnectionException; +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; +import com.datastax.driver.core.policies.AddressTranslator; +import com.datastax.driver.core.policies.IdentityTranslator; +import com.datastax.driver.core.policies.LatencyAwarePolicy; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.Policies; +import com.datastax.driver.core.policies.ReconnectionPolicy; +import com.datastax.driver.core.policies.RetryPolicy; +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; import com.datastax.driver.core.utils.MoreFutures; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Functions; import com.google.common.base.Predicates; import com.google.common.base.Strings; import com.google.common.base.Throwables; -import com.google.common.collect.*; -import com.google.common.util.concurrent.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.collect.MapMaker; +import com.google.common.collect.SetMultimap; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; import java.net.UnknownHostException; -import java.util.*; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; -import java.util.concurrent.*; +import java.util.ResourceBundle; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArraySet; +import 
java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - -import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Information and known state of a Cassandra cluster. - *

- * This is the main entry point of the driver. A simple example of access to a - * Cassandra cluster would be: + * + *

This is the main entry point of the driver. A simple example of access to a Cassandra cluster + * would be: + * *

  *   Cluster cluster = Cluster.builder().addContactPoint("192.168.0.1").build();
  *   Session session = cluster.connect("db1");
@@ -52,2892 +105,3193 @@
  *   for (Row row : session.execute("SELECT * FROM table1"))
  *       // do something ...
  * 
- *

- * A cluster object maintains a permanent connection to one of the cluster nodes - * which it uses solely to maintain information on the state and current - * topology of the cluster. Using the connection, the driver will discover all - * the nodes currently in the cluster as well as new nodes joining the cluster - * subsequently. + * + *

A cluster object maintains a permanent connection to one of the cluster nodes which it uses + * solely to maintain information on the state and current topology of the cluster. Using the + * connection, the driver will discover all the nodes currently in the cluster as well as new nodes + * joining the cluster subsequently. */ public class Cluster implements Closeable { - private static final Logger logger = LoggerFactory.getLogger(Cluster.class); - - static { - // Force initialization to fail fast if there is an issue detecting the version - GuavaCompatibility.init(); + private static final Logger logger = LoggerFactory.getLogger(Cluster.class); + + private static final ResourceBundle driverProperties = + ResourceBundle.getBundle("com.datastax.driver.core.Driver"); + + static { + logDriverVersion(); + // Force initialization to fail fast if there is an issue detecting the version + GuavaCompatibility.init(); + } + + @VisibleForTesting + static final int NEW_NODE_DELAY_SECONDS = + SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); + + // Some per-JVM number that allows to generate unique cluster names when + // multiple Cluster instance are created in the same JVM. + private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); + + private static final int NOTIF_LOCK_TIMEOUT_SECONDS = + SystemProperties.getInt("com.datastax.driver.NOTIF_LOCK_TIMEOUT_SECONDS", 60); + + final Manager manager; + + /** + * Constructs a new Cluster instance. + * + *

This constructor is mainly exposed so Cluster can be sub-classed as a means to make + * testing/mocking easier or to "intercept" its method call. Most users shouldn't extend this + * class however and should prefer either using the {@link #builder} or calling {@link #buildFrom} + * with a custom Initializer. + * + * @param name the name to use for the cluster (this is not the Cassandra cluster name, see {@link + * #getClusterName}). + * @param contactPoints the list of contact points to use for the new cluster. + * @param configuration the configuration for the new cluster. + */ + protected Cluster(String name, List contactPoints, Configuration configuration) { + this(name, contactPoints, configuration, Collections.emptySet()); + } + + /** + * Constructs a new Cluster instance. + * + *

This constructor is mainly exposed so Cluster can be sub-classed as a means to make + * testing/mocking easier or to "intercept" its method call. Most users shouldn't extend this + * class however and should prefer using the {@link #builder}. + * + * @param initializer the initializer to use. + * @see #buildFrom + */ + protected Cluster(Initializer initializer) { + this( + initializer.getClusterName(), + checkNotEmpty(initializer.getContactPoints()), + initializer.getConfiguration(), + initializer.getInitialListeners()); + } + + private static List checkNotEmpty(List contactPoints) { + if (contactPoints.isEmpty()) + throw new IllegalArgumentException("Cannot build a cluster without contact points"); + return contactPoints; + } + + private Cluster( + String name, + List contactPoints, + Configuration configuration, + Collection listeners) { + this.manager = new Manager(name, contactPoints, configuration, listeners); + } + + /** + * Initialize this Cluster instance. + * + *

This method creates an initial connection to one of the contact points used to construct the + * {@code Cluster} instance. That connection is then used to populate the cluster {@link + * Metadata}. + * + *

Calling this method is optional in the sense that any call to one of the {@code connect} + * methods of this object will automatically trigger a call to this method beforehand. It is thus + * only useful to call this method if for some reason you want to populate the metadata (or test + * that at least one contact point can be reached) without creating a first {@code Session}. + * + *

Please note that this method only creates one control connection for gathering cluster + * metadata. In particular, it doesn't create any connection pools. Those are created when a new + * {@code Session} is created through {@code connect}. + * + *
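As a small illustration of that point, a sketch of initializing the cluster only to inspect its metadata, without opening any session (the contact point is a placeholder, and Metadata#getAllHosts is assumed from the existing 3.x API rather than shown in this diff):

```
Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
cluster.init(); // opens only the control connection and populates the metadata
for (Host host : cluster.getMetadata().getAllHosts()) {
  System.out.println("Discovered host: " + host);
}
cluster.close();
```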

This method has no effect if the cluster is already initialized. + * + * @return this {@code Cluster} object. + * @throws NoHostAvailableException if no host amongst the contact points can be reached. + * @throws AuthenticationException if an authentication error occurs while contacting the initial + * contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + */ + public Cluster init() { + this.manager.init(); + return this; + } + + /** + * Build a new cluster based on the provided initializer. + * + *

Note that for building a cluster programmatically, Cluster.Builder provides a slightly less + * verbose shortcut with {@link Builder#build}. + * + *

Also note that all the contact points provided by {@code initializer} must share the + * same port. + * + * @param initializer the Cluster.Initializer to use + * @return the newly created Cluster instance + * @throws IllegalArgumentException if the list of contact points provided by {@code initializer} + * is empty or if not all those contact points have the same port. + */ + public static Cluster buildFrom(Initializer initializer) { + return new Cluster(initializer); + } + + /** + * Creates a new {@link Cluster.Builder} instance. + * + *

This is a convenience method for {@code new Cluster.Builder()}. + * + * @return the new cluster builder. + */ + public static Cluster.Builder builder() { + return new Cluster.Builder(); + } + + /** + * Returns the current version of the driver. + * + *

This is intended for products that wrap or extend the driver, as a way to check + * compatibility if end-users override the driver version in their application. + * + * @return the version. + */ + public static String getDriverVersion() { + return driverProperties.getString("driver.version"); + } + + /** + * Logs the driver version to the console. + * + *

This method logs the version using the logger {@code com.datastax.driver.core} and level + * {@code INFO}. + */ + public static void logDriverVersion() { + Logger core = LoggerFactory.getLogger("com.datastax.driver.core"); + core.info("Java Driver {} for Apache Cassandra", getDriverVersion()); + } + + /** + * Creates a new session on this cluster but does not initialize it. + * + *

Because this method does not perform any initialization, it cannot fail. The initialization + * of the session (the connection of the Session to the Cassandra nodes) will occur if either the + * {@link Session#init} method is called explicitly, or whenever the returned session object is + * used. + * + *

Once a session returned by this method gets initialized (see above), it will be set to no + * keyspace. If you want to set such a session to a keyspace, you will have to explicitly execute a + * 'USE mykeyspace' query. + * + *
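A small sketch of that deferred-initialization pattern (keyspace and table names are placeholders):

```
Session session = cluster.newSession(); // returns immediately, no connection attempted yet
// ... later, initialize explicitly (or let the first query trigger it):
session.init();
session.execute("USE mykeyspace");
ResultSet rs = session.execute("SELECT * FROM table1");
```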

Note that if you do not particularly need to defer initialization, it is simpler to use one + * of the {@code connect()} methods of this class. + * + * @return a new, non-initialized session on this cluster. + */ + public Session newSession() { + checkNotClosed(manager); + return manager.newSession(); + } + + /** + * Creates a new session on this cluster and initializes it. + * + *

Note that this method will initialize the newly created session, trying to connect to the + * Cassandra nodes before returning. If you only want to create a Session object without + * initializing it right away, see {@link #newSession}. + * + * @return a new session on this cluster sets to no keyspace. + * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has + * not be called and this is the first connect call) and no host amongst the contact points + * can be reached. + * @throws AuthenticationException if an authentication error occurs while contacting the initial + * contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + */ + public Session connect() { + try { + return Uninterruptibles.getUninterruptibly(connectAsync()); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } + } + + /** + * Creates a new session on this cluster, initialize it and sets the keyspace to the provided one. + * + *

Note that this method will initialize the newly created session, trying to connect to the + * Cassandra nodes before returning. If you only want to create a Session object without + * initializing it right away, see {@link #newSession}. + * + * @param keyspace The name of the keyspace to use for the created {@code Session}. + * @return a new session on this cluster sets to keyspace {@code keyspaceName}. + * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has + * not be called and this is the first connect call) and no host amongst the contact points + * can be reached, or if no host can be contacted to set the {@code keyspace}. + * @throws AuthenticationException if an authentication error occurs while contacting the initial + * contact points. + * @throws InvalidQueryException if the keyspace does not exist. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + */ + public Session connect(String keyspace) { + try { + return Uninterruptibles.getUninterruptibly(connectAsync(keyspace)); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } + } + + /** + * Creates a new session on this cluster and initializes it asynchronously. + * + *

This will also initialize the {@code Cluster} if needed; note that cluster initialization + * happens synchronously on the thread that called this method. Therefore it is recommended to + * initialize the cluster at application startup, and not rely on this method to do it. + * + *

Note that if a {@linkplain Configuration#getDefaultKeyspace() default keyspace} has been + * configured for use with a DBaaS cluster, this method will attempt to set the session keyspace + * to that keyspace, effectively behaving like {@link #connect(String)}. + * + * @return a future that will complete when the session is fully initialized. + * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has + * not been called and this is the first connect call) and no host amongst the contact points + * can be reached. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + * @see #connect() + */ + public ListenableFuture connectAsync() { + String defaultKeyspace = getConfiguration().getDefaultKeyspace(); + return connectAsync(defaultKeyspace); + } + + /** + * Creates a new session on this cluster, and initializes it to the given keyspace asynchronously. + * + *
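For example, a non-blocking connection and first query could be wired up as follows. This is a sketch only: it assumes a Guava version that provides MoreExecutors.directExecutor(), and "db1" is a placeholder keyspace:

```
ListenableFuture<Session> sessionFuture = cluster.connectAsync("db1");
Futures.addCallback(
    sessionFuture,
    new FutureCallback<Session>() {
      @Override
      public void onSuccess(Session session) {
        session.executeAsync("SELECT release_version FROM system.local");
      }

      @Override
      public void onFailure(Throwable t) {
        // the connection failed, or the "USE db1" query failed (e.g. unknown keyspace)
      }
    },
    MoreExecutors.directExecutor());
```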

This will also initialize the {@code Cluster} if needed; note that cluster initialization + * happens synchronously on the thread that called this method. Therefore it is recommended to + * initialize the cluster at application startup, and not rely on this method to do it. + * + * @param keyspace The name of the keyspace to use for the created {@code Session}. + * @return a future that will complete when the session is fully initialized. + * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has + * not been called and this is the first connect call) and no host amongst the contact points + * can be reached. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + */ + public ListenableFuture connectAsync(final String keyspace) { + checkNotClosed(manager); + init(); + final Session session = manager.newSession(); + ListenableFuture sessionInitialized = session.initAsync(); + if (keyspace == null) { + return sessionInitialized; + } else { + final String useQuery = "USE " + keyspace; + ListenableFuture keyspaceSet = + GuavaCompatibility.INSTANCE.transformAsync( + sessionInitialized, + new AsyncFunction() { + @Override + public ListenableFuture apply(Session session) throws Exception { + return session.executeAsync(useQuery); + } + }); + ListenableFuture withErrorHandling = + GuavaCompatibility.INSTANCE.withFallback( + keyspaceSet, + new AsyncFunction() { + @Override + public ListenableFuture apply(Throwable t) throws Exception { + session.closeAsync(); + if (t instanceof SyntaxError) { + // Give a more explicit message, because it's probably caused by a bad keyspace + // name + SyntaxError e = (SyntaxError) t; + t = + new SyntaxError( + e.getEndPoint(), + String.format( + "Error executing \"%s\" (%s). Check that your keyspace name is valid", + useQuery, e.getMessage())); + } + throw Throwables.propagate(t); + } + }); + return GuavaCompatibility.INSTANCE.transform(withErrorHandling, Functions.constant(session)); + } + } + + /** + * The name of this cluster object. + * + *

Note that this is not the Cassandra cluster name, but rather a name assigned to this Cluster + * object. Currently, that name is only used for one purpose: to distinguish exposed JMX metrics + * when multiple Cluster instances live in the same JVM (which should be rare in the first place). + * That name can be set at Cluster building time (through {@link Builder#withClusterName} for + * instance) but will default to a name like {@code cluster1} where each Cluster instance in the + * same JVM will have a different number. + * + * @return the name for this cluster instance. + */ + public String getClusterName() { + return manager.clusterName; + } + + /** + * Returns read-only metadata on the connected cluster. + * + *

This includes the known nodes with their status as seen by the driver, as well as the schema + * definitions. Since this return metadata on the connected cluster, this method may trigger the + * creation of a connection if none has been established yet (neither {@code init()} nor {@code + * connect()} has been called yet). + * + * @return the cluster metadata. + * @throws NoHostAvailableException if the Cluster has not been initialized yet and no host + * amongst the contact points can be reached. + * @throws AuthenticationException if an authentication error occurs while contacting the initial + * contact points. + * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can + * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result + * of an error while initializing the Cluster. + */ + public Metadata getMetadata() { + manager.init(); + return manager.metadata; + } + + /** + * The cluster configuration. + * + * @return the cluster configuration. + */ + public Configuration getConfiguration() { + return manager.configuration; + } + + /** + * The cluster metrics. + * + * @return the cluster metrics, or {@code null} if this cluster has not yet been {@link #init() + * initialized}, or if metrics collection has been disabled (that is if {@link + * Configuration#getMetricsOptions} returns {@code null}). + */ + public Metrics getMetrics() { + checkNotClosed(manager); + return manager.metrics; + } + + /** + * Registers the provided listener to be notified on hosts up/down/added/removed events. + * + *

Registering the same listener multiple times is a no-op. + * + *
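As an illustration, registering a listener that simply logs node events might look like the sketch below. Only onRegister, onUnregister and onAdd are referenced in this diff; the remaining callbacks are assumed from the usual 3.x Host.StateListener contract:

```
cluster.register(
    new Host.StateListener() {
      @Override public void onAdd(Host host) { System.out.println("Added " + host); }
      @Override public void onUp(Host host) { System.out.println("Up " + host); }
      @Override public void onDown(Host host) { System.out.println("Down " + host); }
      @Override public void onRemove(Host host) { System.out.println("Removed " + host); }
      @Override public void onRegister(Cluster c) { /* no-op */ }
      @Override public void onUnregister(Cluster c) { /* no-op */ }
    });
```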

This method should be used to register additional listeners on an already-initialized + * cluster. To add listeners to a cluster object prior to its initialization, use {@link + * Builder#withInitialListeners(Collection)}. Calling this method on a non-initialized cluster + * will result in the listener being {@link + * com.datastax.driver.core.Host.StateListener#onRegister(Cluster) notified} twice of cluster + * registration: once inside this method, and once at cluster initialization. + * + * @param listener the new {@link Host.StateListener} to register. + * @return this {@code Cluster} object; + */ + public Cluster register(Host.StateListener listener) { + checkNotClosed(manager); + boolean added = manager.listeners.add(listener); + if (added) listener.onRegister(this); + return this; + } + + /** + * Unregisters the provided listener from being notified on hosts events. + * + *

This method is a no-op if {@code listener} hasn't previously been registered against this + * Cluster. + * + * @param listener the {@link Host.StateListener} to unregister. + * @return this {@code Cluster} object; + */ + public Cluster unregister(Host.StateListener listener) { + checkNotClosed(manager); + boolean removed = manager.listeners.remove(listener); + if (removed) listener.onUnregister(this); + return this; + } + + /** + * Registers the provided tracker to be updated with hosts read latencies. + * + *

Registering the same tracker multiple times is a no-op. + * + *

Beware that the registered tracker's {@link LatencyTracker#update(Host, Statement, + * Exception, long) update} method will be called very frequently (at the end of every query to a + * Cassandra host) and should thus not be costly. + * + *
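For illustration, a very cheap tracker that only counts failed queries (sketch only; the update signature is the one referenced above, onRegister/onUnregister mirror the hooks invoked by register/unregister, and java.util.concurrent.atomic.AtomicLong is assumed to be imported):

```
final AtomicLong failures = new AtomicLong();
cluster.register(
    new LatencyTracker() {
      @Override
      public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) {
        if (exception != null) {
          failures.incrementAndGet(); // keep this path as cheap as possible
        }
      }

      @Override public void onRegister(Cluster c) { /* no-op */ }
      @Override public void onUnregister(Cluster c) { /* no-op */ }
    });
```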

The main use case for a {@link LatencyTracker} is to allow load balancing policies to + * implement latency awareness. For example, {@link LatencyAwarePolicy} registers its own + * internal {@code LatencyTracker} (automatically, you don't have to call this method directly). + * + * @param tracker the new {@link LatencyTracker} to register. + * @return this {@code Cluster} object; + */ + public Cluster register(LatencyTracker tracker) { + checkNotClosed(manager); + boolean added = manager.latencyTrackers.add(tracker); + if (added) tracker.onRegister(this); + return this; + } + + /** + * Unregisters the provided latency tracker from being updated with host read latencies. + * + *

This method is a no-op if {@code tracker} hasn't previously been registered against this + * Cluster. + * + * @param tracker the {@link LatencyTracker} to unregister. + * @return this {@code Cluster} object; + */ + public Cluster unregister(LatencyTracker tracker) { + checkNotClosed(manager); + boolean removed = manager.latencyTrackers.remove(tracker); + if (removed) tracker.onUnregister(this); + return this; + } + + /** + * Registers the provided listener to be updated with schema change events. + * + *

Registering the same listener multiple times is a no-op. + * + * @param listener the new {@link SchemaChangeListener} to register. + * @return this {@code Cluster} object; + */ + public Cluster register(SchemaChangeListener listener) { + checkNotClosed(manager); + boolean added = manager.schemaChangeListeners.add(listener); + if (added) listener.onRegister(this); + return this; + } + + /** + * Unregisters the provided schema change listener from being updated with schema change events. + * + *

This method is a no-op if {@code listener} hasn't previously been registered against this + * Cluster. + * + * @param listener the {@link SchemaChangeListener} to unregister. + * @return this {@code Cluster} object; + */ + public Cluster unregister(SchemaChangeListener listener) { + checkNotClosed(manager); + boolean removed = manager.schemaChangeListeners.remove(listener); + if (removed) listener.onUnregister(this); + return this; + } + + /** + * Initiates a shutdown of this cluster instance. + * + *

This method is asynchronous and returns a future on the completion of the shutdown process. + * As soon as the cluster is shut down, no new requests will be accepted, but already submitted + * queries are allowed to complete. This method closes all connections from all sessions and + * reclaims all resources used by this Cluster instance. + * + *

If for some reason you wish to expedite this process, the {@link CloseFuture#force} can be + * called on the result future. + * + *
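A short sketch of such an expedited shutdown (the timeout value is arbitrary, and the blocking get would need the usual InterruptedException/ExecutionException/TimeoutException handling):

```
CloseFuture shutdown = cluster.closeAsync().force(); // close connections right away
shutdown.get(10, TimeUnit.SECONDS);                  // still wait for the shutdown to finish
```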

This method has no particular effect if the cluster was already closed (in which case the + * returned future will return immediately). + * + * @return a future on the completion of the shutdown process. + */ + public CloseFuture closeAsync() { + return manager.close(); + } + + /** + * Initiates a shutdown of this cluster instance and blocks until that shutdown completes. + * + *

This method is a shortcut for {@code closeAsync().get()}. + */ + @Override + public void close() { + try { + closeAsync().get(); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + /** + * Whether this Cluster instance has been closed. + * + *

Note that this method returns true as soon as one of the close methods ({@link #closeAsync} + * or {@link #close}) has been called; it does not guarantee that the closing is done. If you want + * to guarantee that the closing is done, you can call {@code close()} and wait until it returns + * (or call the get method on {@code closeAsync()} with a very short timeout and check that it + * doesn't time out). + * + * @return {@code true} if this Cluster instance has been closed, {@code false} otherwise. + */ + public boolean isClosed() { + return manager.closeFuture.get() != null; + } + + private static void checkNotClosed(Manager manager) { + if (manager.errorDuringInit()) { + throw new IllegalStateException( + "Can't use this cluster instance because it encountered an error in its initialization", + manager.getInitException()); + } else if (manager.isClosed()) { + throw new IllegalStateException( + "Can't use this cluster instance because it was previously closed"); } + } + + /** + * Initializer for {@link Cluster} instances. + * + *

If you want to create a new {@code Cluster} instance programmatically, then it is advised to + * use {@link Cluster.Builder} which can be obtained from the {@link Cluster#builder} method. + * + *

But it is also possible to implement a custom {@code Initializer} that retrieves + * initialization from a web-service or from a configuration file. + */ + public interface Initializer { - @VisibleForTesting - static final int NEW_NODE_DELAY_SECONDS = SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); + /** + * An optional name for the created cluster. + * + *

Such a name is optional (a default name will be created otherwise) and is currently only used + * for JMX reporting of metrics. See {@link Cluster#getClusterName} for more information. + * + * @return the name for the created cluster or {@code null} to use an automatically generated + * name. + */ + public String getClusterName(); + + /** + * Returns the initial Cassandra hosts to connect to. + * + * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} for more + * details on contact points. + */ + public List getContactPoints(); + + /** + * The configuration to use for the new cluster. + * + *

Note that some configuration can be modified after the cluster initialization but some + * others cannot. In particular, the ones that cannot be changed afterwards include: + * + *

    + *
+ * <ul>
+ *   <li>the port used to connect to Cassandra nodes (see {@link ProtocolOptions}).</li>
+ *   <li>the policies used (see {@link Policies}).</li>
+ *   <li>the authentication info provided (see {@link Configuration}).</li>
+ *   <li>whether metrics are enabled (see {@link Configuration}).</li>
+ * </ul>
+ * + * @return the configuration to use for the new cluster. + */ + public Configuration getConfiguration(); - private static final ResourceBundle driverProperties = ResourceBundle.getBundle("com.datastax.driver.core.Driver"); + /** + * Optional listeners to register against the newly created cluster. + * + *

Note that contrary to listeners registered post Cluster creation, the listeners returned + * by this method will see {@link Host.StateListener#onAdd} events for the initial contact + * points. + * + * @return a possibly empty collection of {@code Host.StateListener} to register against the + * newly created cluster. + */ + public Collection getInitialListeners(); + } + + /** Helper class to build {@link Cluster} instances. */ + public static class Builder implements Initializer { + + private String clusterName; + private final List rawHostAndPortContactPoints = + new ArrayList(); + private final List rawHostContactPoints = new ArrayList(); + private final List contactPoints = new ArrayList(); + private int port = ProtocolOptions.DEFAULT_PORT; + private int maxSchemaAgreementWaitSeconds = + ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS; + private ProtocolVersion protocolVersion; + private AuthProvider authProvider = AuthProvider.NONE; + + private final Policies.Builder policiesBuilder = Policies.builder(); + private final Configuration.Builder configurationBuilder = Configuration.builder(); + + private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE; + private SSLOptions sslOptions = null; + private boolean metricsEnabled = true; + private boolean jmxEnabled = true; + private boolean allowBetaProtocolVersion = false; + private boolean noCompact = false; + private boolean isCloud = false; + + private Collection listeners; - // Some per-JVM number that allows to generate unique cluster names when - // multiple Cluster instance are created in the same JVM. - private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); + @Override + public String getClusterName() { + return clusterName; + } - private static final int NOTIF_LOCK_TIMEOUT_SECONDS = SystemProperties.getInt("com.datastax.driver.NOTIF_LOCK_TIMEOUT_SECONDS", 60); + @Override + public List getContactPoints() { + // Use a set to remove duplicate endpoints + Set allContactPoints = new LinkedHashSet(contactPoints); + // If contact points were provided as InetAddress/InetSocketAddress, assume the default + // endpoint factory is used. + for (InetAddress address : rawHostContactPoints) { + allContactPoints.add(new TranslatedAddressEndPoint(new InetSocketAddress(address, port))); + } + for (InetSocketAddress socketAddress : rawHostAndPortContactPoints) { + allContactPoints.add(new TranslatedAddressEndPoint(socketAddress)); + } + return new ArrayList(allContactPoints); + } - final Manager manager; + /** + * An optional name for the create cluster. + * + *
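Side note (not part of the patch): since `Builder` itself implements `Initializer`, the two construction paths are equivalent, as in this sketch:

```java
import com.datastax.driver.core.Cluster;

public class InitializerSketch {
  public static void main(String[] args) {
    // Builder is an Initializer, so these two calls build equivalent clusters.
    Cluster.Builder builder = Cluster.builder().addContactPoint("127.0.0.1").withPort(9042);

    Cluster viaShortcut = builder.build();               // convenience shortcut
    Cluster viaInitializer = Cluster.buildFrom(builder); // explicit Initializer contract

    viaShortcut.close();
    viaInitializer.close();
  }
}
```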

Note: this is not related to the Cassandra cluster name (though you are free to provide + * the same name). See {@link Cluster#getClusterName} for details. + * + *

If you use this method and create more than one Cluster instance in the same JVM (which + * should be avoided unless you need to connect to multiple Cassandra clusters), you should make + * sure each Cluster instance gets a unique name or you may have a problem with JMX reporting. + * + * @param name the cluster name to use for the created Cluster instance. + * @return this Builder. + */ + public Builder withClusterName(String name) { + this.clusterName = name; + return this; + } /** - * Constructs a new Cluster instance. - *

- * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking - * easier or to "intercept" its method call. Most users shouldn't extend this class however and - * should prefer either using the {@link #builder} or calling {@link #buildFrom} with a custom - * Initializer. - * - * @param name the name to use for the cluster (this is not the Cassandra cluster name, see {@link #getClusterName}). - * @param contactPoints the list of contact points to use for the new cluster. - * @param configuration the configuration for the new cluster. + * The port to use to connect to the Cassandra host. + * + *

If not set through this method, the default port (9042) will be used instead. + * + * @param port the port to set. + * @return this Builder. */ - protected Cluster(String name, List contactPoints, Configuration configuration) { - this(name, contactPoints, configuration, Collections.emptySet()); + public Builder withPort(int port) { + this.port = port; + return this; } /** - * Constructs a new Cluster instance. - *

- * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking - * easier or to "intercept" its method call. Most users shouldn't extend this class however and - * should prefer using the {@link #builder}. - * - * @param initializer the initializer to use. - * @see #buildFrom + * Creates a cluster connection using the latest development protocol version, which is currently in + * beta. Calling this method will result in setting the USE_BETA flag in all outgoing messages, + * which allows the server to negotiate the supported protocol version even if it is currently in + * beta. + * + *

This feature is only available starting with version {@link ProtocolVersion#V5 V5}. + * + *

Use with caution, refer to the server and protocol documentation for the details on latest + * protocol version. + * + * @return this Builder. */ - protected Cluster(Initializer initializer) { - this(initializer.getClusterName(), - checkNotEmpty(initializer.getContactPoints()), - initializer.getConfiguration(), - initializer.getInitialListeners()); + public Builder allowBetaProtocolVersion() { + if (protocolVersion != null) + throw new IllegalArgumentException( + "Can't use beta flag with initial protocol version of " + protocolVersion); + + this.allowBetaProtocolVersion = true; + this.protocolVersion = ProtocolVersion.NEWEST_BETA; + return this; } - private static List checkNotEmpty(List contactPoints) { - if (contactPoints.isEmpty()) - throw new IllegalArgumentException("Cannot build a cluster without contact points"); - return contactPoints; + /** + * Sets the maximum time to wait for schema agreement before returning from a DDL query. + * + *

If not set through this method, the default value (10 seconds) will be used. + * + * @param maxSchemaAgreementWaitSeconds the new value to set. + * @return this Builder. + * @throws IllegalArgumentException if the provided value is zero or less. + */ + public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSeconds) { + if (maxSchemaAgreementWaitSeconds <= 0) + throw new IllegalArgumentException("Max schema agreement wait must be greater than zero"); + + this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; + return this; } - private Cluster(String name, List contactPoints, Configuration configuration, Collection listeners) { - this.manager = new Manager(name, contactPoints, configuration, listeners); + /** + * The native protocol version to use. + * + *

The driver supports versions 1 to 5 of the native protocol. Higher versions of the + * protocol have more features and should be preferred, but this also depends on the Cassandra + * version: + * + *

+ * + * + * + * + * + * + * + * + * + *
+ * <table>
+ * <caption>Native protocol version to Cassandra version correspondence</caption>
+ * <tr><th>Protocol version</th><th>Minimum Cassandra version</th></tr>
+ * <tr><td>1</td><td>1.2</td></tr>
+ * <tr><td>2</td><td>2.0</td></tr>
+ * <tr><td>3</td><td>2.1</td></tr>
+ * <tr><td>4</td><td>2.2</td></tr>
+ * <tr><td>5</td><td>3.10</td></tr>
+ * </table>
+ * + *

By default, the driver will "auto-detect" which protocol version it can use when + * connecting to the first node. More precisely, it will try first with {@link + * ProtocolVersion#NEWEST_SUPPORTED}, and if that is not supported, fall back to the highest version + * supported by the first node it connects to. Please note that once the version is + * "auto-detected", it won't change: if the first node the driver connects to is a Cassandra 1.2 + * node and auto-detection is used (the default), then the native protocol version 1 will be used + * for the lifetime of the Cluster instance. + * + *

By using {@link Builder#allowBetaProtocolVersion()}, it is possible to force the driver to + * connect to a Cassandra node that supports the latest protocol beta version. Leaving this flag + * out will let the client connect with the latest released version. + * + *

This method allows forcing the use of a particular protocol version. Forcing version 1 is + * always fine since all Cassandra versions (at least all those supporting the native protocol in + * the first place) support it. However, please note that a number of features of the + * driver won't be available if that version of the protocol is in use, including result set + * paging, {@link BatchStatement}, executing a non-prepared query with binary values ({@link + * Session#execute(String, Object...)}), ... (those methods will throw an + * UnsupportedFeatureException). Using the protocol version 1 should thus only be considered + * when using Cassandra 1.2, until nodes have been upgraded to Cassandra 2.0. + * + *

If version 2 of the protocol is used, then Cassandra 1.2 nodes will be ignored (the driver + * won't connect to them). + * + *

The default behavior (auto-detection) is fine in almost all cases, but you may want to + * force a particular version if you have a Cassandra cluster with mixed 1.2/2.0 nodes (i.e. + * during a Cassandra upgrade). + * + * @param version the native protocol version to use. {@code null} is also supported to trigger + * auto-detection (see above) but this is the default (so you don't have to call this method + * for that behavior). + * @return this Builder. + */ + public Builder withProtocolVersion(ProtocolVersion version) { + if (allowBetaProtocolVersion) + throw new IllegalStateException( + "Can not set the version explicitly if `allowBetaProtocolVersion` was used."); + if (version.compareTo(ProtocolVersion.NEWEST_SUPPORTED) > 0) + throw new IllegalArgumentException( + "Can not use " + + version + + " protocol version. " + + "Newest supported protocol version is: " + + ProtocolVersion.NEWEST_SUPPORTED + + ". " + + "For beta versions, use `allowBetaProtocolVersion` instead"); + this.protocolVersion = version; + return this; } /** - * Initialize this Cluster instance. - *
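Illustrative only (not part of the patch): pinning the protocol version instead of relying on auto-detection, e.g. during a rolling upgrade. The contact point is a placeholder.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolVersion;

public class ProtocolVersionSketch {
  public static void main(String[] args) {
    // Pin the version instead of auto-detecting it from the first contacted node.
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withProtocolVersion(ProtocolVersion.V3) // all nodes must support v3 (Cassandra 2.1+)
        .build();
    cluster.close();
  }
}
```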

- * This method creates an initial connection to one of the contact points - * used to construct the {@code Cluster} instance. That connection is then - * used to populate the cluster {@link Metadata}. - *

- * Calling this method is optional in the sense that any call to one of the - * {@code connect} methods of this object will automatically trigger a call - * to this method beforehand. It is thus only useful to call this method if - * for some reason you want to populate the metadata (or test that at least - * one contact point can be reached) without creating a first {@code - * Session}. - *

- * Please note that this method only creates one control connection for - * gathering cluster metadata. In particular, it doesn't create any connection pools. - * Those are created when a new {@code Session} is created through - * {@code connect}. - *

- * This method has no effect if the cluster is already initialized. - * - * @return this {@code Cluster} object. - * @throws NoHostAvailableException if no host amongst the contact points - * can be reached. - * @throws AuthenticationException if an authentication error occurs - * while contacting the initial contact points. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. + * Adds a contact point - or many if the given address resolves to multiple InetAddresses + * (A records). + * + *

Contact points are addresses of Cassandra nodes that the driver uses to discover the + * cluster topology. Only one contact point is required (the driver will retrieve the address of + * the other nodes automatically), but it is usually a good idea to provide more than one + * contact point, because if that single contact point is unavailable, the driver cannot + * initialize itself correctly. + * + *

Note that by default (that is, unless you use the {@link #withLoadBalancingPolicy} method + * of this builder), the first successfully contacted host will be used to define the local + * data-center for the client. It follows that if you are running Cassandra in a multiple + * data-center setting, it is a good idea to only provide contact points that are in the same + * data-center as the client, or to manually provide the load balancing policy that suits your + * needs. + * + *

If the host name points to a DNS record with multiple A-records, all InetAddresses + * returned will be used. Make sure that all resulting InetAddresses point + * to the same cluster and datacenter. + * + * @param address the address of the node(s) to connect to. + * @return this Builder. + * @throws IllegalArgumentException if the given {@code address} could not be resolved. + * @throws SecurityException if a security manager is present and permission to resolve the host + * name is denied. */ - public Cluster init() { - this.manager.init(); + public Builder addContactPoint(String address) { + // We explicitly check for nulls because InetAddress.getByName() will happily + // accept it and use localhost (while a null here most likely means a user error, + // not "connect to localhost") + failIfCloud(); + if (address == null) throw new NullPointerException(); + + try { + InetAddress[] allByName = InetAddress.getAllByName(address); + Collections.addAll(this.rawHostContactPoints, allByName); return this; + } catch (UnknownHostException e) { + throw new IllegalArgumentException("Failed to add contact point: " + address, e); + } }
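A usage sketch (not from the patch); the addresses are placeholders for nodes in the client's local data center:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class ContactPointsSketch {
  public static void main(String[] args) {
    // A few contact points are enough; the driver discovers the rest of the cluster
    // from whichever one it reaches first.
    Cluster cluster = Cluster.builder()
        .addContactPoints("10.0.0.1", "10.0.0.2", "10.0.0.3")
        .withPort(9042)
        .build();
    Session session = cluster.connect();
    System.out.println("Connected to " + cluster.getMetadata().getClusterName());
    session.close();
    cluster.close();
  }
}
```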

- * Note that for building a cluster pragmatically, Cluster.Builder - * provides a slightly less verbose shortcut with {@link Builder#build}. - *

- * Also note that that all the contact points provided by {@code - * initializer} must share the same port. - * - * @param initializer the Cluster.Initializer to use - * @return the newly created Cluster instance - * @throws IllegalArgumentException if the list of contact points provided - * by {@code initializer} is empty or if not all those contact points have the same port. + * Adds a contact point using the given connection information. + * + *

You only need this method if you use a custom connection mechanism and have configured a + * custom {@link EndPointFactory}; otherwise, you can safely ignore it and use the higher level, + * host-and-port-based variants such as {@link #addContactPoint(String)}. */ - public static Cluster buildFrom(Initializer initializer) { - return new Cluster(initializer); + public Builder addContactPoint(EndPoint contactPoint) { + failIfCloud(); + contactPoints.add(contactPoint); + return this; } /** - * Creates a new {@link Cluster.Builder} instance. - *

- * This is a convenience method for {@code new Cluster.Builder()}. + * Adds contact points. * - * @return the new cluster builder. + *

See {@link Builder#addContactPoint} for more details on contact points. + * + *

Note that all contact points must be resolvable; if any of them cannot be + * resolved, this method will fail. + * + * @param addresses addresses of the nodes to add as contact points. + * @return this Builder. + * @throws IllegalArgumentException if any of the given {@code addresses} could not be resolved. + * @throws SecurityException if a security manager is present and permission to resolve the host + * name is denied. + * @see Builder#addContactPoint */ - public static Cluster.Builder builder() { - return new Cluster.Builder(); + public Builder addContactPoints(String... addresses) { + for (String address : addresses) addContactPoint(address); + return this; } /** - * Returns the current version of the driver. - *

- * This is intended for products that wrap or extend the driver, as a way to check - * compatibility if end-users override the driver version in their application. + * Adds contact points. + * + *

See {@link Builder#addContactPoint} for more details on contact points. + * + *

Note that all contact points must be resolvable; if any of them cannot be + * resolved, this method will fail. * - * @return the version. + * @param addresses addresses of the nodes to add as contact points. + * @return this Builder. + * @throws IllegalArgumentException if any of the given {@code addresses} could not be resolved. + * @throws SecurityException if a security manager is present and permission to resolve the host + * name is denied. + * @see Builder#addContactPoint */ - public static String getDriverVersion() { - return driverProperties.getString("driver.version"); + public Builder addContactPoints(InetAddress... addresses) { + failIfCloud(); + Collections.addAll(this.rawHostContactPoints, addresses); + return this; } /** - * Creates a new session on this cluster but does not initialize it. - *

- * Because this method does not perform any initialization, it cannot fail. - * The initialization of the session (the connection of the Session to the - * Cassandra nodes) will occur if either the {@link Session#init} method is - * called explicitly, or whenever the returned session object is used. - *

- * Once a session returned by this method gets initialized (see above), it - * will be set to no keyspace. If you want to set such session to a - * keyspace, you will have to explicitly execute a 'USE mykeyspace' query. - *

- * Note that if you do not particularly need to defer initialization, it is - * simpler to use one of the {@code connect()} method of this class. - * - * @return a new, non-initialized session on this cluster. + * Adds contact points. + * + *

See {@link Builder#addContactPoint} for more details on contact points. + * + * @param addresses addresses of the nodes to add as contact points. + * @return this Builder + * @see Builder#addContactPoint */ - public Session newSession() { - checkNotClosed(manager); - return manager.newSession(); + public Builder addContactPoints(Collection addresses) { + failIfCloud(); + this.rawHostContactPoints.addAll(addresses); + return this; } /** - * Creates a new session on this cluster and initialize it. - *

- * Note that this method will initialize the newly created session, trying - * to connect to the Cassandra nodes before returning. If you only want to - * create a Session object without initializing it right away, see - * {@link #newSession}. - * - * @return a new session on this cluster sets to no keyspace. - * @throws NoHostAvailableException if the Cluster has not been initialized - * yet ({@link #init} has not be called and this is the first connect call) - * and no host amongst the contact points can be reached. - * @throws AuthenticationException if an authentication error occurs while - * contacting the initial contact points. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. + * Adds contact points. + * + *

See {@link Builder#addContactPoint} for more details on contact points. Contrary to + * other {@code addContactPoints} methods, this method allows providing a different port for + * each contact point. Since Cassandra nodes must always all listen on the same port, this is + * rarely what you want and most users should prefer other {@code addContactPoints} methods to + * this one. However, this can be useful if the Cassandra nodes are behind a router and are not + * accessed directly. Note that if you are in this situation (Cassandra nodes are behind a + * router, not directly accessible), you almost surely want to provide a specific {@link + * AddressTranslator} (through {@link #withAddressTranslator}) to translate actual Cassandra + * node addresses to the addresses the driver should use, otherwise the driver will not be able + * to auto-detect new nodes (and will generally not function optimally). + * + * @param addresses addresses of the nodes to add as contact points. + * @return this Builder + * @see Builder#addContactPoint */ - public Session connect() { - try { - return Uninterruptibles.getUninterruptibly(connectAsync()); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + public Builder addContactPointsWithPorts(InetSocketAddress... addresses) { + failIfCloud(); + Collections.addAll(this.rawHostAndPortContactPoints, addresses); + return this; } /** - * Creates a new session on this cluster, initialize it and sets the - * keyspace to the provided one. - *

- * Note that this method will initialize the newly created session, trying - * to connect to the Cassandra nodes before returning. If you only want to - * create a Session object without initializing it right away, see - * {@link #newSession}. - * - * @param keyspace The name of the keyspace to use for the created - * {@code Session}. - * @return a new session on this cluster sets to keyspace - * {@code keyspaceName}. - * @throws NoHostAvailableException if the Cluster has not been initialized - * yet ({@link #init} has not be called and this is the first connect call) - * and no host amongst the contact points can be reached, or if no host can - * be contacted to set the {@code keyspace}. - * @throws AuthenticationException if an authentication error occurs while - * contacting the initial contact points. - * @throws InvalidQueryException if the keyspace does not exist. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. + * Adds contact points. + * + *

See {@link Builder#addContactPoint} for more details on contact points. Contrary to + * other {@code addContactPoints} methods, this method allows providing a different port for + * each contact point. Since Cassandra nodes must always all listen on the same port, this is + * rarely what you want and most users should prefer other {@code addContactPoints} methods to + * this one. However, this can be useful if the Cassandra nodes are behind a router and are not + * accessed directly. Note that if you are in this situation (Cassandra nodes are behind a + * router, not directly accessible), you almost surely want to provide a specific {@link + * AddressTranslator} (through {@link #withAddressTranslator}) to translate actual Cassandra + * node addresses to the addresses the driver should use, otherwise the driver will not be able + * to auto-detect new nodes (and will generally not function optimally). + * + * @param addresses addresses of the nodes to add as contact points. + * @return this Builder + * @see Builder#addContactPoint */ - public Session connect(String keyspace) { - try { - return Uninterruptibles.getUninterruptibly(connectAsync(keyspace)); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + public Builder addContactPointsWithPorts(Collection addresses) { + failIfCloud(); + this.rawHostAndPortContactPoints.addAll(addresses); + return this; } /** - * Creates a new session on this cluster and initializes it asynchronously. - *

- * This will also initialize the {@code Cluster} if needed; note that cluster - * initialization happens synchronously on the thread that called this method. - * Therefore it is recommended to initialize the cluster at application - * startup, and not rely on this method to do it. - * - * @return a future that will complete when the session is fully initialized. - * @throws NoHostAvailableException if the Cluster has not been initialized - * yet ({@link #init} has not been called and this is the first connect call) - * and no host amongst the contact points can be reached. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. - * @see #connect() + * Configures the load balancing policy to use for the new cluster. + * + *

If no load balancing policy is set through this method, {@link + * Policies#defaultLoadBalancingPolicy} will be used instead. + * + * @param policy the load balancing policy to use. + * @return this Builder. */ - public ListenableFuture connectAsync() { - return connectAsync(null); + public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) { + policiesBuilder.withLoadBalancingPolicy(policy); + return this; } /** - * Creates a new session on this cluster, and initializes it to the given - * keyspace asynchronously. - *

- * This will also initialize the {@code Cluster} if needed; note that cluster - * initialization happens synchronously on the thread that called this method. - * Therefore it is recommended to initialize the cluster at application - * startup, and not rely on this method to do it. - * - * @param keyspace The name of the keyspace to use for the created - * {@code Session}. - * @return a future that will complete when the session is fully initialized. - * @throws NoHostAvailableException if the Cluster has not been initialized - * yet ({@link #init} has not been called and this is the first connect call) - * and no host amongst the contact points can be reached. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. + * Configures the reconnection policy to use for the new cluster. + * + *

If no reconnection policy is set through this method, {@link + * Policies#DEFAULT_RECONNECTION_POLICY} will be used instead. + * + * @param policy the reconnection policy to use. + * @return this Builder. */ - public ListenableFuture connectAsync(final String keyspace) { - checkNotClosed(manager); - init(); - final Session session = manager.newSession(); - ListenableFuture sessionInitialized = session.initAsync(); - if (keyspace == null) { - return sessionInitialized; - } else { - final String useQuery = "USE " + keyspace; - ListenableFuture keyspaceSet = GuavaCompatibility.INSTANCE.transformAsync(sessionInitialized, new AsyncFunction() { - @Override - public ListenableFuture apply(Session session) throws Exception { - return session.executeAsync(useQuery); - } - }); - ListenableFuture withErrorHandling = GuavaCompatibility.INSTANCE.withFallback(keyspaceSet, new AsyncFunction() { - @Override - public ListenableFuture apply(Throwable t) throws Exception { - session.closeAsync(); - if (t instanceof SyntaxError) { - // Give a more explicit message, because it's probably caused by a bad keyspace name - SyntaxError e = (SyntaxError) t; - t = new SyntaxError(e.getAddress(), - String.format("Error executing \"%s\" (%s). Check that your keyspace name is valid", - useQuery, e.getMessage())); - } - throw Throwables.propagate(t); - } - }); - return Futures.transform(withErrorHandling, Functions.constant(session)); - } + public Builder withReconnectionPolicy(ReconnectionPolicy policy) { + policiesBuilder.withReconnectionPolicy(policy); + return this; } /** - * The name of this cluster object. - *

- * Note that this is not the Cassandra cluster name, but rather a name - * assigned to this Cluster object. Currently, that name is only used - * for one purpose: to distinguish exposed JMX metrics when multiple - * Cluster instances live in the same JVM (which should be rare in the first - * place). That name can be set at Cluster building time (through - * {@link Builder#withClusterName} for instance) but will default to a - * name like {@code cluster1} where each Cluster instance in the same JVM - * will have a different number. - * - * @return the name for this cluster instance. + * Configures the retry policy to use for the new cluster. + * + *

If no retry policy is set through this method, {@link Policies#DEFAULT_RETRY_POLICY} will + * be used instead. + * + * @param policy the retry policy to use. + * @return this Builder. */ - public String getClusterName() { - return manager.clusterName; + public Builder withRetryPolicy(RetryPolicy policy) { + policiesBuilder.withRetryPolicy(policy); + return this; } /** - * Returns read-only metadata on the connected cluster. - *

- * This includes the known nodes with their status as seen by the driver, - * as well as the schema definitions. Since this return metadata on the - * connected cluster, this method may trigger the creation of a connection - * if none has been established yet (neither {@code init()} nor {@code connect()} - * has been called yet). - * - * @return the cluster metadata. - * @throws NoHostAvailableException if the Cluster has not been initialized yet - * and no host amongst the contact points can be reached. - * @throws AuthenticationException if an authentication error occurs - * while contacting the initial contact points. - * @throws IllegalStateException if the Cluster was closed prior to calling - * this method. This can occur either directly (through {@link #close()} or - * {@link #closeAsync()}), or as a result of an error while initializing the - * Cluster. + * Configures the address translator to use for the new cluster. + * + *

See {@link AddressTranslator} for more detail on address translation, but the default + * translator, {@link IdentityTranslator}, should be correct in most cases. If unsure, stick to + * the default. + * + * @param translator the translator to use. + * @return this Builder. */ - public Metadata getMetadata() { - manager.init(); - return manager.metadata; + public Builder withAddressTranslator(AddressTranslator translator) { + policiesBuilder.withAddressTranslator(translator); + return this; } /** - * The cluster configuration. + * Configures the generator that will produce the client-side timestamp sent with each query. + * + *

This feature is only available with version {@link ProtocolVersion#V3 V3} or above of the + * native protocol. With earlier versions, timestamps are always generated server-side, and + * setting a generator through this method will have no effect. * - * @return the cluster configuration. + *

If no generator is set through this method, the driver will default to client-side + * timestamps by using {@link AtomicMonotonicTimestampGenerator}. + * + * @param timestampGenerator the generator to use. + * @return this Builder. */ - public Configuration getConfiguration() { - return manager.configuration; + public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { + policiesBuilder.withTimestampGenerator(timestampGenerator); + return this; } /** - * The cluster metrics. + * Configures the speculative execution policy to use for the new cluster. * - * @return the cluster metrics, or {@code null} if this cluster has not yet been {@link #init() initialized}, or if - * metrics collection has been disabled (that is if {@link Configuration#getMetricsOptions} returns {@code null}). + *
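Sketch only (not part of the patch): combining the policy-related builder methods above. The data-center name, delays and contact point are placeholders.

```java
import com.datastax.driver.core.AtomicMonotonicTimestampGenerator;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.DefaultRetryPolicy;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

public class PoliciesSketch {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withLoadBalancingPolicy(
            new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().withLocalDc("dc1").build()))
        .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 60000)) // 1 s .. 60 s
        .withRetryPolicy(DefaultRetryPolicy.INSTANCE)
        .withTimestampGenerator(new AtomicMonotonicTimestampGenerator()) // explicit default
        .build();
    cluster.close();
  }
}
```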

If no policy is set through this method, {@link + * Policies#defaultSpeculativeExecutionPolicy()} will be used instead. + * + * @param policy the policy to use. + * @return this Builder. */ - public Metrics getMetrics() { - checkNotClosed(manager); - return manager.metrics; + public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy policy) { + policiesBuilder.withSpeculativeExecutionPolicy(policy); + return this; } /** - * Registers the provided listener to be notified on hosts - * up/down/added/removed events. - *
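Another sketch (not from the patch), assuming a constant 500 ms speculative delay with at most 2 extra attempts; speculative executions only apply to statements marked idempotent.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ConstantSpeculativeExecutionPolicy;

public class SpeculativeExecutionSketch {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withSpeculativeExecutionPolicy(
            // after 500 ms without a response, try the next host, at most 2 extra times
            new ConstantSpeculativeExecutionPolicy(500, 2))
        .build();
    cluster.close();
  }
}
```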

- * Registering the same listener multiple times is a no-op. - *

- * This method should be used to register additional listeners - * on an already-initialized cluster. - * To add listeners to a cluster object prior to its initialization, - * use {@link Builder#withInitialListeners(Collection)}. - * Calling this method on a non-initialized cluster - * will result in the listener being - * {@link com.datastax.driver.core.Host.StateListener#onRegister(Cluster) notified} - * twice of cluster registration: once inside this method, and once at cluster initialization. - * - * @param listener the new {@link Host.StateListener} to register. - * @return this {@code Cluster} object; + * Configures the endpoint factory to use for the new cluster. + * + *

This is a low-level component for advanced scenarios where connecting to a node requires + * more than its socket address. If you're simply using host+port, the default factory is + * sufficient. */ - public Cluster register(Host.StateListener listener) { - checkNotClosed(manager); - boolean added = manager.listeners.add(listener); - if (added) - listener.onRegister(this); - return this; + public Builder withEndPointFactory(EndPointFactory endPointFactory) { + policiesBuilder.withEndPointFactory(endPointFactory); + return this; } /** - * Unregisters the provided listener from being notified on hosts events. - *

- * This method is a no-op if {@code listener} hasn't previously been - * registered against this Cluster. + * Configures the {@link CodecRegistry} instance to use for the new cluster. + * + *

If no codec registry is set through this method, {@link CodecRegistry#DEFAULT_INSTANCE} + * will be used instead. * - * @param listener the {@link Host.StateListener} to unregister. - * @return this {@code Cluster} object; + *

Note that if two or more {@link Cluster} instances are configured to use the default codec + * registry, they are going to share the same instance. In this case, care should be taken when + * registering new codecs on it as any codec registered by one cluster would be immediately + * available to others sharing the same default instance. + * + * @param codecRegistry the codec registry to use. + * @return this Builder. */ - public Cluster unregister(Host.StateListener listener) { - checkNotClosed(manager); - boolean removed = manager.listeners.remove(listener); - if (removed) - listener.onUnregister(this); - return this; + public Builder withCodecRegistry(CodecRegistry codecRegistry) { + configurationBuilder.withCodecRegistry(codecRegistry); + return this; } /** - * Registers the provided tracker to be updated with hosts read - * latencies. - *
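Sketch (not from the patch): giving a cluster its own registry instead of sharing `CodecRegistry.DEFAULT_INSTANCE`.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;

public class CodecRegistrySketch {
  public static void main(String[] args) {
    // A dedicated registry: codecs registered here are not visible to clusters
    // that use the shared default instance.
    CodecRegistry myCodecRegistry = new CodecRegistry();
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withCodecRegistry(myCodecRegistry)
        .build();
    cluster.close();
  }
}
```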

- * Registering the same tracker multiple times is a no-op. - *

- * Beware that the registered tracker's - * {@link LatencyTracker#update(Host, Statement, Exception, long) update} - * method will be called - * very frequently (at the end of every query to a Cassandra host) and - * should thus not be costly. - *

- * The main use case for a {@link LatencyTracker} is to allow - * load balancing policies to implement latency awareness. - * For example, {@link LatencyAwarePolicy} registers it's own internal - * {@code LatencyTracker} (automatically, you don't have to call this - * method directly). - * - * @param tracker the new {@link LatencyTracker} to register. - * @return this {@code Cluster} object; + * Uses the provided credentials when connecting to Cassandra hosts. + * + *

This should be used if the Cassandra cluster has been configured to use the {@code + * PasswordAuthenticator}. If the default {@code AllowAllAuthenticator} is used instead, + * using this method has no effect. + * + * @param username the username to use to login to Cassandra hosts. + * @param password the password corresponding to {@code username}. + * @return this Builder. + */ - public Cluster register(LatencyTracker tracker) { - checkNotClosed(manager); - boolean added = manager.latencyTrackers.add(tracker); - if (added) - tracker.onRegister(this); - return this; + public Builder withCredentials(String username, String password) { + this.authProvider = new PlainTextAuthProvider(username, password); + return this; + } /** - * Unregisters the provided latency tracking from being updated - * with host read latencies. - *

- * This method is a no-op if {@code tracker} hasn't previously been - * registered against this Cluster. - * - * @param tracker the {@link LatencyTracker} to unregister. - * @return this {@code Cluster} object; + * Use the specified AuthProvider when connecting to Cassandra hosts. + * + *

Use this method when a custom authentication scheme is in place. You shouldn't call both + * this method and {@code withCredentials} on the same {@code Builder} instance as one will + * supersede the other. + * + * @param authProvider the {@link AuthProvider} to use to login to Cassandra hosts. + * @return this Builder */ - public Cluster unregister(LatencyTracker tracker) { - checkNotClosed(manager); - boolean removed = manager.latencyTrackers.remove(tracker); - if (removed) - tracker.onUnregister(this); - return this; + public Builder withAuthProvider(AuthProvider authProvider) { + this.authProvider = authProvider; + return this; + } /** - * Registers the provided listener to be updated with schema change events. - *

- * Registering the same listener multiple times is a no-op. + * Sets the compression to use for the transport. * - * @param listener the new {@link SchemaChangeListener} to register. - * @return this {@code Cluster} object; + * @param compression the compression to set. + * @return this Builder. + * @see ProtocolOptions.Compression */ - public Cluster register(SchemaChangeListener listener) { - checkNotClosed(manager); - boolean added = manager.schemaChangeListeners.add(listener); - if (added) - listener.onRegister(this); - return this; + public Builder withCompression(ProtocolOptions.Compression compression) { + this.compression = compression; + return this; } /** - * Unregisters the provided schema change listener from being updated - * with schema change events. - *

- * This method is a no-op if {@code listener} hasn't previously been - * registered against this Cluster. - * - * @param listener the {@link SchemaChangeListener} to unregister. - * @return this {@code Cluster} object; + * Disables metrics collection for the created cluster (metrics are enabled by default + * otherwise). + * + * @return this builder. */ - public Cluster unregister(SchemaChangeListener listener) { - checkNotClosed(manager); - boolean removed = manager.schemaChangeListeners.remove(listener); - if (removed) - listener.onUnregister(this); - return this; + public Builder withoutMetrics() { + this.metricsEnabled = false; + return this; } /** - * Initiates a shutdown of this cluster instance. - *

- * This method is asynchronous and return a future on the completion - * of the shutdown process. As soon a the cluster is shutdown, no - * new request will be accepted, but already submitted queries are - * allowed to complete. This method closes all connections from all - * sessions and reclaims all resources used by this Cluster - * instance. - *

- * If for some reason you wish to expedite this process, the - * {@link CloseFuture#force} can be called on the result future. - *

- * This method has no particular effect if the cluster was already closed - * (in which case the returned future will return immediately). - * - * @return a future on the completion of the shutdown process. + * Enables the use of SSL for the created {@code Cluster}. + * + *

Calling this method will use the JDK-based implementation with the default options (see + * {@link RemoteEndpointAwareJdkSSLOptions.Builder}). This is thus a shortcut for {@code + * withSSL(JdkSSLOptions.builder().build())}. + * + *

Note that if SSL is enabled, the driver will not connect to any Cassandra nodes that + * don't have SSL enabled, and it is strongly advised to enable SSL on every Cassandra node if + * you plan on using SSL in the driver. + * + * @return this builder. + */ - public CloseFuture closeAsync() { - return manager.close(); + public Builder withSSL() { + this.sslOptions = RemoteEndpointAwareJdkSSLOptions.builder().build(); + return this; + } /** - * Initiates a shutdown of this cluster instance and blocks until - * that shutdown completes. - *
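Sketch (not from the patch); the credentials are placeholders and only matter if `PasswordAuthenticator` is enabled on the nodes.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;

public class SslSketch {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withCredentials("app_user", "app_password")
        // Equivalent to withSSL(): JDK provider with default options.
        .withSSL(RemoteEndpointAwareJdkSSLOptions.builder().build())
        .build();
    cluster.close();
  }
}
```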

- * This method is a shortcut for {@code closeAsync().get()}. + * Enable the use of SSL for the created {@code Cluster} using the provided options. + * + * @param sslOptions the SSL options to use. + * @return this builder. */ - @Override - public void close() { - try { - closeAsync().get(); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } + public Builder withSSL(SSLOptions sslOptions) { + this.sslOptions = sslOptions; + return this; } /** - * Whether this Cluster instance has been closed. - *

- * Note that this method returns true as soon as one of the close methods - * ({@link #closeAsync} or {@link #close}) has been called, it does not guarantee - * that the closing is done. If you want to guarantee that the closing is done, - * you can call {@code close()} and wait until it returns (or call the get method - * on {@code closeAsync()} with a very short timeout and check this doesn't timeout). - * - * @return {@code true} if this Cluster instance has been closed, {@code false} - * otherwise. + * Register the provided listeners in the newly created cluster. + * + *

Note: repeated calls to this method will override the previous ones. + * + * @param listeners the listeners to register. + * @return this builder. */ - public boolean isClosed() { - return manager.closeFuture.get() != null; + public Builder withInitialListeners(Collection listeners) { + this.listeners = listeners; + return this; } - private static void checkNotClosed(Manager manager) { - if (manager.isClosed()) - throw new IllegalStateException("Can't use this cluster instance because it was previously closed"); + /** + * Disables JMX reporting of the metrics. + * + *

JMX reporting is enabled by default (see {@link Metrics}) but can be disabled using this + * option. If metrics are disabled, this is a no-op. + * + * @return this builder. + */ + public Builder withoutJMXReporting() { + this.jmxEnabled = false; + return this; } /** - * Initializer for {@link Cluster} instances. - *

- * If you want to create a new {@code Cluster} instance programmatically, - * then it is advised to use {@link Cluster.Builder} which can be obtained from the - * {@link Cluster#builder} method. - *

- * But it is also possible to implement a custom {@code Initializer} that - * retrieves initialization from a web-service or from a configuration file. + * Sets the PoolingOptions to use for the newly created Cluster. + * + *

If no pooling options are set through this method, default pooling options will be used. + * + * @param options the pooling options to use. + * @return this builder. */ - public interface Initializer { - - /** - * An optional name for the created cluster. - *

- * Such name is optional (a default name will be created otherwise) and is currently - * only use for JMX reporting of metrics. See {@link Cluster#getClusterName} for more - * information. - * - * @return the name for the created cluster or {@code null} to use an automatically - * generated name. - */ - public String getClusterName(); + public Builder withPoolingOptions(PoolingOptions options) { + configurationBuilder.withPoolingOptions(options); + return this; + } - /** - * Returns the initial Cassandra hosts to connect to. - * - * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} - * for more details on contact points. - */ - public List getContactPoints(); - - /** - * The configuration to use for the new cluster. - *

- * Note that some configuration can be modified after the cluster - * initialization but some others cannot. In particular, the ones that - * cannot be changed afterwards includes: - *

    - *
- * <ul>
- *   <li>the port use to connect to Cassandra nodes (see {@link ProtocolOptions}).</li>
- *   <li>the policies used (see {@link Policies}).</li>
- *   <li>the authentication info provided (see {@link Configuration}).</li>
- *   <li>whether metrics are enabled (see {@link Configuration}).</li>
- * </ul>
- * - * @return the configuration to use for the new cluster. - */ - public Configuration getConfiguration(); - - /** - * Optional listeners to register against the newly created cluster. - *

- * Note that contrary to listeners registered post Cluster creation, - * the listeners returned by this method will see {@link Host.StateListener#onAdd} - * events for the initial contact points. - * - * @return a possibly empty collection of {@code Host.StateListener} to register - * against the newly created cluster. - */ - public Collection getInitialListeners(); + /** + * Sets the SocketOptions to use for the newly created Cluster. + * + *

If no socket options are set through this method, default socket options will be used. + * + * @param options the socket options to use. + * @return this builder. + */ + public Builder withSocketOptions(SocketOptions options) { + configurationBuilder.withSocketOptions(options); + return this; } /** - * Helper class to build {@link Cluster} instances. + * Sets the QueryOptions to use for the newly created Cluster. + * + *

If no query options are set through this method, default query options will be used. + * + * @param options the query options to use. + * @return this builder. */ - public static class Builder implements Initializer { + public Builder withQueryOptions(QueryOptions options) { + configurationBuilder.withQueryOptions(options); + return this; + } - private String clusterName; - private final List addresses = new ArrayList(); - private final List rawAddresses = new ArrayList(); - private int port = ProtocolOptions.DEFAULT_PORT; - private int maxSchemaAgreementWaitSeconds = ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS; - private ProtocolVersion protocolVersion; - private AuthProvider authProvider = AuthProvider.NONE; + /** + * Sets the threading options to use for the newly created Cluster. + * + *

If no options are set through this method, a new instance of {@link ThreadingOptions} will + * be used. + * + * @param options the options. + * @return this builder. + */ + public Builder withThreadingOptions(ThreadingOptions options) { + configurationBuilder.withThreadingOptions(options); + return this; + } - private final Policies.Builder policiesBuilder = Policies.builder(); - private final Configuration.Builder configurationBuilder = Configuration.builder(); + /** + * Set the {@link NettyOptions} to use for the newly created Cluster. + * + *

If no Netty options are set through this method, {@link NettyOptions#DEFAULT_INSTANCE} + * will be used as a default value, which means that no customization will be applied. + * + * @param nettyOptions the {@link NettyOptions} to use. + * @return this builder. + */ + public Builder withNettyOptions(NettyOptions nettyOptions) { + configurationBuilder.withNettyOptions(nettyOptions); + return this; + } - private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE; - private SSLOptions sslOptions = null; - private boolean metricsEnabled = true; - private boolean jmxEnabled = true; - private boolean allowBetaProtocolVersion = false; + /** + * Enables the NO_COMPACT startup option. + *

+ * When this option is supplied, SELECT, UPDATE, DELETE and + * BATCH statements on COMPACT STORAGE tables function in "compatibility" mode which + * allows seeing these tables as if they were "regular" CQL tables. + *

+ * This option only affects interactions with tables using COMPACT STORAGE and is only supported by + * C* 4.0+ and DSE 6.0+. + * + * @return this builder. + * @see <a href="https://issues.apache.org/jira/browse/CASSANDRA-10857">CASSANDRA-10857</a> + */ + public Builder withNoCompact() { + this.noCompact = true; + return this; + } - private Collection listeners; + /** + * Configures this Builder for Cloud deployments by retrieving connection information from the + * provided {@link File}. + * + *

To connect to a Cloud database, you must first download the secure database bundle from + * the DataStax Constellation console that contains the connection information, then instruct + * the driver to read its contents using either this method or one of its variants. + * + *

For more information, please refer to the DataStax Constellation documentation. + * + *

Note that the provided stream will be consumed and closed when this method will + * return; attempting to reuse it afterwards will result in an error being thrown. + * + * @param cloudConfigFile File that contains secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(URL) + * @see #withCloudSecureConnectBundle(InputStream) + */ + public Builder withCloudSecureConnectBundle(File cloudConfigFile) { + try { + return withCloudSecureConnectBundle(cloudConfigFile.toURI().toURL()); + } catch (MalformedURLException e) { + throw new IllegalArgumentException( + "The cloudConfigFile URL " + cloudConfigFile + " is in the wrong format.", e); + } + } - @Override - public String getClusterName() { - return clusterName; - } + /** + * Configures this Builder for Cloud deployments by retrieving connection information from the + * provided {@link URL}. + * + *

To connect to a Cloud database, you must first download the secure database bundle from + * the DataStax Constellation console that contains the connection information, then instruct + * the driver to read its contents using either this method or one of its variants. + * + *

For more information, please refer to the DataStax Constellation documentation. + * + *

Note that the provided stream will be consumed and closed when this method will + * return; attempting to reuse it afterwards will result in an error being thrown. + * + * @param cloudConfigUrl URL to the secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(File) + * @see #withCloudSecureConnectBundle(InputStream) + */ + public Builder withCloudSecureConnectBundle(URL cloudConfigUrl) { + CloudConfig cloudConfig; + try { + cloudConfig = new CloudConfigFactory().createCloudConfig(cloudConfigUrl.openStream()); + } catch (GeneralSecurityException e) { + throw new IllegalStateException( + "Cannot construct cloud config from the cloudConfigUrl: " + cloudConfigUrl, e); + } catch (IOException e) { + throw new IllegalStateException( + "Cannot construct cloud config from the cloudConfigUrl: " + cloudConfigUrl, e); + } + + return addCloudConfigToBuilder(cloudConfig); + } - @Override - public List getContactPoints() { - if (rawAddresses.isEmpty()) - return addresses; - - List allAddresses = new ArrayList(addresses); - for (InetAddress address : rawAddresses) - allAddresses.add(new InetSocketAddress(address, port)); - return allAddresses; - } + /** + * Configures this Builder for Cloud deployments by retrieving connection information from the + * provided {@link InputStream}. + * + *

To connect to a Cloud database, you must first download the secure database bundle from + * the DataStax Constellation console that contains the connection information, then instruct + * the driver to read its contents using either this method or one of its variants. + * + *

For more information, please refer to the DataStax Constellation documentation. + * + *

Note that the provided stream will be consumed and closed when this method will + * return; attempting to reuse it afterwards will result in an error being thrown. + * + * @param cloudConfigInputStream A stream containing the secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(File) + * @see #withCloudSecureConnectBundle(URL) + */ + public Builder withCloudSecureConnectBundle(InputStream cloudConfigInputStream) { + CloudConfig cloudConfig; + try { + cloudConfig = new CloudConfigFactory().createCloudConfig(cloudConfigInputStream); + } catch (GeneralSecurityException e) { + throw new IllegalStateException("Cannot construct cloud config from the InputStream.", e); + } catch (IOException e) { + throw new IllegalStateException("Cannot construct cloud config from the InputStream.", e); + } + + return addCloudConfigToBuilder(cloudConfig); + } - /** - * An optional name for the create cluster. - *
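Sketch (not from the patch); the bundle path, keyspace and credentials are placeholders. Contact points must not be set when a secure connect bundle is used.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import java.io.File;

public class CloudSketch {
  public static void main(String[] args) {
    // No addContactPoint(s) here: the bundle supplies the endpoints and SSL settings.
    Cluster cluster = Cluster.builder()
        .withCloudSecureConnectBundle(new File("/path/to/secure-connect-mydb.zip"))
        .withCredentials("client_id", "client_secret")
        .build();
    Session session = cluster.connect("my_keyspace");
    session.close();
    cluster.close();
  }
}
```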

- * Note: this is not related to the Cassandra cluster name (though you - * are free to provide the same name). See {@link Cluster#getClusterName} for - * details. - *

- * If you use this method and create more than one Cluster instance in the - * same JVM (which should be avoided unless you need to connect to multiple - * Cassandra clusters), you should make sure each Cluster instance get a - * unique name or you may have a problem with JMX reporting. - * - * @param name the cluster name to use for the created Cluster instance. - * @return this Builder. - */ - public Builder withClusterName(String name) { - this.clusterName = name; - return this; - } + private Builder addCloudConfigToBuilder(CloudConfig cloudConfig) { + Builder builder = + withEndPointFactory(new SniEndPointFactory(cloudConfig.getProxyAddress())) + .withSSL(cloudConfig.getSslOptions()); + + if (cloudConfig.getAuthProvider() != null) { + builder = builder.withAuthProvider(cloudConfig.getAuthProvider()); + } + if (builder.rawHostContactPoints.size() > 0 + || builder.rawHostAndPortContactPoints.size() > 0 + || builder.contactPoints.size() > 0) { + throw new IllegalStateException( + "Can't use withCloudSecureConnectBundle if you've already called addContactPoint(s)"); + } + for (EndPoint endPoint : cloudConfig.getEndPoints()) { + builder.addContactPoint(endPoint); + } + isCloud = true; + return builder; + } - /** - * The port to use to connect to the Cassandra host. - *

- * If not set through this method, the default port (9042) will be used - * instead. - * - * @param port the port to set. - * @return this Builder. - */ - public Builder withPort(int port) { - this.port = port; - return this; - } + private void failIfCloud() { + if (isCloud) { + throw new IllegalStateException( + "Can't use addContactPoint(s) if you've already called withCloudSecureConnectBundle"); + } + } - /** - * Create cluster connection using latest development protocol version, - * which is currently in beta. Calling this method will result into setting - * USE_BETA flag in all outgoing messages, which allows server to negotiate - * the supported protocol version even if it is currently in beta. - *

- * This feature is only available starting with version {@link ProtocolVersion#V5 V5}. - *

- * Use with caution, refer to the server and protocol documentation for the details - * on latest protocol version. - * - * @return this Builder. - */ - public Builder allowBetaProtocolVersion() { - if (protocolVersion != null) - throw new IllegalArgumentException("Can't use beta flag with initial protocol version of " + protocolVersion); + /** + * The configuration that will be used for the new cluster. + * + *

You should not modify this object directly because changes made to the returned + * object may not be used by the cluster build. Instead, you should use the other methods of + * this {@code Builder}. + * + * @return the configuration to use for the new cluster. + */ + @Override + public Configuration getConfiguration() { + ProtocolOptions protocolOptions = + new ProtocolOptions( + port, + protocolVersion, + maxSchemaAgreementWaitSeconds, + sslOptions, + authProvider, + noCompact) + .setCompression(compression); + + MetricsOptions metricsOptions = new MetricsOptions(metricsEnabled, jmxEnabled); + + return configurationBuilder + .withProtocolOptions(protocolOptions) + .withMetricsOptions(metricsOptions) + .withPolicies(policiesBuilder.build()) + .build(); + } - this.allowBetaProtocolVersion = true; - this.protocolVersion = ProtocolVersion.NEWEST_BETA; - return this; - } + @Override + public Collection getInitialListeners() { + return listeners == null ? Collections.emptySet() : listeners; + } - /** - * Sets the maximum time to wait for schema agreement before returning from a DDL query. - *

- * If not set through this method, the default value (10 seconds) will be used. - * - * @param maxSchemaAgreementWaitSeconds the new value to set. - * @return this Builder. - * @throws IllegalStateException if the provided value is zero or less. - */ - public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSeconds) { - if (maxSchemaAgreementWaitSeconds <= 0) - throw new IllegalArgumentException("Max schema agreement wait must be greater than zero"); + /** + * Builds the cluster with the configured set of initial contact points and policies. + * + *

This is a convenience method for {@code Cluster.buildFrom(this)}. + * + * @return the newly built Cluster instance. + */ + public Cluster build() { + return Cluster.buildFrom(this); + } + } + + static long timeSince(long startNanos, TimeUnit destUnit) { + return destUnit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS); + } + + private static String generateClusterName() { + return "cluster" + CLUSTER_ID.incrementAndGet(); + } + + /** + * The sessions and hosts managed by this a Cluster instance. + * + *

Note: the reason we create a Manager object separate from Cluster is that Manager is not + * publicly visible. For instance, we wouldn't want user to be able to call the {@link #onUp} and + * {@link #onDown} methods. + */ + class Manager implements Connection.DefaultResponseHandler { + + final String clusterName; + private volatile boolean isInit; + private volatile boolean isFullyInit; + private Exception initException; + // Initial contacts point + final List contactPoints; + final Set sessions = new CopyOnWriteArraySet(); + + Metadata metadata; + final Configuration configuration; + Metrics metrics; + + Connection.Factory connectionFactory; + ControlConnection controlConnection; + + final ConvictionPolicy.Factory convictionPolicyFactory = + new ConvictionPolicy.DefaultConvictionPolicy.Factory(); + + ListeningExecutorService executor; + ListeningExecutorService blockingExecutor; + ScheduledExecutorService reconnectionExecutor; + ScheduledExecutorService scheduledTasksExecutor; + + BlockingQueue executorQueue; + BlockingQueue blockingExecutorQueue; + BlockingQueue reconnectionExecutorQueue; + BlockingQueue scheduledTasksExecutorQueue; + + ConnectionReaper reaper; + + final AtomicReference closeFuture = new AtomicReference(); + + // All the queries that have been prepared (we keep them so we can re-prepared them when a node + // fail or a + // new one join the cluster). + // Note: we could move this down to the session level, but since prepared statement are global + // to a node, + // this would yield a slightly less clear behavior. + ConcurrentMap preparedQueries; + + final Set listeners; + final Set latencyTrackers = new CopyOnWriteArraySet(); + final Set schemaChangeListeners = + new CopyOnWriteArraySet(); + + EventDebouncer nodeListRefreshRequestDebouncer; + EventDebouncer nodeRefreshRequestDebouncer; + EventDebouncer schemaRefreshRequestDebouncer; + + private Manager( + String clusterName, + List contactPoints, + Configuration configuration, + Collection listeners) { + this.clusterName = clusterName == null ? generateClusterName() : clusterName; + this.configuration = configuration; + this.contactPoints = contactPoints; + this.listeners = new CopyOnWriteArraySet(listeners); + } - this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; - return this; - } + // Initialization is not too performance intensive and in practice there shouldn't be contention + // on it so synchronized is good enough. + synchronized void init() { + checkNotClosed(this); + if (isInit) { + return; + } + isInit = true; + try { + logger.debug("Starting new cluster with contact points " + contactPoints); + + this.configuration.register(this); + + ThreadingOptions threadingOptions = this.configuration.getThreadingOptions(); + + // executor + ExecutorService tmpExecutor = threadingOptions.createExecutor(clusterName); + this.executorQueue = + (tmpExecutor instanceof ThreadPoolExecutor) + ? ((ThreadPoolExecutor) tmpExecutor).getQueue() + : null; + this.executor = MoreExecutors.listeningDecorator(tmpExecutor); + + // blocking executor + ExecutorService tmpBlockingExecutor = threadingOptions.createBlockingExecutor(clusterName); + this.blockingExecutorQueue = + (tmpBlockingExecutor instanceof ThreadPoolExecutor) + ? 
((ThreadPoolExecutor) tmpBlockingExecutor).getQueue() + : null; + this.blockingExecutor = MoreExecutors.listeningDecorator(tmpBlockingExecutor); + + // reconnection executor + this.reconnectionExecutor = threadingOptions.createReconnectionExecutor(clusterName); + this.reconnectionExecutorQueue = + (reconnectionExecutor instanceof ThreadPoolExecutor) + ? ((ThreadPoolExecutor) reconnectionExecutor).getQueue() + : null; + + // scheduled tasks executor + this.scheduledTasksExecutor = threadingOptions.createScheduledTasksExecutor(clusterName); + this.scheduledTasksExecutorQueue = + (scheduledTasksExecutor instanceof ThreadPoolExecutor) + ? ((ThreadPoolExecutor) scheduledTasksExecutor).getQueue() + : null; + + this.reaper = new ConnectionReaper(threadingOptions.createReaperExecutor(clusterName)); + this.metadata = new Metadata(this); + this.connectionFactory = new Connection.Factory(this, configuration); + this.controlConnection = new ControlConnection(this); + this.metrics = configuration.getMetricsOptions().isEnabled() ? new Metrics(this) : null; + this.preparedQueries = new MapMaker().weakValues().makeMap(); + + // create debouncers - at this stage, they are not running yet + final QueryOptions queryOptions = configuration.getQueryOptions(); + this.nodeListRefreshRequestDebouncer = + new EventDebouncer( + "Node list refresh", + scheduledTasksExecutor, + new NodeListRefreshRequestDeliveryCallback()) { + + @Override + int maxPendingEvents() { + return configuration.getQueryOptions().getMaxPendingRefreshNodeListRequests(); + } + + @Override + long delayMs() { + return configuration.getQueryOptions().getRefreshNodeListIntervalMillis(); + } + }; + this.nodeRefreshRequestDebouncer = + new EventDebouncer( + "Node refresh", scheduledTasksExecutor, new NodeRefreshRequestDeliveryCallback()) { + + @Override + int maxPendingEvents() { + return configuration.getQueryOptions().getMaxPendingRefreshNodeRequests(); + } + + @Override + long delayMs() { + return configuration.getQueryOptions().getRefreshNodeIntervalMillis(); + } + }; + this.schemaRefreshRequestDebouncer = + new EventDebouncer( + "Schema refresh", + scheduledTasksExecutor, + new SchemaRefreshRequestDeliveryCallback()) { + + @Override + int maxPendingEvents() { + return configuration.getQueryOptions().getMaxPendingRefreshSchemaRequests(); + } + + @Override + long delayMs() { + return configuration.getQueryOptions().getRefreshSchemaIntervalMillis(); + } + }; - /** - * The native protocol version to use. - *

- * The driver supports versions 1 to 5 of the native protocol. Higher versions - * of the protocol have more features and should be preferred, but this also depends - * on the Cassandra version: - *

- * <table>
- * <caption>Native protocol version to Cassandra version correspondence</caption>
- * <tr><th>Protocol version</th><th>Minimum Cassandra version</th></tr>
- * <tr><td>1</td><td>1.2</td></tr>
- * <tr><td>2</td><td>2.0</td></tr>
- * <tr><td>3</td><td>2.1</td></tr>
- * <tr><td>4</td><td>2.2</td></tr>
- * <tr><td>5</td><td>3.10</td></tr>
- * </table>
- *

- * By default, the driver will "auto-detect" which protocol version it can use - * when connecting to the first node. More precisely, it will try first with - * {@link ProtocolVersion#NEWEST_SUPPORTED}, and if not supported, fall back to - * the highest version supported by the first node it connects to. Please note - * that once the version is "auto-detected", it won't change: if the first node - * the driver connects to is a Cassandra 1.2 node and auto-detection is used - * (the default), then the native protocol version 1 will be used for the lifetime - * of the Cluster instance. - *

- * By using {@link Builder#allowBetaProtocolVersion()}, it is - * possible to force the driver to connect to a Cassandra node that supports the latest - * protocol beta version. Leaving this flag out will let the client connect with the - * latest released version. - *

- * This method allows forcing the use of a particular protocol version. Forcing - * version 1 is always fine since all Cassandra versions (at least all those - * supporting the native protocol in the first place) support it so far. However, - * please note that a number of features of the driver won't be available if that - * version of the protocol is in use, including result set paging, - * {@link BatchStatement}, executing a non-prepared query with binary values - * ({@link Session#execute(String, Object...)}), ... (those methods will throw - * an UnsupportedFeatureException). Using the protocol version 1 should thus - * only be considered when using Cassandra 1.2, until nodes have been upgraded - * to Cassandra 2.0. - *

- * If version 2 of the protocol is used, then Cassandra 1.2 nodes will be ignored - * (the driver won't connect to them). - *

- * The default behavior (auto-detection) is fine in almost all case, but you may - * want to force a particular version if you have a Cassandra cluster with mixed - * 1.2/2.0 nodes (i.e. during a Cassandra upgrade). - * - * @param version the native protocol version to use. {@code null} is also supported - * to trigger auto-detection (see above) but this is the default (so you don't have - * to call this method for that behavior). - * @return this Builder. - */ - public Builder withProtocolVersion(ProtocolVersion version) { - if (allowBetaProtocolVersion) - throw new IllegalStateException("Can not set the version explicitly if `allowBetaProtocolVersion` was used."); - if (version.compareTo(ProtocolVersion.NEWEST_SUPPORTED) > 0) - throw new IllegalArgumentException("Can not use " + version + " protocol version. " + - "Newest supported protocol version is: " + ProtocolVersion.NEWEST_SUPPORTED + ". " + - "For beta versions, use `allowBetaProtocolVersion` instead"); - this.protocolVersion = version; - return this; - } + this.scheduledTasksExecutor.scheduleWithFixedDelay( + new CleanupIdleConnectionsTask(), 10, 10, TimeUnit.SECONDS); + + for (EndPoint contactPoint : contactPoints) { + metadata.addContactPoint(contactPoint); + } + // Initialize the control connection: + negotiateProtocolVersionAndConnect(); + if (controlConnection.isCloud() && !configuration.getQueryOptions().isConsistencySet()) { + configuration.getQueryOptions().setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM); + } + // The control connection: + // - marked contact points down if they couldn't be reached + // - triggered an initial full refresh of metadata.allHosts. If any contact points weren't + // valid, they won't appear in it. + Set downContactPointHosts = Sets.newHashSet(); + Set removedContactPointHosts = Sets.newHashSet(); + for (Host contactPoint : metadata.getContactPoints()) { + if (!metadata.allHosts().contains(contactPoint)) { + removedContactPointHosts.add(contactPoint); + } else if (contactPoint.state == Host.State.DOWN) { + downContactPointHosts.add(contactPoint); + } + } + + // Now that the control connection is ready, we have all the information we need about the + // nodes (datacenter, rack...) to initialize the load balancing policy + Set lbpContactPoints = Sets.newHashSet(metadata.getContactPoints()); + lbpContactPoints.removeAll(removedContactPointHosts); + lbpContactPoints.removeAll(downContactPointHosts); + loadBalancingPolicy().init(Cluster.this, lbpContactPoints); + + speculativeExecutionPolicy().init(Cluster.this); + configuration.getPolicies().getRetryPolicy().init(Cluster.this); + reconnectionPolicy().init(Cluster.this); + configuration.getPolicies().getAddressTranslator().init(Cluster.this); + for (LatencyTracker tracker : latencyTrackers) tracker.onRegister(Cluster.this); + for (Host.StateListener listener : listeners) listener.onRegister(Cluster.this); + for (Host host : removedContactPointHosts) { + loadBalancingPolicy().onRemove(host); + for (Host.StateListener listener : listeners) listener.onRemove(host); + } + + for (Host host : downContactPointHosts) { + loadBalancingPolicy().onDown(host); + for (Host.StateListener listener : listeners) listener.onDown(host); + startPeriodicReconnectionAttempt(host, true); + } + + configuration.getPoolingOptions().setProtocolVersion(protocolVersion()); + + for (Host host : metadata.allHosts()) { + // If the host is down at this stage, it's a contact point that the control connection + // failed to reach. 
+ // Reconnection attempts are already scheduled, and the LBP and listeners have been + // notified above. + if (host.state == Host.State.DOWN) continue; + + // Otherwise, we want to do the equivalent of onAdd(). But since we know for sure that no + // sessions or prepared + // statements exist at this point, we can skip some of the steps (plus this avoids + // scheduling concurrent pool + // creations if a session is created right after this method returns). + logger.info("New Cassandra host {} added", host); + + if (!host.supports(connectionFactory.protocolVersion)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + continue; + } + + if (!lbpContactPoints.contains(host)) loadBalancingPolicy().onAdd(host); + + host.setUp(); + + for (Host.StateListener listener : listeners) listener.onAdd(host); + } + + // start debouncers + this.nodeListRefreshRequestDebouncer.start(); + this.schemaRefreshRequestDebouncer.start(); + this.nodeRefreshRequestDebouncer.start(); + + isFullyInit = true; + } catch (RuntimeException e) { + initException = e; + close(); + throw e; + } + } - /** - * Adds a contact point - or many if the given address resolves to multiple - * InetAddresss (A records). - *

- * Contact points are addresses of Cassandra nodes that the driver uses - * to discover the cluster topology. Only one contact point is required - * (the driver will retrieve the address of the other nodes - * automatically), but it is usually a good idea to provide more than - * one contact point, because if that single contact point is unavailable, - * the driver cannot initialize itself correctly. - *

- * Note that by default (that is, unless you use the {@link #withLoadBalancingPolicy} - * method of this builder), the first successfully contacted host will be used - * to define the local data-center for the client. It follows that if you are - * running Cassandra in a multiple data-center setting, it is a good idea to - * only provide contact points that are in the same datacenter as the client, - * or to manually provide a load balancing policy that suits your needs. - *

- * If the host name points to a DNS record with multiple a-records, all InetAddresses - * returned will be used. Make sure that all resulting InetAddresss returned - * point to the same cluster and datacenter. - * - * @param address the address of the node(s) to connect to. - * @return this Builder. - * @throws IllegalArgumentException if the given {@code address} - * could not be resolved. - * @throws SecurityException if a security manager is present and - * permission to resolve the host name is denied. - */ - public Builder addContactPoint(String address) { - // We explicitly check for nulls because InetAdress.getByName() will happily - // accept it and use localhost (while a null here almost likely mean a user error, - // not "connect to localhost") - if (address == null) - throw new NullPointerException(); + private void negotiateProtocolVersionAndConnect() { + boolean shouldNegotiate = (configuration.getProtocolOptions().initialProtocolVersion == null); + while (true) { + try { + controlConnection.connect(); + return; + } catch (UnsupportedProtocolVersionException e) { + if (!shouldNegotiate) { + throw e; + } + // Do not trust version of server's response, as C* behavior in case of protocol + // negotiation is not + // properly documented, and varies over time (specially after CASSANDRA-11464). Instead, + // always + // retry at attempted version - 1, if such a version exists; and otherwise, stop and fail. + ProtocolVersion attemptedVersion = e.getUnsupportedVersion(); + ProtocolVersion retryVersion = attemptedVersion.getLowerSupported(); + if (retryVersion == null) { + throw e; + } + logger.info( + "Cannot connect with protocol version {}, trying with {}", + attemptedVersion, + retryVersion); + connectionFactory.protocolVersion = retryVersion; + } + } + } - try { - addContactPoints(InetAddress.getAllByName(address)); - return this; - } catch (UnknownHostException e) { - throw new IllegalArgumentException("Failed to add contact point: " + address, e); - } - } + ProtocolVersion protocolVersion() { + return connectionFactory.protocolVersion; + } - /** - * Adds contact points. - *
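For reference, a short sketch of how the negotiation loop above can be bypassed: when a version is set explicitly, {@code initialProtocolVersion} is non-null and the downgrade retry in {@code negotiateProtocolVersionAndConnect()} is skipped. The contact point below is a placeholder:

```
// Sketch: pinning the native protocol version disables auto-negotiation.
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1")            // placeholder contact point
        .withProtocolVersion(ProtocolVersion.V4) // explicit version, no downgrade loop
        .build();
```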

- * See {@link Builder#addContactPoint} for more details on contact - * points. - *

- * Note that all contact points must be resolvable; - * if any of them cannot be resolved, this method will fail. - * - * @param addresses addresses of the nodes to add as contact points. - * @return this Builder. - * @throws IllegalArgumentException if any of the given {@code addresses} - * could not be resolved. - * @throws SecurityException if a security manager is present and - * permission to resolve the host name is denied. - * @see Builder#addContactPoint - */ - public Builder addContactPoints(String... addresses) { - for (String address : addresses) - addContactPoint(address); - return this; - } + Cluster getCluster() { + return Cluster.this; + } - /** - * Adds contact points. - *

- * See {@link Builder#addContactPoint} for more details on contact - * points. - *

- * Note that all contact points must be resolvable; - * if any of them cannot be resolved, this method will fail. - * - * @param addresses addresses of the nodes to add as contact points. - * @return this Builder. - * @throws IllegalArgumentException if any of the given {@code addresses} - * could not be resolved. - * @throws SecurityException if a security manager is present and - * permission to resolve the host name is denied. - * @see Builder#addContactPoint - */ - public Builder addContactPoints(InetAddress... addresses) { - Collections.addAll(this.rawAddresses, addresses); - return this; - } + LoadBalancingPolicy loadBalancingPolicy() { + return configuration.getPolicies().getLoadBalancingPolicy(); + } - /** - * Adds contact points. - *

- * See {@link Builder#addContactPoint} for more details on contact - * points. - * - * @param addresses addresses of the nodes to add as contact points. - * @return this Builder - * @see Builder#addContactPoint - */ - public Builder addContactPoints(Collection addresses) { - this.rawAddresses.addAll(addresses); - return this; - } + SpeculativeExecutionPolicy speculativeExecutionPolicy() { + return configuration.getPolicies().getSpeculativeExecutionPolicy(); + } - /** - * Adds contact points. - *

- * See {@link Builder#addContactPoint} for more details on contact - * points. Contrarily to other {@code addContactPoints} methods, this method - * allows to provide a different port for each contact point. Since Cassandra - * nodes must always all listen on the same port, this is rarely what you - * want and most users should prefer other {@code addContactPoints} methods to - * this one. However, this can be useful if the Cassandra nodes are behind - * a router and are not accessed directly. Note that if you are in this - * situation (Cassandra nodes are behind a router, not directly accessible), - * you almost surely want to provide a specific {@link AddressTranslator} - * (through {@link #withAddressTranslator}) to translate actual Cassandra node - * addresses to the addresses the driver should use, otherwise the driver - * will not be able to auto-detect new nodes (and will generally not function - * optimally). - * - * @param addresses addresses of the nodes to add as contact points. - * @return this Builder - * @see Builder#addContactPoint - */ - public Builder addContactPointsWithPorts(InetSocketAddress... addresses) { - Collections.addAll(this.addresses, addresses); - return this; - } + ReconnectionPolicy reconnectionPolicy() { + return configuration.getPolicies().getReconnectionPolicy(); + } - /** - * Adds contact points. - *

- * See {@link Builder#addContactPoint} for more details on contact - * points. Contrarily to other {@code addContactPoints} methods, this method - * allows to provide a different port for each contact point. Since Cassandra - * nodes must always all listen on the same port, this is rarely what you - * want and most users should prefer other {@code addContactPoints} methods to - * this one. However, this can be useful if the Cassandra nodes are behind - * a router and are not accessed directly. Note that if you are in this - * situation (Cassandra nodes are behind a router, not directly accessible), - * you almost surely want to provide a specific {@link AddressTranslator} - * (through {@link #withAddressTranslator}) to translate actual Cassandra node - * addresses to the addresses the driver should use, otherwise the driver - * will not be able to auto-detect new nodes (and will generally not function - * optimally). - * - * @param addresses addresses of the nodes to add as contact points. - * @return this Builder - * @see Builder#addContactPoint - */ - public Builder addContactPointsWithPorts(Collection addresses) { - this.addresses.addAll(addresses); - return this; - } + InetSocketAddress translateAddress(InetSocketAddress address) { + InetSocketAddress translated = + configuration.getPolicies().getAddressTranslator().translate(address); + return translated == null ? address : translated; + } - /** - * Configures the load balancing policy to use for the new cluster. - *

- * If no load balancing policy is set through this method, - * {@link Policies#defaultLoadBalancingPolicy} will be used instead. - * - * @param policy the load balancing policy to use. - * @return this Builder. - */ - public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) { - policiesBuilder.withLoadBalancingPolicy(policy); - return this; - } + InetSocketAddress translateAddress(InetAddress address) { + InetSocketAddress sa = new InetSocketAddress(address, connectionFactory.getPort()); + return translateAddress(sa); + } - /** - * Configures the reconnection policy to use for the new cluster. - *
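A sketch of providing the policy explicitly instead of relying on the first contacted host to define the local data center; the data center name and addresses are placeholders, and {@code DCAwareRoundRobinPolicy} is just one possible choice:

```
// Sketch: pin the local data center rather than inferring it from the first contact point.
Cluster cluster =
    Cluster.builder()
        .addContactPoints("10.0.0.1", "10.0.0.2") // placeholders, ideally in the local DC
        .withLoadBalancingPolicy(
            DCAwareRoundRobinPolicy.builder().withLocalDc("dc1").build()) // "dc1" is a placeholder
        .build();
```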

- * If no reconnection policy is set through this method, - * {@link Policies#DEFAULT_RECONNECTION_POLICY} will be used instead. - * - * @param policy the reconnection policy to use. - * @return this Builder. - */ - public Builder withReconnectionPolicy(ReconnectionPolicy policy) { - policiesBuilder.withReconnectionPolicy(policy); - return this; - } - - /** - * Configures the retry policy to use for the new cluster. - *

- * If no retry policy is set through this method, - * {@link Policies#DEFAULT_RETRY_POLICY} will be used instead. - * - * @param policy the retry policy to use. - * @return this Builder. - */ - public Builder withRetryPolicy(RetryPolicy policy) { - policiesBuilder.withRetryPolicy(policy); - return this; - } + private Session newSession() { + SessionManager session = new SessionManager(Cluster.this); + sessions.add(session); + return session; + } - /** - * Configures the address translator to use for the new cluster. - *

- * See {@link AddressTranslator} for more detail on address translation, - * but the default translator, {@link IdentityTranslator}, should be - * correct in most cases. If unsure, stick to the default. - * - * @param translator the translator to use. - * @return this Builder. - */ - public Builder withAddressTranslator(AddressTranslator translator) { - policiesBuilder.withAddressTranslator(translator); - return this; - } + boolean removeSession(Session session) { + return sessions.remove(session); + } - /** - * Configures the generator that will produce the client-side timestamp sent - * with each query. - *

- * This feature is only available with version {@link ProtocolVersion#V3 V3} or - * above of the native protocol. With earlier versions, timestamps are always - * generated server-side, and setting a generator through this method will have - * no effect. - *

- * If no generator is set through this method, the driver will default to - * client-side timestamps by using {@link AtomicMonotonicTimestampGenerator}. - * - * @param timestampGenerator the generator to use. - * @return this Builder. - */ - public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { - policiesBuilder.withTimestampGenerator(timestampGenerator); - return this; + void reportQuery(Host host, Statement statement, Exception exception, long latencyNanos) { + for (LatencyTracker tracker : latencyTrackers) { + try { + tracker.update(host, statement, exception, latencyNanos); + } catch (Exception e) { + logger.error("Call to latency tracker failed", e); } + } + } - /** - * Configures the speculative execution policy to use for the new cluster. - *

- * If no policy is set through this method, {@link Policies#defaultSpeculativeExecutionPolicy()} - * will be used instead. - * - * @param policy the policy to use. - * @return this Builder. - */ - public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy policy) { - policiesBuilder.withSpeculativeExecutionPolicy(policy); - return this; - } + ControlConnection getControlConnection() { + return controlConnection; + } + List getContactPoints() { + return contactPoints; + } - /** - * Configures the {@link CodecRegistry} instance to use for the new cluster. - *

- * If no codec registry is set through this method, {@link CodecRegistry#DEFAULT_INSTANCE} - * will be used instead. - *

Note that if two or more {@link Cluster} instances are configured to - * use the default codec registry, they are going to share the same instance. - * In this case, care should be taken when registering new codecs on it as any - * codec registered by one cluster would be immediately available to others - * sharing the same default instance. - * - * @param codecRegistry the codec registry to use. - * @return this Builder. - */ - public Builder withCodecRegistry(CodecRegistry codecRegistry) { - configurationBuilder.withCodecRegistry(codecRegistry); - return this; - } + boolean isClosed() { + return closeFuture.get() != null; + } - /** - * Uses the provided credentials when connecting to Cassandra hosts. - *
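A sketch of avoiding the shared default instance by giving the cluster its own registry; the custom codec is hypothetical:

```
// Sketch: a dedicated registry keeps custom codecs local to this Cluster instance.
CodecRegistry registry = new CodecRegistry();
// registry.register(myCustomCodec); // hypothetical TypeCodec implementation
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder
        .withCodecRegistry(registry)
        .build();
```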

- * This should be used if the Cassandra cluster has been configured to - * use the {@code PasswordAuthenticator}. If the default {@code - * AllowAllAuthenticator} is used instead, using this method has no - * effect. - * - * @param username the username to use to login to Cassandra hosts. - * @param password the password corresponding to {@code username}. - * @return this Builder. - */ - public Builder withCredentials(String username, String password) { - this.authProvider = new PlainTextAuthProvider(username, password); - return this; - } + boolean errorDuringInit() { + return (isInit && initException != null); + } - /** - * Use the specified AuthProvider when connecting to Cassandra - * hosts. - *

- * Use this method when a custom authentication scheme is in place. - * You shouldn't call both this method and {@code withCredentials} - * on the same {@code Builder} instance as one will supersede the - * other - * - * @param authProvider the {@link AuthProvider} to use to login to - * Cassandra hosts. - * @return this Builder - */ - public Builder withAuthProvider(AuthProvider authProvider) { - this.authProvider = authProvider; - return this; - } + Exception getInitException() { + return initException; + } - /** - * Sets the compression to use for the transport. - * - * @param compression the compression to set. - * @return this Builder. - * @see ProtocolOptions.Compression - */ - public Builder withCompression(ProtocolOptions.Compression compression) { - this.compression = compression; - return this; - } + private CloseFuture close() { - /** - * Disables metrics collection for the created cluster (metrics are - * enabled by default otherwise). - * - * @return this builder. - */ - public Builder withoutMetrics() { - this.metricsEnabled = false; - return this; - } + CloseFuture future = closeFuture.get(); + if (future != null) return future; - /** - * Enables the use of SSL for the created {@code Cluster}. - *
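A sketch of the provider-based route; {@code PlainTextAuthProvider} is the provider that {@code withCredentials} installs internally, and the credentials below are placeholders:

```
// Sketch: equivalent to withCredentials("cassandra", "cassandra").
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder
        .withAuthProvider(new PlainTextAuthProvider("cassandra", "cassandra")) // placeholder credentials
        .build();
```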

- * Calling this method will use the JDK-based implementation with the default options - * (see {@link RemoteEndpointAwareJdkSSLOptions.Builder}). - * This is thus a shortcut for {@code withSSL(JdkSSLOptions.builder().build())}. - *

- * Note that if SSL is enabled, the driver will not connect to any - * Cassandra node that doesn't have SSL enabled, and it is strongly - * advised to enable SSL on every Cassandra node if you plan on using - * SSL in the driver. - * - * @return this builder. - */ - public Builder withSSL() { - this.sslOptions = RemoteEndpointAwareJdkSSLOptions.builder().build(); - return this; - } + if (isInit) { + logger.debug("Shutting down"); - /** - * Enable the use of SSL for the created {@code Cluster} using the provided options. - * - * @param sslOptions the SSL options to use. - * @return this builder. - */ - public Builder withSSL(SSLOptions sslOptions) { - this.sslOptions = sslOptions; - return this; + // stop debouncers + if (nodeListRefreshRequestDebouncer != null) { + nodeListRefreshRequestDebouncer.stop(); } - - /** - * Register the provided listeners in the newly created cluster. - *
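A sketch of the explicit form; as the code above shows, the no-argument overload simply installs the JDK-based defaults, so the two are equivalent:

```
// Sketch: same effect as calling withSSL() with no arguments.
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder
        .withSSL(RemoteEndpointAwareJdkSSLOptions.builder().build())
        .build();
```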

- * Note: repeated calls to this method will override the previous ones. - * - * @param listeners the listeners to register. - * @return this builder. - */ - public Builder withInitialListeners(Collection listeners) { - this.listeners = listeners; - return this; + if (nodeRefreshRequestDebouncer != null) { + nodeRefreshRequestDebouncer.stop(); } - - /** - * Disables JMX reporting of the metrics. - *

- * JMX reporting is enabled by default (see {@link Metrics}) but can be - * disabled using this option. If metrics are disabled, this is a - * no-op. - * - * @return this builder. - */ - public Builder withoutJMXReporting() { - this.jmxEnabled = false; - return this; + if (schemaRefreshRequestDebouncer != null) { + schemaRefreshRequestDebouncer.stop(); } - /** - * Sets the PoolingOptions to use for the newly created Cluster. - *

- * If no pooling options are set through this method, default pooling - * options will be used. - * - * @param options the pooling options to use. - * @return this builder. - */ - public Builder withPoolingOptions(PoolingOptions options) { - configurationBuilder.withPoolingOptions(options); - return this; - } + // If we're shutting down, there is no point in waiting on scheduled reconnections, nor on + // notifications + // delivery or blocking tasks so we use shutdownNow + shutdownNow(reconnectionExecutor); + shutdownNow(scheduledTasksExecutor); + shutdownNow(blockingExecutor); - /** - * Sets the SocketOptions to use for the newly created Cluster. - *

- * If no socket options are set through this method, default socket - * options will be used. - * - * @param options the socket options to use. - * @return this builder. - */ - public Builder withSocketOptions(SocketOptions options) { - configurationBuilder.withSocketOptions(options); - return this; + // but for the worker executor, we want to let submitted tasks finish unless the shutdown is + // forced. + if (executor != null) { + executor.shutdown(); } - /** - * Sets the QueryOptions to use for the newly created Cluster. - *

- * If no query options are set through this method, default query - * options will be used. - * - * @param options the query options to use. - * @return this builder. - */ - public Builder withQueryOptions(QueryOptions options) { - configurationBuilder.withQueryOptions(options); - return this; - } + // We also close the metrics + if (metrics != null) metrics.shutdown(); - /** - * Sets the threading options to use for the newly created Cluster. - *

- * If no options are set through this method, a new instance of {@link ThreadingOptions} will be used. - * - * @param options the options. - * @return this builder. - */ - public Builder withThreadingOptions(ThreadingOptions options) { - configurationBuilder.withThreadingOptions(options); - return this; - } + loadBalancingPolicy().close(); + speculativeExecutionPolicy().close(); + configuration.getPolicies().getRetryPolicy().close(); + reconnectionPolicy().close(); + configuration.getPolicies().getAddressTranslator().close(); + for (LatencyTracker tracker : latencyTrackers) tracker.onUnregister(Cluster.this); + for (Host.StateListener listener : listeners) listener.onUnregister(Cluster.this); + for (SchemaChangeListener listener : schemaChangeListeners) + listener.onUnregister(Cluster.this); - /** - * Set the {@link NettyOptions} to use for the newly created Cluster. - *

- * If no Netty options are set through this method, {@link NettyOptions#DEFAULT_INSTANCE} - * will be used as a default value, which means that no customization will be applied. - * - * @param nettyOptions the {@link NettyOptions} to use. - * @return this builder. - */ - public Builder withNettyOptions(NettyOptions nettyOptions) { - configurationBuilder.withNettyOptions(nettyOptions); - return this; + // Then we shutdown all connections + List futures = new ArrayList(sessions.size() + 1); + if (controlConnection != null) { + futures.add(controlConnection.closeAsync()); } + for (Session session : sessions) futures.add(session.closeAsync()); - /** - * The configuration that will be used for the new cluster. - *

- * You should not modify this object directly because changes made - * to the returned object may not be used by the cluster build. - * Instead, you should use the other methods of this {@code Builder}. - * - * @return the configuration to use for the new cluster. - */ - @Override - public Configuration getConfiguration() { - ProtocolOptions protocolOptions = new ProtocolOptions(port, protocolVersion, maxSchemaAgreementWaitSeconds, sslOptions, authProvider) - .setCompression(compression); - - MetricsOptions metricsOptions = new MetricsOptions(metricsEnabled, jmxEnabled); + future = new ClusterCloseFuture(futures); + // The rest will happen asynchronously, when all connections are successfully closed + } else { + future = CloseFuture.immediateFuture(); + } - return configurationBuilder - .withProtocolOptions(protocolOptions) - .withMetricsOptions(metricsOptions) - .withPolicies(policiesBuilder.build()) - .build(); - } - - @Override - public Collection getInitialListeners() { - return listeners == null ? Collections.emptySet() : listeners; - } + return closeFuture.compareAndSet(null, future) + ? future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } - /** - * Builds the cluster with the configured set of initial contact points - * and policies. - *

- * This is a convenience method for {@code Cluster.buildFrom(this)}. - * - * @return the newly built Cluster instance. - */ - public Cluster build() { - return Cluster.buildFrom(this); + private void shutdownNow(ExecutorService executor) { + if (executor != null) { + List pendingTasks = executor.shutdownNow(); + // If some tasks were submitted to this executor but not yet commenced, make sure the + // corresponding futures complete + for (Runnable pendingTask : pendingTasks) { + if (pendingTask instanceof FutureTask) ((FutureTask) pendingTask).cancel(false); } + } } - static long timeSince(long startNanos, TimeUnit destUnit) { - return destUnit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS); + void logUnsupportedVersionProtocol(Host host, ProtocolVersion version) { + logger.warn( + "Detected added or restarted Cassandra host {} but ignoring it since it does not support the version {} of the native " + + "protocol which is currently in use. If you want to force the use of a particular version of the native protocol, use " + + "Cluster.Builder#usingProtocolVersion() when creating the Cluster instance.", + host, + version); } - private static String generateClusterName() { - return "cluster" + CLUSTER_ID.incrementAndGet(); + void logClusterNameMismatch(Host host, String expectedClusterName, String actualClusterName) { + logger.warn( + "Detected added or restarted Cassandra host {} but ignoring it since its cluster name '{}' does not match the one " + + "currently known ({})", + host, + actualClusterName, + expectedClusterName); } - /** - * The sessions and hosts managed by this a Cluster instance. - *

- * Note: the reason we create a Manager object separate from Cluster is - * that Manager is not publicly visible. For instance, we wouldn't want - * user to be able to call the {@link #onUp} and {@link #onDown} methods. - */ - class Manager implements Connection.DefaultResponseHandler { - - final String clusterName; - private boolean isInit; - private volatile boolean isFullyInit; - - // Initial contacts point - final List contactPoints; - final Set sessions = new CopyOnWriteArraySet(); - - Metadata metadata; - final Configuration configuration; - Metrics metrics; - - Connection.Factory connectionFactory; - ControlConnection controlConnection; - - final ConvictionPolicy.Factory convictionPolicyFactory = new ConvictionPolicy.DefaultConvictionPolicy.Factory(); - - ListeningExecutorService executor; - ListeningExecutorService blockingExecutor; - ScheduledExecutorService reconnectionExecutor; - ScheduledExecutorService scheduledTasksExecutor; - - BlockingQueue executorQueue; - BlockingQueue blockingExecutorQueue; - BlockingQueue reconnectionExecutorQueue; - BlockingQueue scheduledTasksExecutorQueue; - - ConnectionReaper reaper; - - final AtomicReference closeFuture = new AtomicReference(); + public ListenableFuture triggerOnUp(final Host host) { + if (!isClosed()) { + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onUp(host, null); + } + }); + } else { + return MoreFutures.VOID_SUCCESS; + } + } - // All the queries that have been prepared (we keep them so we can re-prepared them when a node fail or a - // new one join the cluster). - // Note: we could move this down to the session level, but since prepared statement are global to a node, - // this would yield a slightly less clear behavior. - ConcurrentMap preparedQueries; + // Use triggerOnUp unless you're sure you want to run this on the current thread. + private void onUp(final Host host, Connection reusedConnection) + throws InterruptedException, ExecutionException { + if (isClosed()) return; - final Set listeners; - final Set latencyTrackers = new CopyOnWriteArraySet(); - final Set schemaChangeListeners = new CopyOnWriteArraySet(); + if (!host.supports(connectionFactory.protocolVersion)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + return; + } - EventDebouncer nodeListRefreshRequestDebouncer; - EventDebouncer nodeRefreshRequestDebouncer; - EventDebouncer schemaRefreshRequestDebouncer; + try { - private Manager(String clusterName, List contactPoints, Configuration configuration, Collection listeners) { - this.clusterName = clusterName == null ? generateClusterName() : clusterName; - this.configuration = configuration; - this.contactPoints = contactPoints; - this.listeners = new CopyOnWriteArraySet(listeners); + boolean locked = + host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn( + "Could not acquire notifications lock within {} seconds, ignoring UP notification for {}", + NOTIF_LOCK_TIMEOUT_SECONDS, + host); + return; } + try { - // Initialization is not too performance intensive and in practice there shouldn't be contention - // on it so synchronized is good enough. 
- synchronized void init() { - checkNotClosed(this); - if (isInit) - return; - isInit = true; - - logger.debug("Starting new cluster with contact points " + contactPoints); - - this.configuration.register(this); - - ThreadingOptions threadingOptions = this.configuration.getThreadingOptions(); - - // executor - ExecutorService tmpExecutor = threadingOptions.createExecutor(clusterName); - this.executorQueue = (tmpExecutor instanceof ThreadPoolExecutor) - ? ((ThreadPoolExecutor) tmpExecutor).getQueue() : null; - this.executor = MoreExecutors.listeningDecorator(tmpExecutor); - - // blocking executor - ExecutorService tmpBlockingExecutor = threadingOptions.createBlockingExecutor(clusterName); - this.blockingExecutorQueue = (tmpBlockingExecutor instanceof ThreadPoolExecutor) - ? ((ThreadPoolExecutor) tmpBlockingExecutor).getQueue() : null; - this.blockingExecutor = MoreExecutors.listeningDecorator(tmpBlockingExecutor); - - // reconnection executor - this.reconnectionExecutor = threadingOptions.createReconnectionExecutor(clusterName); - this.reconnectionExecutorQueue = (reconnectionExecutor instanceof ThreadPoolExecutor) - ? ((ThreadPoolExecutor) reconnectionExecutor).getQueue() : null; - - // scheduled tasks executor - this.scheduledTasksExecutor = threadingOptions.createScheduledTasksExecutor(clusterName); - this.scheduledTasksExecutorQueue = (scheduledTasksExecutor instanceof ThreadPoolExecutor) - ? ((ThreadPoolExecutor) scheduledTasksExecutor).getQueue() : null; - - this.reaper = new ConnectionReaper(threadingOptions.createReaperExecutor(clusterName)); - this.metadata = new Metadata(this); - this.connectionFactory = new Connection.Factory(this, configuration); - this.controlConnection = new ControlConnection(this); - this.metrics = configuration.getMetricsOptions().isEnabled() ? 
new Metrics(this) : null; - this.preparedQueries = new MapMaker().weakValues().makeMap(); - - // create debouncers - at this stage, they are not running yet - final QueryOptions queryOptions = configuration.getQueryOptions(); - this.nodeListRefreshRequestDebouncer = new EventDebouncer( - "Node list refresh", - scheduledTasksExecutor, - new NodeListRefreshRequestDeliveryCallback() - ) { - - @Override - int maxPendingEvents() { - return configuration.getQueryOptions().getMaxPendingRefreshNodeListRequests(); - } - - @Override - long delayMs() { - return configuration.getQueryOptions().getRefreshNodeListIntervalMillis(); - } - }; - this.nodeRefreshRequestDebouncer = new EventDebouncer( - "Node refresh", - scheduledTasksExecutor, - new NodeRefreshRequestDeliveryCallback() - ) { - - @Override - int maxPendingEvents() { - return configuration.getQueryOptions().getMaxPendingRefreshNodeRequests(); - } - - @Override - long delayMs() { - return configuration.getQueryOptions().getRefreshNodeIntervalMillis(); - } - }; - this.schemaRefreshRequestDebouncer = new EventDebouncer( - "Schema refresh", - scheduledTasksExecutor, - new SchemaRefreshRequestDeliveryCallback() - ) { - - @Override - int maxPendingEvents() { - return configuration.getQueryOptions().getMaxPendingRefreshSchemaRequests(); - } + // We don't want to use the public Host.isUp() as this would make us skip the rest for + // suspected hosts + if (host.state == Host.State.UP) return; - @Override - long delayMs() { - return configuration.getQueryOptions().getRefreshSchemaIntervalMillis(); - } - }; + Host.statesLogger.debug("[{}] marking host UP", host); - this.scheduledTasksExecutor.scheduleWithFixedDelay(new CleanupIdleConnectionsTask(), 10, 10, TimeUnit.SECONDS); + // If there is a reconnection attempt scheduled for that node, cancel it + Future scheduledAttempt = host.reconnectionAttempt.getAndSet(null); + if (scheduledAttempt != null) { + logger.debug("Cancelling reconnection attempt since node is UP"); + scheduledAttempt.cancel(false); + } - for (InetSocketAddress address : contactPoints) { - // We don't want to signal -- call onAdd() -- because nothing is ready - // yet (loadbalancing policy, control connection, ...). All we want is - // create the Host object so we can initialize the control connection. - metadata.add(address); + try { + if (getCluster().getConfiguration().getQueryOptions().isReprepareOnUp()) + reusedConnection = prepareAllQueries(host, reusedConnection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Don't propagate because we don't want to prevent other listener to run + } catch (UnsupportedProtocolVersionException e) { + logUnsupportedVersionProtocol(host, e.getUnsupportedVersion()); + return; + } catch (ClusterNameMismatchException e) { + logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); + return; + } + + // Session#onUp() expects the load balancing policy to have been updated first, so that + // Host distances are up to date. This mean the policy could return the node before the + // new pool have been created. This is harmless if there is no prior pool since + // RequestHandler + // will ignore the node, but we do want to make sure there is no prior pool so we don't + // query from a pool we will shutdown right away. 
+ for (SessionManager s : sessions) s.removePool(host); + loadBalancingPolicy().onUp(host); + controlConnection.onUp(host); + + logger.trace("Adding/renewing host pools for newly UP host {}", host); + + List> futures = Lists.newArrayListWithCapacity(sessions.size()); + for (SessionManager s : sessions) futures.add(s.forceRenewPool(host, reusedConnection)); + + try { + // Only mark the node up once all session have re-added their pool (if the + // load-balancing + // policy says it should), so that Host.isUp() don't return true before we're + // reconnected + // to the node. + List poolCreationResults = Futures.allAsList(futures).get(); + + // If any of the creation failed, they will have signaled a connection failure + // which will trigger a reconnection to the node. So don't bother marking UP. + if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { + logger.debug("Connection pool cannot be created, not marking {} UP", host); + return; } - Collection allHosts = metadata.allHosts(); - - // At this stage, metadata.allHosts() only contains the contact points, that's what we want to pass to LBP.init(). - // But the control connection will initialize first and discover more hosts, so make a copy. - Set contactPointHosts = Sets.newHashSet(allHosts); - - try { - negotiateProtocolVersionAndConnect(); - - // The control connection can mark hosts down if it failed to connect to them, or remove them if they weren't found - // in the control host's system.peers. Separate them: - Set downContactPointHosts = Sets.newHashSet(); - Set removedContactPointHosts = Sets.newHashSet(); - for (Host host : contactPointHosts) { - if (!allHosts.contains(host)) - removedContactPointHosts.add(host); - else if (host.state == Host.State.DOWN) - downContactPointHosts.add(host); - } - contactPointHosts.removeAll(removedContactPointHosts); - contactPointHosts.removeAll(downContactPointHosts); - - // Now that the control connection is ready, we have all the information we need about the nodes (datacenter, - // rack...) 
to initialize the load balancing policy - loadBalancingPolicy().init(Cluster.this, contactPointHosts); - - speculativeExecutionPolicy().init(Cluster.this); - configuration.getPolicies().getRetryPolicy().init(Cluster.this); - reconnectionPolicy().init(Cluster.this); - configuration.getPolicies().getAddressTranslator().init(Cluster.this); - for (LatencyTracker tracker : latencyTrackers) - tracker.onRegister(Cluster.this); - for (Host.StateListener listener : listeners) - listener.onRegister(Cluster.this); - for (Host host : removedContactPointHosts) { - loadBalancingPolicy().onRemove(host); - for (Host.StateListener listener : listeners) - listener.onRemove(host); - } + host.setUp(); - for (Host host : downContactPointHosts) { - loadBalancingPolicy().onDown(host); - for (Host.StateListener listener : listeners) - listener.onDown(host); - startPeriodicReconnectionAttempt(host, true); - } + for (Host.StateListener listener : listeners) listener.onUp(host); - configuration.getPoolingOptions().setProtocolVersion(protocolVersion()); + } catch (ExecutionException e) { + Throwable t = e.getCause(); + // That future is not really supposed to throw unexpected exceptions + if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) + logger.error( + "Unexpected error while marking node UP: while this shouldn't happen, this shouldn't be critical", + t); + } - for (Host host : allHosts) { - // If the host is down at this stage, it's a contact point that the control connection failed to reach. - // Reconnection attempts are already scheduled, and the LBP and listeners have been notified above. - if (host.state == Host.State.DOWN) continue; + // Now, check if there isn't pools to create/remove following the addition. + // We do that now only so that it's not called before we've set the node up. + for (SessionManager s : sessions) s.updateCreatedPools().get(); - // Otherwise, we want to do the equivalent of onAdd(). But since we know for sure that no sessions or prepared - // statements exist at this point, we can skip some of the steps (plus this avoids scheduling concurrent pool - // creations if a session is created right after this method returns). - logger.info("New Cassandra host {} added", host); + } finally { + host.notificationsLock.unlock(); + } - if (!host.supports(connectionFactory.protocolVersion)) { - logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); - continue; - } + } finally { + if (reusedConnection != null && !reusedConnection.hasOwner()) reusedConnection.closeAsync(); + } + } - if (!contactPointHosts.contains(host)) - loadBalancingPolicy().onAdd(host); + public ListenableFuture triggerOnDown(final Host host, boolean startReconnection) { + return triggerOnDown(host, false, startReconnection); + } - host.setUp(); + public ListenableFuture triggerOnDown( + final Host host, final boolean isHostAddition, final boolean startReconnection) { + if (!isClosed()) { + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onDown(host, isHostAddition, startReconnection); + } + }); + } else { + return MoreFutures.VOID_SUCCESS; + } + } - for (Host.StateListener listener : listeners) - listener.onAdd(host); - } + // Use triggerOnDown unless you're sure you want to run this on the current thread. 
+ private void onDown(final Host host, final boolean isHostAddition, boolean startReconnection) + throws InterruptedException, ExecutionException { + if (isClosed()) return; + + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn( + "Could not acquire notifications lock within {} seconds, ignoring DOWN notification for {}", + NOTIF_LOCK_TIMEOUT_SECONDS, + host); + return; + } + try { + + // Note: we don't want to skip that method if !host.isUp() because we set isUp + // late in onUp, and so we can rely on isUp if there is an error during onUp. + // But if there is a reconnection attempt in progress already, then we know + // we've already gone through that method since the last successful onUp(), so + // we're good skipping it. + if (host.reconnectionAttempt.get() != null) { + logger.debug("Aborting onDown because a reconnection is running on DOWN host {}", host); + return; + } + + Host.statesLogger.debug("[{}] marking host DOWN", host); + + // Remember if we care about this node at all. We must call this before + // we've signalled the load balancing policy, since most policy will always + // IGNORE down nodes anyway. + HostDistance distance = loadBalancingPolicy().distance(host); + + boolean wasUp = host.isUp(); + host.setDown(); + + loadBalancingPolicy().onDown(host); + controlConnection.onDown(host); + for (SessionManager s : sessions) s.onDown(host); + + // Contrarily to other actions of that method, there is no reason to notify listeners + // unless the host was UP at the beginning of this function since even if a onUp fail + // mid-method, listeners won't have been notified of the UP. + if (wasUp) { + for (Host.StateListener listener : listeners) listener.onDown(host); + } + + // Don't start a reconnection if we ignore the node anyway (JAVA-314) + if (distance == HostDistance.IGNORED || !startReconnection) return; + + startPeriodicReconnectionAttempt(host, isHostAddition); + } finally { + host.notificationsLock.unlock(); + } + } - // start debouncers - this.nodeListRefreshRequestDebouncer.start(); - this.schemaRefreshRequestDebouncer.start(); - this.nodeRefreshRequestDebouncer.start(); + void startPeriodicReconnectionAttempt(final Host host, final boolean isHostAddition) { + new AbstractReconnectionHandler( + host.toString(), + reconnectionExecutor, + reconnectionPolicy().newSchedule(), + host.reconnectionAttempt) { - isFullyInit = true; - } catch (NoHostAvailableException e) { - close(); - throw e; - } + @Override + protected Connection tryReconnect() + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + return connectionFactory.open(host); } - private void negotiateProtocolVersionAndConnect() { - boolean shouldNegotiate = (configuration.getProtocolOptions().initialProtocolVersion == null); - while (true) { - try { - controlConnection.connect(); - return; - } catch (UnsupportedProtocolVersionException e) { - if (!shouldNegotiate) { - throw e; - } - // Do not trust version of server's response, as C* behavior in case of protocol negotiation is not - // properly documented, and varies over time (specially after CASSANDRA-11464). Instead, always - // retry at attempted version - 1, if such a version exists; and otherwise, stop and fail. 
- ProtocolVersion attemptedVersion = e.getUnsupportedVersion(); - ProtocolVersion retryVersion = attemptedVersion.getLowerSupported(); - if (retryVersion == null) { - throw e; - } - logger.info("Cannot connect with protocol version {}, trying with {}", attemptedVersion, retryVersion); - connectionFactory.protocolVersion = retryVersion; - } + @Override + protected void onReconnection(Connection connection) { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). + if (controlConnection.refreshNodeInfo(host)) { + logger.debug("Successful reconnection to {}, setting host UP", host); + try { + if (isHostAddition) { + onAdd(host, connection); + submitNodeListRefresh(); + } else onUp(host, connection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error while setting node up", e); } + } else { + logger.debug("Not enough info for {}, ignoring host", host); + connection.closeAsync(); + } } - ProtocolVersion protocolVersion() { - return connectionFactory.protocolVersion; - } - - Cluster getCluster() { - return Cluster.this; - } - - LoadBalancingPolicy loadBalancingPolicy() { - return configuration.getPolicies().getLoadBalancingPolicy(); - } - - SpeculativeExecutionPolicy speculativeExecutionPolicy() { - return configuration.getPolicies().getSpeculativeExecutionPolicy(); - } - - ReconnectionPolicy reconnectionPolicy() { - return configuration.getPolicies().getReconnectionPolicy(); - } - - InetSocketAddress translateAddress(InetAddress address) { - InetSocketAddress sa = new InetSocketAddress(address, connectionFactory.getPort()); - InetSocketAddress translated = configuration.getPolicies().getAddressTranslator().translate(sa); - return translated == null ? 
sa : translated; + @Override + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + if (logger.isDebugEnabled()) + logger.debug( + "Failed reconnection to {} ({}), scheduling retry in {} milliseconds", + host, + e.getMessage(), + nextDelayMs); + return true; } - private Session newSession() { - SessionManager session = new SessionManager(Cluster.this); - sessions.add(session); - return session; + @Override + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error( + String.format( + "Unknown error during reconnection to %s, scheduling retry in %d milliseconds", + host, nextDelayMs), + e); + return true; } - boolean removeSession(Session session) { - return sessions.remove(session); - } + @Override + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { + logger.error( + String.format( + "Authentication error during reconnection to %s, scheduling retry in %d milliseconds", + host, nextDelayMs), + e); + return true; + } + }.start(); + } - void reportQuery(Host host, Statement statement, Exception exception, long latencyNanos) { - for (LatencyTracker tracker : latencyTrackers) { - tracker.update(host, statement, exception, latencyNanos); - } - } + void startSingleReconnectionAttempt(final Host host) { + if (isClosed() || host.isUp()) return; - boolean isClosed() { - return closeFuture.get() != null; - } + logger.debug("Scheduling one-time reconnection to {}", host); - private CloseFuture close() { - - CloseFuture future = closeFuture.get(); - if (future != null) - return future; - - if (isInit) { - logger.debug("Shutting down"); - - // stop debouncers - nodeListRefreshRequestDebouncer.stop(); - nodeRefreshRequestDebouncer.stop(); - schemaRefreshRequestDebouncer.stop(); - - // If we're shutting down, there is no point in waiting on scheduled reconnections, nor on notifications - // delivery or blocking tasks so we use shutdownNow - shutdownNow(reconnectionExecutor); - shutdownNow(scheduledTasksExecutor); - shutdownNow(blockingExecutor); - - // but for the worker executor, we want to let submitted tasks finish unless the shutdown is forced. - executor.shutdown(); - - // We also close the metrics - if (metrics != null) - metrics.shutdown(); - - loadBalancingPolicy().close(); - speculativeExecutionPolicy().close(); - configuration.getPolicies().getRetryPolicy().close(); - reconnectionPolicy().close(); - configuration.getPolicies().getAddressTranslator().close(); - for (LatencyTracker tracker : latencyTrackers) - tracker.onUnregister(Cluster.this); - for (Host.StateListener listener : listeners) - listener.onUnregister(Cluster.this); - for (SchemaChangeListener listener : schemaChangeListeners) - listener.onUnregister(Cluster.this); - - // Then we shutdown all connections - List futures = new ArrayList(sessions.size() + 1); - futures.add(controlConnection.closeAsync()); - for (Session session : sessions) - futures.add(session.closeAsync()); - - future = new ClusterCloseFuture(futures); - // The rest will happen asynchronously, when all connections are successfully closed - } else { - future = CloseFuture.immediateFuture(); - } + // Setting an initial delay of 0 to start immediately, and all the exception handlers return + // false to prevent further attempts + new AbstractReconnectionHandler( + host.toString(), + reconnectionExecutor, + reconnectionPolicy().newSchedule(), + host.reconnectionAttempt, + 0) { - return closeFuture.compareAndSet(null, future) - ? 
future - : closeFuture.get(); // We raced, it's ok, return the future that was actually set + @Override + protected Connection tryReconnect() + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + return connectionFactory.open(host); } - private void shutdownNow(ExecutorService executor) { - List pendingTasks = executor.shutdownNow(); - // If some tasks were submitted to this executor but not yet commenced, make sure the corresponding futures complete - for (Runnable pendingTask : pendingTasks) { - if (pendingTask instanceof FutureTask) - ((FutureTask) pendingTask).cancel(false); + @Override + protected void onReconnection(Connection connection) { + // Make sure we have up-to-date infos on that host before adding it (so we typically + // catch that an upgraded node uses a new cassandra version). + if (controlConnection.refreshNodeInfo(host)) { + logger.debug("Successful reconnection to {}, setting host UP", host); + try { + onUp(host, connection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error while setting node up", e); } + } else { + logger.debug("Not enough info for {}, ignoring host", host); + connection.closeAsync(); + } } - void logUnsupportedVersionProtocol(Host host, ProtocolVersion version) { - logger.warn("Detected added or restarted Cassandra host {} but ignoring it since it does not support the version {} of the native " - + "protocol which is currently in use. If you want to force the use of a particular version of the native protocol, use " - + "Cluster.Builder#usingProtocolVersion() when creating the Cluster instance.", host, version); + @Override + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + if (logger.isDebugEnabled()) + logger.debug("Failed one-time reconnection to {} ({})", host, e.getMessage()); + return false; } - void logClusterNameMismatch(Host host, String expectedClusterName, String actualClusterName) { - logger.warn("Detected added or restarted Cassandra host {} but ignoring it since its cluster name '{}' does not match the one " - + "currently known ({})", - host, actualClusterName, expectedClusterName); + @Override + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("Unknown error during one-time reconnection to %s", host), e); + return false; } - public ListenableFuture triggerOnUp(final Host host) { - if (!isClosed()) { - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws InterruptedException, ExecutionException { - onUp(host, null); - } - }); - } else { - return MoreFutures.VOID_SUCCESS; - } + @Override + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { + logger.error( + String.format("Authentication error during one-time reconnection to %s", host), e); + return false; } + }.start(); + } - // Use triggerOnUp unless you're sure you want to run this on the current thread. 
- private void onUp(final Host host, Connection reusedConnection) throws InterruptedException, ExecutionException { - if (isClosed()) - return; - - if (!host.supports(connectionFactory.protocolVersion)) { - logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); - return; - } - - try { - - boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); - if (!locked) { - logger.warn("Could not acquire notifications lock within {} seconds, ignoring UP notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); - return; - } - try { - - // We don't want to use the public Host.isUp() as this would make us skip the rest for suspected hosts - if (host.state == Host.State.UP) - return; - - Host.statesLogger.debug("[{}] marking host UP", host); - - // If there is a reconnection attempt scheduled for that node, cancel it - Future scheduledAttempt = host.reconnectionAttempt.getAndSet(null); - if (scheduledAttempt != null) { - logger.debug("Cancelling reconnection attempt since node is UP"); - scheduledAttempt.cancel(false); - } - - try { - if (getCluster().getConfiguration().getQueryOptions().isReprepareOnUp()) - reusedConnection = prepareAllQueries(host, reusedConnection); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // Don't propagate because we don't want to prevent other listener to run - } catch (UnsupportedProtocolVersionException e) { - logUnsupportedVersionProtocol(host, e.getUnsupportedVersion()); - return; - } catch (ClusterNameMismatchException e) { - logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); - return; - } - - // Session#onUp() expects the load balancing policy to have been updated first, so that - // Host distances are up to date. This mean the policy could return the node before the - // new pool have been created. This is harmless if there is no prior pool since RequestHandler - // will ignore the node, but we do want to make sure there is no prior pool so we don't - // query from a pool we will shutdown right away. - for (SessionManager s : sessions) - s.removePool(host); - loadBalancingPolicy().onUp(host); - controlConnection.onUp(host); - - logger.trace("Adding/renewing host pools for newly UP host {}", host); - - List> futures = Lists.newArrayListWithCapacity(sessions.size()); - for (SessionManager s : sessions) - futures.add(s.forceRenewPool(host, reusedConnection)); - - try { - // Only mark the node up once all session have re-added their pool (if the load-balancing - // policy says it should), so that Host.isUp() don't return true before we're reconnected - // to the node. - List poolCreationResults = Futures.allAsList(futures).get(); - - // If any of the creation failed, they will have signaled a connection failure - // which will trigger a reconnection to the node. So don't bother marking UP. - if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { - logger.debug("Connection pool cannot be created, not marking {} UP", host); - return; - } - - host.setUp(); + public ListenableFuture triggerOnAdd(final Host host) { + if (!isClosed()) { + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onAdd(host, null); + } + }); + } else { + return MoreFutures.VOID_SUCCESS; + } + } - for (Host.StateListener listener : listeners) - listener.onUp(host); + // Use triggerOnAdd unless you're sure you want to run this on the current thread. 
+ private void onAdd(final Host host, Connection reusedConnection) + throws InterruptedException, ExecutionException { + if (isClosed()) return; - } catch (ExecutionException e) { - Throwable t = e.getCause(); - // That future is not really supposed to throw unexpected exceptions - if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) - logger.error("Unexpected error while marking node UP: while this shouldn't happen, this shouldn't be critical", t); - } + if (!host.supports(connectionFactory.protocolVersion)) { + logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); + return; + } - // Now, check if there isn't pools to create/remove following the addition. - // We do that now only so that it's not called before we've set the node up. - for (SessionManager s : sessions) - s.updateCreatedPools().get(); + try { - } finally { - host.notificationsLock.unlock(); - } - - } finally { - if (reusedConnection != null && !reusedConnection.hasOwner()) - reusedConnection.closeAsync(); - } + boolean locked = + host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn( + "Could not acquire notifications lock within {} seconds, ignoring ADD notification for {}", + NOTIF_LOCK_TIMEOUT_SECONDS, + host); + return; } - - public ListenableFuture triggerOnDown(final Host host, boolean startReconnection) { - return triggerOnDown(host, false, startReconnection); - } - - public ListenableFuture triggerOnDown(final Host host, final boolean isHostAddition, final boolean startReconnection) { - if (!isClosed()) { - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws InterruptedException, ExecutionException { - onDown(host, isHostAddition, startReconnection); - } - }); - } else { - return MoreFutures.VOID_SUCCESS; - } - } - - // Use triggerOnDown unless you're sure you want to run this on the current thread. - private void onDown(final Host host, final boolean isHostAddition, boolean startReconnection) throws InterruptedException, ExecutionException { - if (isClosed()) - return; - - boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); - if (!locked) { - logger.warn("Could not acquire notifications lock within {} seconds, ignoring DOWN notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); - return; + try { + Host.statesLogger.debug("[{}] adding host", host); + + // Adds to the load balancing first and foremost, as doing so might change the decision + // it will make for distance() on that node (not likely but we leave that possibility). + // This does mean the policy may start returning that node for query plan, but as long + // as no pools have been created (below) this will be ignored by RequestHandler so it's + // fine. + loadBalancingPolicy().onAdd(host); + + // Next, if the host should be ignored, well, ignore it. + if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) { + // We still mark the node UP though as it should be (and notifiy the listeners). 
+ // We'll mark it down if we have a notification anyway and we've documented that + // especially + // for IGNORED hosts, the isUp() method was a best effort guess + host.setUp(); + for (Host.StateListener listener : listeners) listener.onAdd(host); + return; + } + + try { + reusedConnection = prepareAllQueries(host, reusedConnection); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Don't propagate because we don't want to prevent other listener to run + } catch (UnsupportedProtocolVersionException e) { + logUnsupportedVersionProtocol(host, e.getUnsupportedVersion()); + return; + } catch (ClusterNameMismatchException e) { + logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); + return; + } + + controlConnection.onAdd(host); + + List> futures = Lists.newArrayListWithCapacity(sessions.size()); + for (SessionManager s : sessions) futures.add(s.maybeAddPool(host, reusedConnection)); + + try { + // Only mark the node up once all session have added their pool (if the load-balancing + // policy says it should), so that Host.isUp() don't return true before we're + // reconnected + // to the node. + List poolCreationResults = Futures.allAsList(futures).get(); + + // If any of the creation failed, they will have signaled a connection failure + // which will trigger a reconnection to the node. So don't bother marking UP. + if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { + logger.debug("Connection pool cannot be created, not marking {} UP", host); + return; } - try { - // Note: we don't want to skip that method if !host.isUp() because we set isUp - // late in onUp, and so we can rely on isUp if there is an error during onUp. - // But if there is a reconnection attempt in progress already, then we know - // we've already gone through that method since the last successful onUp(), so - // we're good skipping it. - if (host.reconnectionAttempt.get() != null) { - logger.debug("Aborting onDown because a reconnection is running on DOWN host {}", host); - return; - } - - Host.statesLogger.debug("[{}] marking host DOWN", host); + host.setUp(); - // Remember if we care about this node at all. We must call this before - // we've signalled the load balancing policy, since most policy will always - // IGNORE down nodes anyway. - HostDistance distance = loadBalancingPolicy().distance(host); + for (Host.StateListener listener : listeners) listener.onAdd(host); - boolean wasUp = host.isUp(); - host.setDown(); + } catch (ExecutionException e) { + Throwable t = e.getCause(); + // That future is not really supposed to throw unexpected exceptions + if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) + logger.error( + "Unexpected error while adding node: while this shouldn't happen, this shouldn't be critical", + t); + } - loadBalancingPolicy().onDown(host); - controlConnection.onDown(host); - for (SessionManager s : sessions) - s.onDown(host); + // Now, check if there isn't pools to create/remove following the addition. + // We do that now only so that it's not called before we've set the node up. + for (SessionManager s : sessions) s.updateCreatedPools().get(); - // Contrarily to other actions of that method, there is no reason to notify listeners - // unless the host was UP at the beginning of this function since even if a onUp fail - // mid-method, listeners won't have been notified of the UP. 
- if (wasUp) { - for (Host.StateListener listener : listeners) - listener.onDown(host); - } - - // Don't start a reconnection if we ignore the node anyway (JAVA-314) - if (distance == HostDistance.IGNORED || !startReconnection) - return; - - startPeriodicReconnectionAttempt(host, isHostAddition); - } finally { - host.notificationsLock.unlock(); - } + } finally { + host.notificationsLock.unlock(); } - void startPeriodicReconnectionAttempt(final Host host, final boolean isHostAddition) { - new AbstractReconnectionHandler(host.toString(), reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt) { - - @Override - protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - return connectionFactory.open(host); - } - - @Override - protected void onReconnection(Connection connection) { - // Make sure we have up-to-date infos on that host before adding it (so we typically - // catch that an upgraded node uses a new cassandra version). - if (controlConnection.refreshNodeInfo(host)) { - logger.debug("Successful reconnection to {}, setting host UP", host); - try { - if (isHostAddition) { - onAdd(host, connection); - submitNodeListRefresh(); - } else - onUp(host, connection); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - logger.error("Unexpected error while setting node up", e); - } - } else { - logger.debug("Not enough info for {}, ignoring host", host); - connection.closeAsync(); - } - } - - @Override - protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - if (logger.isDebugEnabled()) - logger.debug("Failed reconnection to {} ({}), scheduling retry in {} milliseconds", host, e.getMessage(), nextDelayMs); - return true; - } + } finally { + if (reusedConnection != null && !reusedConnection.hasOwner()) reusedConnection.closeAsync(); + } + } - @Override - protected boolean onUnknownException(Exception e, long nextDelayMs) { - logger.error(String.format("Unknown error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); - return true; - } + public ListenableFuture triggerOnRemove(final Host host) { + if (!isClosed()) { + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + onRemove(host); + } + }); + } else { + return MoreFutures.VOID_SUCCESS; + } + } - @Override - protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { - logger.error(String.format("Authentication error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); - return true; - } + // Use triggerOnRemove unless you're sure you want to run this on the current thread. 
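+ // onRemove marks the host down, removes it from the load balancing policy, the control connection and every session, and finally notifies the registered state listeners.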
+ private void onRemove(Host host) throws InterruptedException, ExecutionException { + if (isClosed()) return; - }.start(); - } + boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!locked) { + logger.warn( + "Could not acquire notifications lock within {} seconds, ignoring REMOVE notification for {}", + NOTIF_LOCK_TIMEOUT_SECONDS, + host); + return; + } + try { - void startSingleReconnectionAttempt(final Host host) { - if (isClosed() || host.isUp()) - return; + host.setDown(); - logger.debug("Scheduling one-time reconnection to {}", host); + Host.statesLogger.debug("[{}] removing host", host); - // Setting an initial delay of 0 to start immediately, and all the exception handlers return false to prevent further attempts - new AbstractReconnectionHandler(host.toString(), reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt, 0) { + loadBalancingPolicy().onRemove(host); + controlConnection.onRemove(host); + for (SessionManager s : sessions) s.onRemove(host); - @Override - protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - return connectionFactory.open(host); - } + for (Host.StateListener listener : listeners) listener.onRemove(host); + } finally { + host.notificationsLock.unlock(); + } + } - @Override - protected void onReconnection(Connection connection) { - // Make sure we have up-to-date infos on that host before adding it (so we typically - // catch that an upgraded node uses a new cassandra version). - if (controlConnection.refreshNodeInfo(host)) { - logger.debug("Successful reconnection to {}, setting host UP", host); - try { - onUp(host, connection); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - logger.error("Unexpected error while setting node up", e); - } - } else { - logger.debug("Not enough info for {}, ignoring host", host); - connection.closeAsync(); - } - } + public void signalHostDown(Host host, boolean isHostAddition) { + // Don't mark the node down until we've fully initialized the controlConnection as this might + // mess up with + // the protocol detection + if (!isFullyInit || isClosed()) return; - @Override - protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - if (logger.isDebugEnabled()) - logger.debug("Failed one-time reconnection to {} ({})", host, e.getMessage()); - return false; - } + triggerOnDown(host, isHostAddition, true); + } - @Override - protected boolean onUnknownException(Exception e, long nextDelayMs) { - logger.error(String.format("Unknown error during one-time reconnection to %s", host), e); - return false; - } + public void removeHost(Host host, boolean isInitialConnection) { + if (host == null) return; - @Override - protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { - logger.error(String.format("Authentication error during one-time reconnection to %s", host), e); - return false; - } - }.start(); + if (metadata.remove(host)) { + if (isInitialConnection) { + logger.warn( + "You listed {} in your contact points, but it wasn't found in the control host's system.peers at startup", + host); + } else { + logger.info("Cassandra host {} removed", host); + triggerOnRemove(host); } + } + } - public ListenableFuture triggerOnAdd(final Host host) { - if (!isClosed()) { - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void 
runMayThrow() throws InterruptedException, ExecutionException { - onAdd(host, null); - } - }); - } else { - return MoreFutures.VOID_SUCCESS; - } - } + public void ensurePoolsSizing() { + if (protocolVersion().compareTo(ProtocolVersion.V3) >= 0) return; - // Use triggerOnAdd unless you're sure you want to run this on the current thread. - private void onAdd(final Host host, Connection reusedConnection) throws InterruptedException, ExecutionException { - if (isClosed()) - return; + for (SessionManager session : sessions) { + for (HostConnectionPool pool : session.pools.values()) pool.ensureCoreConnections(); + } + } - if (!host.supports(connectionFactory.protocolVersion)) { - logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); - return; - } + public PreparedStatement addPrepared(PreparedStatement stmt) { + PreparedStatement previous = + preparedQueries.putIfAbsent(stmt.getPreparedId().boundValuesMetadata.id, stmt); + if (previous != null) { + logger.warn( + "Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. " + + "Consider preparing the statement only once. Query='{}'", + stmt.getQueryString()); + + // The one object in the cache will get GCed once it's not referenced by the client anymore + // since we use a weak reference. + // So we need to make sure that the instance we do return to the user is the one that is in + // the cache. + // However if the result metadata changed since the last PREPARE call, this also needs to be + // updated. + previous.getPreparedId().resultSetMetadata = stmt.getPreparedId().resultSetMetadata; + return previous; + } + return stmt; + } + /** + * @param reusedConnection an existing connection (from a reconnection attempt) that we want to + * reuse to prepare the statements (might be null). + * @return a connection that the rest of the initialization process can use (it will be made + * part of a connection pool). Can be reusedConnection, or one that was open in the method. + */ + private Connection prepareAllQueries(Host host, Connection reusedConnection) + throws InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + if (preparedQueries.isEmpty()) return reusedConnection; + + logger.debug( + "Preparing {} prepared queries on newly up node {}", preparedQueries.size(), host); + Connection connection = null; + try { + connection = (reusedConnection == null) ? connectionFactory.open(host) : reusedConnection; + + // Furthermore, along with each prepared query we keep the current keyspace at the time of + // preparation + // as we need to make it is the same when we re-prepare on new/restarted nodes. Most query + // will use the + // same keyspace so keeping it each time is slightly wasteful, but this doesn't really + // matter and is + // simpler. Besides, we do avoid in prepareAllQueries to not set the current keyspace more + // than needed. + + // We need to make sure we prepared every query with the right current keyspace, i.e. the + // one originally + // used for preparing it. However, since we are likely that all prepared query belong to + // only a handful + // of different keyspace (possibly only one), and to avoid setting the current keyspace more + // than needed, + // we first sort the query per keyspace. + SetMultimap perKeyspace = HashMultimap.create(); + for (PreparedStatement ps : preparedQueries.values()) { + // It's possible for a query to not have a current keyspace. 
But since null doesn't work + // well as + // map keys, we use the empty string instead (that is not a valid keyspace name). + String keyspace = ps.getQueryKeyspace() == null ? "" : ps.getQueryKeyspace(); + perKeyspace.put(keyspace, ps.getQueryString()); + } + + for (String keyspace : perKeyspace.keySet()) { + // Empty string mean no particular keyspace to set + if (!keyspace.isEmpty()) connection.setKeyspace(keyspace); + + List futures = + new ArrayList(preparedQueries.size()); + for (String query : perKeyspace.get(keyspace)) { + futures.add(connection.write(new Requests.Prepare(query))); + } + for (Connection.Future future : futures) { try { - - boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); - if (!locked) { - logger.warn("Could not acquire notifications lock within {} seconds, ignoring ADD notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); - return; - } - try { - Host.statesLogger.debug("[{}] adding host", host); - - // Adds to the load balancing first and foremost, as doing so might change the decision - // it will make for distance() on that node (not likely but we leave that possibility). - // This does mean the policy may start returning that node for query plan, but as long - // as no pools have been created (below) this will be ignored by RequestHandler so it's fine. - loadBalancingPolicy().onAdd(host); - - // Next, if the host should be ignored, well, ignore it. - if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) { - // We still mark the node UP though as it should be (and notifiy the listeners). - // We'll mark it down if we have a notification anyway and we've documented that especially - // for IGNORED hosts, the isUp() method was a best effort guess - host.setUp(); - for (Host.StateListener listener : listeners) - listener.onAdd(host); - return; - } - - try { - reusedConnection = prepareAllQueries(host, reusedConnection); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // Don't propagate because we don't want to prevent other listener to run - } catch (UnsupportedProtocolVersionException e) { - logUnsupportedVersionProtocol(host, e.getUnsupportedVersion()); - return; - } catch (ClusterNameMismatchException e) { - logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); - return; - } - - controlConnection.onAdd(host); - - List> futures = Lists.newArrayListWithCapacity(sessions.size()); - for (SessionManager s : sessions) - futures.add(s.maybeAddPool(host, reusedConnection)); - - try { - // Only mark the node up once all session have added their pool (if the load-balancing - // policy says it should), so that Host.isUp() don't return true before we're reconnected - // to the node. - List poolCreationResults = Futures.allAsList(futures).get(); - - // If any of the creation failed, they will have signaled a connection failure - // which will trigger a reconnection to the node. So don't bother marking UP. 
- if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { - logger.debug("Connection pool cannot be created, not marking {} UP", host); - return; - } - - host.setUp(); - - for (Host.StateListener listener : listeners) - listener.onAdd(host); - - } catch (ExecutionException e) { - Throwable t = e.getCause(); - // That future is not really supposed to throw unexpected exceptions - if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) - logger.error("Unexpected error while adding node: while this shouldn't happen, this shouldn't be critical", t); - } - - // Now, check if there isn't pools to create/remove following the addition. - // We do that now only so that it's not called before we've set the node up. - for (SessionManager s : sessions) - s.updateCreatedPools().get(); - - } finally { - host.notificationsLock.unlock(); - } - - } finally { - if (reusedConnection != null && !reusedConnection.hasOwner()) - reusedConnection.closeAsync(); - } - } - - public ListenableFuture triggerOnRemove(final Host host) { - if (!isClosed()) { - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws InterruptedException, ExecutionException { - onRemove(host); - } - }); - } else { - return MoreFutures.VOID_SUCCESS; - } - } - - // Use triggerOnRemove unless you're sure you want to run this on the current thread. - private void onRemove(Host host) throws InterruptedException, ExecutionException { - if (isClosed()) - return; - - boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); - if (!locked) { - logger.warn("Could not acquire notifications lock within {} seconds, ignoring REMOVE notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); - return; + future.get(); + } catch (ExecutionException e) { + // This "might" happen if we drop a CF but haven't removed it's prepared queries + // (which we don't do + // currently). It's not a big deal however as if it's a more serious problem it'll + // show up later when + // the query is tried for execution. + logger.debug("Unexpected error while preparing queries on new/newly up host", e); } - try { - - host.setDown(); - - Host.statesLogger.debug("[{}] removing host", host); + } + } + + return connection; + } catch (ConnectionException e) { + // Ignore, not a big deal + if (connection != null) connection.closeAsync(); + return null; + } catch (AuthenticationException e) { + // That's a bad news, but ignore at this point + if (connection != null) connection.closeAsync(); + return null; + } catch (BusyConnectionException e) { + // Ignore, not a big deal + // In theory the problem is transient so the connection could be reused later, but if the + // core pool size is 1 + // it's better to close this one so that we start with a fresh connection. 
+ if (connection != null) connection.closeAsync(); + return null; + } + } - loadBalancingPolicy().onRemove(host); - controlConnection.onRemove(host); - for (SessionManager s : sessions) - s.onRemove(host); + ListenableFuture submitSchemaRefresh( + final SchemaElement targetType, + final String targetKeyspace, + final String targetName, + final List targetSignature) { + SchemaRefreshRequest request = + new SchemaRefreshRequest(targetType, targetKeyspace, targetName, targetSignature); + logger.trace("Submitting schema refresh: {}", request); + return schemaRefreshRequestDebouncer.eventReceived(request); + } - for (Host.StateListener listener : listeners) - listener.onRemove(host); - } finally { - host.notificationsLock.unlock(); - } - } + ListenableFuture submitNodeListRefresh() { + logger.trace("Submitting node list and token map refresh"); + return nodeListRefreshRequestDebouncer.eventReceived(new NodeListRefreshRequest()); + } - public void signalHostDown(Host host, boolean isHostAddition) { - // Don't mark the node down until we've fully initialized the controlConnection as this might mess up with - // the protocol detection - if (!isFullyInit || isClosed()) - return; + ListenableFuture submitNodeRefresh(InetSocketAddress address, HostEvent eventType) { + NodeRefreshRequest request = new NodeRefreshRequest(address, eventType); + logger.trace("Submitting node refresh: {}", request); + return nodeRefreshRequestDebouncer.eventReceived(request); + } - triggerOnDown(host, isHostAddition, true); - } + // refresh the schema using the provided connection, and notice the future with the provided + // resultset once done + public void refreshSchemaAndSignal( + final Connection connection, + final DefaultResultSetFuture future, + final ResultSet rs, + final SchemaElement targetType, + final String targetKeyspace, + final String targetName, + final List targetSignature) { + if (logger.isDebugEnabled()) + logger.debug( + "Refreshing schema for {}{}", + targetType == null ? "everything" : targetKeyspace, + (targetType == KEYSPACE) ? "" : "." + targetName + " (" + targetType + ")"); + + maybeRefreshSchemaAndSignal( + connection, future, rs, targetType, targetKeyspace, targetName, targetSignature); + } - public void removeHost(Host host, boolean isInitialConnection) { - if (host == null) - return; + public void waitForSchemaAgreementAndSignal( + final Connection connection, final DefaultResultSetFuture future, final ResultSet rs) { + maybeRefreshSchemaAndSignal(connection, future, rs, null, null, null, null); + } - if (metadata.remove(host)) { - if (isInitialConnection) { - logger.warn("You listed {} in your contact points, but it wasn't found in the control host's system.peers at startup", host); + private void maybeRefreshSchemaAndSignal( + final Connection connection, + final DefaultResultSetFuture future, + final ResultSet rs, + final SchemaElement targetType, + final String targetKeyspace, + final String targetName, + final List targetSignature) { + final boolean refreshSchema = + (targetKeyspace != null); // if false, only wait for schema agreement + + executor.submit( + new Runnable() { + @Override + public void run() { + boolean schemaInAgreement = false; + try { + // Before refreshing the schema, wait for schema agreement so + // that querying a table just after having created it don't fail. + schemaInAgreement = + ControlConnection.waitForSchemaAgreement(connection, Cluster.Manager.this); + if (!schemaInAgreement) + logger.warn( + "No schema agreement from live replicas after {} s. 
The schema may not be up to date on some nodes.", + configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds()); + + ListenableFuture schemaReady; + if (refreshSchema) { + schemaReady = + submitSchemaRefresh(targetType, targetKeyspace, targetName, targetSignature); + // JAVA-1120: skip debouncing delay and force immediate delivery + if (!schemaReady.isDone()) + schemaRefreshRequestDebouncer.scheduleImmediateDelivery(); } else { - logger.info("Cassandra host {} removed", host); - triggerOnRemove(host); + schemaReady = MoreFutures.VOID_SUCCESS; } + final boolean finalSchemaInAgreement = schemaInAgreement; + schemaReady.addListener( + new Runnable() { + @Override + public void run() { + rs.getExecutionInfo().setSchemaInAgreement(finalSchemaInAgreement); + future.setResult(rs); + } + }, + GuavaCompatibility.INSTANCE.sameThreadExecutor()); + + } catch (Exception e) { + logger.warn("Error while waiting for schema agreement", e); + // This is not fatal, complete the future anyway + rs.getExecutionInfo().setSchemaInAgreement(schemaInAgreement); + future.setResult(rs); + } } - } - - public void ensurePoolsSizing() { - if (protocolVersion().compareTo(ProtocolVersion.V3) >= 0) - return; - - for (SessionManager session : sessions) { - for (HostConnectionPool pool : session.pools.values()) - pool.ensureCoreConnections(); - } - } - - public PreparedStatement addPrepared(PreparedStatement stmt) { - PreparedStatement previous = preparedQueries.putIfAbsent(stmt.getPreparedId().id, stmt); - if (previous != null) { - logger.warn("Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. " - + "Consider preparing the statement only once. Query='{}'", stmt.getQueryString()); - - // The one object in the cache will get GCed once it's not referenced by the client anymore since we use a weak reference. - // So we need to make sure that the instance we do return to the user is the one that is in the cache. - return previous; - } - return stmt; - } - - /** - * @param reusedConnection an existing connection (from a reconnection attempt) that we want to - * reuse to prepare the statements (might be null). - * @return a connection that the rest of the initialization process can use (it will be made part - * of a connection pool). Can be reusedConnection, or one that was open in the method. - */ - private Connection prepareAllQueries(Host host, Connection reusedConnection) throws InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - if (preparedQueries.isEmpty()) - return reusedConnection; - - logger.debug("Preparing {} prepared queries on newly up node {}", preparedQueries.size(), host); - Connection connection = null; - try { - connection = (reusedConnection == null) - ? connectionFactory.open(host) - : reusedConnection; - - try { - ControlConnection.waitForSchemaAgreement(connection, this); - } catch (ExecutionException e) { - // As below, just move on - } - - // Furthermore, along with each prepared query we keep the current keyspace at the time of preparation - // as we need to make it is the same when we re-prepare on new/restarted nodes. Most query will use the - // same keyspace so keeping it each time is slightly wasteful, but this doesn't really matter and is - // simpler. Besides, we do avoid in prepareAllQueries to not set the current keyspace more than needed. - - // We need to make sure we prepared every query with the right current keyspace, i.e. the one originally - // used for preparing it. 
However, since we are likely that all prepared query belong to only a handful - // of different keyspace (possibly only one), and to avoid setting the current keyspace more than needed, - // we first sort the query per keyspace. - SetMultimap perKeyspace = HashMultimap.create(); - for (PreparedStatement ps : preparedQueries.values()) { - // It's possible for a query to not have a current keyspace. But since null doesn't work well as - // map keys, we use the empty string instead (that is not a valid keyspace name). - String keyspace = ps.getQueryKeyspace() == null ? "" : ps.getQueryKeyspace(); - perKeyspace.put(keyspace, ps.getQueryString()); - } + }); + } - for (String keyspace : perKeyspace.keySet()) { - // Empty string mean no particular keyspace to set - if (!keyspace.isEmpty()) - connection.setKeyspace(keyspace); - - List futures = new ArrayList(preparedQueries.size()); - for (String query : perKeyspace.get(keyspace)) { - futures.add(connection.write(new Requests.Prepare(query))); - } - for (Connection.Future future : futures) { - try { - future.get(); - } catch (ExecutionException e) { - // This "might" happen if we drop a CF but haven't removed it's prepared queries (which we don't do - // currently). It's not a big deal however as if it's a more serious problem it'll show up later when - // the query is tried for execution. - logger.debug("Unexpected error while preparing queries on new/newly up host", e); + // Called when some message has been received but has been initiated from the server (streamId < + // 0). + // This is called on an I/O thread, so all blocking operation must be done on an executor. + @Override + public void handle(Message.Response response) { + + if (!(response instanceof Responses.Event)) { + logger.error("Received an unexpected message from the server: {}", response); + return; + } + + final ProtocolEvent event = ((Responses.Event) response).event; + + logger.debug("Received event {}, scheduling delivery", response); + + switch (event.type) { + case TOPOLOGY_CHANGE: + ProtocolEvent.TopologyChange tpc = (ProtocolEvent.TopologyChange) event; + Host.statesLogger.debug("[{}] received event {}", tpc.node, tpc.change); + // Do NOT translate the address, it will be matched against Host.getBroadcastRpcAddress() + // to find the target host. + switch (tpc.change) { + case REMOVED_NODE: + submitNodeRefresh(tpc.node, HostEvent.REMOVED); + break; + default: + // If a node was added, we don't have enough information to create a new Host (we are + // missing it's ID) so trigger a full refresh + submitNodeListRefresh(); + break; + } + break; + case STATUS_CHANGE: + ProtocolEvent.StatusChange stc = (ProtocolEvent.StatusChange) event; + Host.statesLogger.debug("[{}] received event {}", stc.node, stc.status); + // Do NOT translate the address, it will be matched against Host.getBroadcastRpcAddress() + // to find the target host. 
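+ // Status changes are not applied inline: they are funneled through submitNodeRefresh, which passes a NodeRefreshRequest to the node refresh debouncer.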
+ switch (stc.status) { + case UP: + submitNodeRefresh(stc.node, HostEvent.UP); + break; + case DOWN: + submitNodeRefresh(stc.node, HostEvent.DOWN); + break; + } + break; + case SCHEMA_CHANGE: + if (!configuration.getQueryOptions().isMetadataEnabled()) return; + + ProtocolEvent.SchemaChange scc = (ProtocolEvent.SchemaChange) event; + switch (scc.change) { + case CREATED: + case UPDATED: + submitSchemaRefresh( + scc.targetType, scc.targetKeyspace, scc.targetName, scc.targetSignature); + break; + case DROPPED: + if (scc.targetType == KEYSPACE) { + final KeyspaceMetadata removedKeyspace = + manager.metadata.removeKeyspace(scc.targetKeyspace); + if (removedKeyspace != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnKeyspaceRemoved(removedKeyspace); } - } + }); } - - return connection; - } catch (ConnectionException e) { - // Ignore, not a big deal - if (connection != null) - connection.closeAsync(); - return null; - } catch (AuthenticationException e) { - // That's a bad news, but ignore at this point - if (connection != null) - connection.closeAsync(); - return null; - } catch (BusyConnectionException e) { - // Ignore, not a big deal - // In theory the problem is transient so the connection could be reused later, but if the core pool size is 1 - // it's better to close this one so that we start with a fresh connection. - if (connection != null) - connection.closeAsync(); - return null; - } - } - - ListenableFuture submitSchemaRefresh(final SchemaElement targetType, final String targetKeyspace, final String targetName, final List targetSignature) { - SchemaRefreshRequest request = new SchemaRefreshRequest(targetType, targetKeyspace, targetName, targetSignature); - logger.trace("Submitting schema refresh: {}", request); - return schemaRefreshRequestDebouncer.eventReceived(request); - } - - ListenableFuture submitNodeListRefresh() { - logger.trace("Submitting node list and token map refresh"); - return nodeListRefreshRequestDebouncer.eventReceived(new NodeListRefreshRequest()); - } - - ListenableFuture submitNodeRefresh(InetSocketAddress address, HostEvent eventType) { - NodeRefreshRequest request = new NodeRefreshRequest(address, eventType); - logger.trace("Submitting node refresh: {}", request); - return nodeRefreshRequestDebouncer.eventReceived(request); - } - - // refresh the schema using the provided connection, and notice the future with the provided resultset once done - public void refreshSchemaAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement targetType, final String targetKeyspace, final String targetName, final List targetSignature) { - if (logger.isDebugEnabled()) - logger.debug("Refreshing schema for {}{}", - targetType == null ? "everything" : targetKeyspace, - (targetType == KEYSPACE) ? "" : "." 
+ targetName + " (" + targetType + ")"); - - maybeRefreshSchemaAndSignal(connection, future, rs, targetType, targetKeyspace, targetName, targetSignature); - } - - public void waitForSchemaAgreementAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs) { - maybeRefreshSchemaAndSignal(connection, future, rs, null, null, null, null); - } - - private void maybeRefreshSchemaAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement targetType, final String targetKeyspace, final String targetName, final List targetSignature) { - final boolean refreshSchema = (targetKeyspace != null); // if false, only wait for schema agreement - - executor.submit(new Runnable() { - @Override - public void run() { - boolean schemaInAgreement = false; - try { - // Before refreshing the schema, wait for schema agreement so - // that querying a table just after having created it don't fail. - schemaInAgreement = ControlConnection.waitForSchemaAgreement(connection, Cluster.Manager.this); - if (!schemaInAgreement) - logger.warn("No schema agreement from live replicas after {} s. The schema may not be up to date on some nodes.", configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds()); - - ListenableFuture schemaReady; - if (refreshSchema) { - schemaReady = submitSchemaRefresh(targetType, targetKeyspace, targetName, targetSignature); - // JAVA-1120: skip debouncing delay and force immediate delivery - if (!schemaReady.isDone()) - schemaRefreshRequestDebouncer.scheduleImmediateDelivery(); - } else { - schemaReady = MoreFutures.VOID_SUCCESS; + } else { + KeyspaceMetadata keyspace = manager.metadata.keyspaces.get(scc.targetKeyspace); + if (keyspace == null) { + logger.warn( + "Received a DROPPED notification for {} {}.{}, but this keyspace is unknown in our metadata", + scc.targetType, + scc.targetKeyspace, + scc.targetName); + } else { + switch (scc.targetType) { + case TABLE: + // we can't tell whether it's a table or a view, + // but since two objects cannot have the same name, + // try removing both + final TableMetadata removedTable = keyspace.removeTable(scc.targetName); + if (removedTable != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnTableRemoved(removedTable); + } + }); + } else { + final MaterializedViewMetadata removedView = + keyspace.removeMaterializedView(scc.targetName); + if (removedView != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnMaterializedViewRemoved(removedView); + } + }); } - final boolean finalSchemaInAgreement = schemaInAgreement; - schemaReady.addListener(new Runnable() { - @Override - public void run() { - rs.getExecutionInfo().setSchemaInAgreement(finalSchemaInAgreement); - future.setResult(rs); - } - }, GuavaCompatibility.INSTANCE.sameThreadExecutor()); - - } catch (Exception e) { - logger.warn("Error while waiting for schema agreement", e); - // This is not fatal, complete the future anyway - rs.getExecutionInfo().setSchemaInAgreement(schemaInAgreement); - future.setResult(rs); - } + } + break; + case TYPE: + final UserType removedType = keyspace.removeUserType(scc.targetName); + if (removedType != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnUserTypeRemoved(removedType); + } + }); + } + break; + case FUNCTION: + final FunctionMetadata removedFunction = + keyspace.removeFunction( + 
Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); + if (removedFunction != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnFunctionRemoved(removedFunction); + } + }); + } + break; + case AGGREGATE: + final AggregateMetadata removedAggregate = + keyspace.removeAggregate( + Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); + if (removedAggregate != null) { + executor.submit( + new Runnable() { + @Override + public void run() { + manager.metadata.triggerOnAggregateRemoved(removedAggregate); + } + }); + } + break; + } } - }); - } - - // Called when some message has been received but has been initiated from the server (streamId < 0). - // This is called on an I/O thread, so all blocking operation must be done on an executor. - @Override - public void handle(Message.Response response) { - - if (!(response instanceof Responses.Event)) { - logger.error("Received an unexpected message from the server: {}", response); - return; - } - - final ProtocolEvent event = ((Responses.Event) response).event; - - logger.debug("Received event {}, scheduling delivery", response); - - switch (event.type) { - case TOPOLOGY_CHANGE: - ProtocolEvent.TopologyChange tpc = (ProtocolEvent.TopologyChange) event; - InetSocketAddress tpAddr = translateAddress(tpc.node.getAddress()); - Host.statesLogger.debug("[{}] received event {}", tpAddr, tpc.change); - switch (tpc.change) { - case NEW_NODE: - submitNodeRefresh(tpAddr, HostEvent.ADDED); - break; - case REMOVED_NODE: - submitNodeRefresh(tpAddr, HostEvent.REMOVED); - break; - case MOVED_NODE: - submitNodeListRefresh(); - break; - } - break; - case STATUS_CHANGE: - ProtocolEvent.StatusChange stc = (ProtocolEvent.StatusChange) event; - InetSocketAddress stAddr = translateAddress(stc.node.getAddress()); - Host.statesLogger.debug("[{}] received event {}", stAddr, stc.status); - switch (stc.status) { - case UP: - submitNodeRefresh(stAddr, HostEvent.UP); - break; - case DOWN: - submitNodeRefresh(stAddr, HostEvent.DOWN); - break; - } - break; - case SCHEMA_CHANGE: - if (!configuration.getQueryOptions().isMetadataEnabled()) - return; - - ProtocolEvent.SchemaChange scc = (ProtocolEvent.SchemaChange) event; - switch (scc.change) { - case CREATED: - case UPDATED: - submitSchemaRefresh(scc.targetType, scc.targetKeyspace, scc.targetName, scc.targetSignature); - break; - case DROPPED: - if (scc.targetType == KEYSPACE) { - final KeyspaceMetadata removedKeyspace = manager.metadata.removeKeyspace(scc.targetKeyspace); - if (removedKeyspace != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnKeyspaceRemoved(removedKeyspace); - } - }); - } - } else { - KeyspaceMetadata keyspace = manager.metadata.keyspaces.get(scc.targetKeyspace); - if (keyspace == null) { - logger.warn("Received a DROPPED notification for {} {}.{}, but this keyspace is unknown in our metadata", - scc.targetType, scc.targetKeyspace, scc.targetName); - } else { - switch (scc.targetType) { - case TABLE: - // we can't tell whether it's a table or a view, - // but since two objects cannot have the same name, - // try removing both - final TableMetadata removedTable = keyspace.removeTable(scc.targetName); - if (removedTable != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnTableRemoved(removedTable); - } - }); - } else { - final MaterializedViewMetadata removedView = keyspace.removeMaterializedView(scc.targetName); - if 
(removedView != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnMaterializedViewRemoved(removedView); - } - }); - } - } - break; - case TYPE: - final UserType removedType = keyspace.removeUserType(scc.targetName); - if (removedType != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnUserTypeRemoved(removedType); - } - }); - } - break; - case FUNCTION: - final FunctionMetadata removedFunction = keyspace.removeFunction(Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); - if (removedFunction != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnFunctionRemoved(removedFunction); - } - }); - } - break; - case AGGREGATE: - final AggregateMetadata removedAggregate = keyspace.removeAggregate(Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); - if (removedAggregate != null) { - executor.submit(new Runnable() { - @Override - public void run() { - manager.metadata.triggerOnAggregateRemoved(removedAggregate); - } - }); - } - break; - } - } - } - break; - } - break; - } - } - - void refreshConnectedHosts() { - // Deal first with the control connection: if it's connected to a node that is not LOCAL, try - // reconnecting (thus letting the loadBalancingPolicy pick a better node) - Host ccHost = controlConnection.connectedHost(); - if (ccHost == null || loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) - controlConnection.triggerReconnect(); - - try { - for (SessionManager s : sessions) - Uninterruptibles.getUninterruptibly(s.updateCreatedPools()); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } - } - - void refreshConnectedHost(Host host) { - // Deal with the control connection if it was using this host - Host ccHost = controlConnection.connectedHost(); - if (ccHost == null || ccHost.equals(host) && loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) - controlConnection.triggerReconnect(); - - for (SessionManager s : sessions) - s.updateCreatedPools(host); - } - - private class ClusterCloseFuture extends CloseFuture.Forwarding { + } + break; + } + break; + } + } - ClusterCloseFuture(List futures) { - super(futures); - } + void refreshConnectedHosts() { + // Deal first with the control connection: if it's connected to a node that is not LOCAL, try + // reconnecting (thus letting the loadBalancingPolicy pick a better node) + Host ccHost = controlConnection.connectedHost(); + if (ccHost == null || loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) + controlConnection.triggerReconnect(); + + try { + for (SessionManager s : sessions) + Uninterruptibles.getUninterruptibly(s.updateCreatedPools()); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } + } - @Override - public CloseFuture force() { - // The only ExecutorService we haven't forced yet is executor - shutdownNow(executor); - return super.force(); - } + void refreshConnectedHost(Host host) { + // Deal with the control connection if it was using this host + Host ccHost = controlConnection.connectedHost(); + if (ccHost == null + || ccHost.equals(host) && loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) + controlConnection.triggerReconnect(); - @Override - protected void onFuturesDone() { - /* - * When we reach this, all sessions should be shutdown. We've also started a shutdown - * of the thread pools used by this object. 
Remains 2 things before marking the shutdown - * as done: - * 1) we need to wait for the completion of the shutdown of the Cluster threads pools. - * 2) we need to shutdown the Connection.Factory, i.e. the executors used by Netty. - * But at least for 2), we must not do it on the current thread because that could be - * a netty worker, which we're going to shutdown. So creates some thread for that. - */ - (new Thread("Shutdown-checker") { - @Override - public void run() { - // Just wait indefinitely on the the completion of the thread pools. Provided the user - // call force(), we'll never really block forever. - try { - reconnectionExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); - scheduledTasksExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); - blockingExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); - - // Some of the jobs on the executors can be doing query stuff, so close the - // connectionFactory at the very last - connectionFactory.shutdown(); - - reaper.shutdown(); - - set(null); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - setException(e); - } - } - }).start(); - } - } + for (SessionManager s : sessions) s.updateCreatedPools(host); + } - private class CleanupIdleConnectionsTask implements Runnable { - @Override - public void run() { + private class ClusterCloseFuture extends CloseFuture.Forwarding { + + ClusterCloseFuture(List futures) { + super(futures); + } + + @Override + public CloseFuture force() { + // The only ExecutorService we haven't forced yet is executor + shutdownNow(executor); + return super.force(); + } + + @Override + protected void onFuturesDone() { + /* + * When we reach this, all sessions should be shut down. We've also started a shutdown + * of the thread pools used by this object. Two things remain before marking the shutdown + * as done: + * 1) we need to wait for the completion of the shutdown of the Cluster thread pools. + * 2) we need to shut down the Connection.Factory, i.e. the executors used by Netty. + * But at least for 2), we must not do it on the current thread because that could be + * a netty worker, which we're going to shut down. So create a thread for that. + */ + (new Thread("Shutdown-checker") { + @Override + public void run() { + // Just wait indefinitely on the completion of the thread pools. Provided the + // user + // calls force(), we'll never really block forever.
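+ // The executors, the connection factory and the reaper are each null-checked below before being awaited or shut down.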
try { - long now = System.currentTimeMillis(); - for (SessionManager session : sessions) { - session.cleanupIdleConnections(now); - } - } catch (Exception e) { - logger.warn("Error while trashing idle connections", e); + if (reconnectionExecutor != null) { + reconnectionExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + } + if (scheduledTasksExecutor != null) { + scheduledTasksExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + } + if (executor != null) { + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + } + if (blockingExecutor != null) { + blockingExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + } + + // Some of the jobs on the executors can be doing query stuff, so close the + // connectionFactory at the very last + if (connectionFactory != null) { + connectionFactory.shutdown(); + } + if (reaper != null) { + reaper.shutdown(); + } + set(null); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + setException(e); } - } - } - - private class SchemaRefreshRequest { - - private final SchemaElement targetType; - private final String targetKeyspace; - private final String targetName; - private final List targetSignature; - - public SchemaRefreshRequest(SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature) { - this.targetType = targetType; - this.targetKeyspace = Strings.emptyToNull(targetKeyspace); - this.targetName = Strings.emptyToNull(targetName); - this.targetSignature = targetSignature; - } - - /** - * Coalesce schema refresh requests. - * The algorithm is simple: if more than 2 keyspaces - * need refresh, then refresh the entire schema; - * otherwise if more than 2 elements in the same keyspace - * need refresh, then refresh the entire keyspace. - * - * @param that the other request to merge with the current one. - * @return A coalesced request - */ - SchemaRefreshRequest coalesce(SchemaRefreshRequest that) { - if (this.targetType == null || that.targetType == null) - return new SchemaRefreshRequest(null, null, null, null); - if (!this.targetKeyspace.equals(that.targetKeyspace)) - return new SchemaRefreshRequest(null, null, null, null); - if (this.targetName == null || that.targetName == null) - return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); - if (!this.targetName.equals(that.targetName)) - return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); - return this; - } - - @Override - public String toString() { - if (this.targetType == null) - return "Refresh ALL"; - if (this.targetName == null) - return "Refresh keyspace " + targetKeyspace; - return String.format("Refresh %s %s.%s", targetType, targetKeyspace, targetName); - } - } - - private class SchemaRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback { - - @Override - public ListenableFuture deliver(final List events) { - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws InterruptedException, ExecutionException { - SchemaRefreshRequest coalesced = null; - for (SchemaRefreshRequest request : events) { - coalesced = coalesced == null ? 
request : coalesced.coalesce(request); - } - assert coalesced != null; - logger.trace("Coalesced schema refresh request: {}", coalesced); - controlConnection.refreshSchema(coalesced.targetType, coalesced.targetKeyspace, coalesced.targetName, coalesced.targetSignature); - } - }); - } - - } - - private class NodeRefreshRequest { - - private final InetSocketAddress address; + } + }) + .start(); + } + } - private final HostEvent eventType; + private class CleanupIdleConnectionsTask implements Runnable { + @Override + public void run() { + try { + long now = System.currentTimeMillis(); + for (SessionManager session : sessions) { + session.cleanupIdleConnections(now); + } + } catch (Exception e) { + logger.warn("Error while trashing idle connections", e); + } + } + } - private NodeRefreshRequest(InetSocketAddress address, HostEvent eventType) { - this.address = address; - this.eventType = eventType; - } + private class SchemaRefreshRequest { + + private final SchemaElement targetType; + private final String targetKeyspace; + private final String targetName; + private final List targetSignature; + + public SchemaRefreshRequest( + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature) { + this.targetType = targetType; + this.targetKeyspace = Strings.emptyToNull(targetKeyspace); + this.targetName = Strings.emptyToNull(targetName); + this.targetSignature = targetSignature; + } + + /** + * Coalesce schema refresh requests. The algorithm is simple: if more than 2 keyspaces need + * refresh, then refresh the entire schema; otherwise if more than 2 elements in the same + * keyspace need refresh, then refresh the entire keyspace. + * + * @param that the other request to merge with the current one. + * @return A coalesced request + */ + SchemaRefreshRequest coalesce(SchemaRefreshRequest that) { + if (this.targetType == null || that.targetType == null) + return new SchemaRefreshRequest(null, null, null, null); + if (!this.targetKeyspace.equals(that.targetKeyspace)) + return new SchemaRefreshRequest(null, null, null, null); + if (this.targetName == null || that.targetName == null) + return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); + if (!this.targetName.equals(that.targetName)) + return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); + return this; + } + + @Override + public String toString() { + if (this.targetType == null) return "Refresh ALL"; + if (this.targetName == null) return "Refresh keyspace " + targetKeyspace; + return String.format("Refresh %s %s.%s", targetType, targetKeyspace, targetName); + } + } - @Override - public String toString() { - return address + " " + eventType; - } + private class SchemaRefreshRequestDeliveryCallback + implements EventDebouncer.DeliveryCallback { + + @Override + public ListenableFuture deliver(final List events) { + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + SchemaRefreshRequest coalesced = null; + for (SchemaRefreshRequest request : events) { + coalesced = coalesced == null ? 
request : coalesced.coalesce(request); + } + assert coalesced != null; + logger.trace("Coalesced schema refresh request: {}", coalesced); + controlConnection.refreshSchema( + coalesced.targetType, + coalesced.targetKeyspace, + coalesced.targetName, + coalesced.targetSignature); + } + }); + } + } - } + private class NodeRefreshRequest { - private class NodeRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback { + private final InetSocketAddress address; - @Override - public ListenableFuture deliver(List events) { - Map hosts = new HashMap(); - // only keep the last event for each host - for (NodeRefreshRequest req : events) { - hosts.put(req.address, req.eventType); - } - List> futures = new ArrayList>(hosts.size()); - for (final Entry entry : hosts.entrySet()) { - InetSocketAddress address = entry.getKey(); - HostEvent eventType = entry.getValue(); - switch (eventType) { - case UP: - Host upHost = metadata.getHost(address); - if (upHost == null) { - upHost = metadata.add(address); - // If upHost is still null, it means we didn't know about it the line before but - // got beaten at adding it to the metadata by another thread. In that case, it's - // fine to let the other thread win and ignore the notification here - if (upHost == null) - continue; - futures.add(schedule(hostAdded(upHost))); - } else { - futures.add(schedule(hostUp(upHost))); - } - break; - case ADDED: - Host newHost = metadata.add(address); - if (newHost != null) { - futures.add(schedule(hostAdded(newHost))); - } else { - // If host already existed, retrieve it and check its state, if it's not up schedule a - // hostUp event. - Host existingHost = metadata.getHost(address); - if (!existingHost.isUp()) - futures.add(schedule(hostUp(existingHost))); - } - break; - case DOWN: - // Note that there is a slight risk we can receive the event late and thus - // mark the host down even though we already had reconnected successfully. - // But it is unlikely, and don't have too much consequence since we'll try reconnecting - // right away, so we favor the detection to make the Host.isUp method more reliable. - Host downHost = metadata.getHost(address); - if (downHost != null) { - // Only process DOWN events if we have no active connections to the host . Otherwise, we - // wait for the connections to fail. This is to prevent against a bad control host - // aggressively marking DOWN all of its peers. - if (downHost.convictionPolicy.hasActiveConnections()) { - logger.debug("Ignoring down event on {} because it still has active connections", downHost); - } else { - futures.add(execute(hostDown(downHost))); - } - } - break; - case REMOVED: - Host removedHost = metadata.getHost(address); - if (removedHost != null) - futures.add(execute(hostRemoved(removedHost))); - break; - } - } - return Futures.allAsList(futures); - } + private final HostEvent eventType; - private ListenableFuture execute(ExceptionCatchingRunnable task) { - return executor.submit(task); - } + private NodeRefreshRequest(InetSocketAddress address, HostEvent eventType) { + this.address = address; + this.eventType = eventType; + } - private ListenableFuture schedule(final ExceptionCatchingRunnable task) { - // Cassandra tends to send notifications for new/up nodes a bit early (it is triggered once - // gossip is up, but that is before the client-side server is up), so we add a delay - // (otherwise the connection will likely fail and have to be retry which is wasteful). 
- // This has been fixed by CASSANDRA-8236 and does not apply to protocol versions >= 4 - // and C* versions >= 2.2.0 - if (protocolVersion().compareTo(ProtocolVersion.V4) < 0) { - final SettableFuture future = SettableFuture.create(); - scheduledTasksExecutor.schedule(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws Exception { - ListenableFuture f = execute(task); - Futures.addCallback(f, new FutureCallback() { - @Override - public void onSuccess(Object result) { - future.set(null); - } + @Override + public String toString() { + return address + " " + eventType; + } + } - @Override - public void onFailure(Throwable t) { - future.setException(t); - } - }); - } - }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); - return future; + private class NodeRefreshRequestDeliveryCallback + implements EventDebouncer.DeliveryCallback { + + @Override + public ListenableFuture deliver(List events) { + Map hosts = new HashMap(); + // only keep the last event for each host + for (NodeRefreshRequest req : events) { + hosts.put(req.address, req.eventType); + } + List> futures = new ArrayList>(hosts.size()); + for (final Entry entry : hosts.entrySet()) { + InetSocketAddress address = entry.getKey(); + HostEvent eventType = entry.getValue(); + switch (eventType) { + case UP: + Host upHost = metadata.getHost(address); + if (upHost == null) { + // We don't have enough information to create a new Host (we are missing it's ID) + // so trigger a full node refresh + submitNodeListRefresh(); + } else { + futures.add(schedule(hostUp(upHost))); + } + break; + case DOWN: + // Note that there is a slight risk we can receive the event late and thus + // mark the host down even though we already had reconnected successfully. + // But it is unlikely, and don't have too much consequence since we'll try + // reconnecting + // right away, so we favor the detection to make the Host.isUp method more reliable. + Host downHost = metadata.getHost(address); + if (downHost != null) { + // Only process DOWN events if we have no active connections to the host . + // Otherwise, we + // wait for the connections to fail. This is to prevent against a bad control host + // aggressively marking DOWN all of its peers. + if (downHost.convictionPolicy.hasActiveConnections()) { + logger.debug( + "Ignoring down event on {} because it still has active connections", + downHost); } else { - return execute(task); + futures.add(execute(hostDown(downHost))); } - } - - // Make sure we call controlConnection.refreshNodeInfo(host) - // so that we have up-to-date infos on that host before adding it (so we typically - // catch that an upgraded node uses a new cassandra version). 
- - private ExceptionCatchingRunnable hostAdded(final Host host) { - return new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws Exception { - if (controlConnection.refreshNodeInfo(host)) { - onAdd(host, null); - submitNodeListRefresh(); - } else { - logger.debug("Not enough info for {}, ignoring host", host); - } - } - }; - } - - private ExceptionCatchingRunnable hostUp(final Host host) { - return new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws Exception { - if (controlConnection.refreshNodeInfo(host)) { - onUp(host, null); - } else { - logger.debug("Not enough info for {}, ignoring host", host); + } + break; + case REMOVED: + Host removedHost = metadata.getHost(address); + if (removedHost != null) futures.add(execute(hostRemoved(removedHost))); + break; + } + } + return Futures.allAsList(futures); + } + + private ListenableFuture execute(ExceptionCatchingRunnable task) { + return executor.submit(task); + } + + private ListenableFuture schedule(final ExceptionCatchingRunnable task) { + // Cassandra tends to send notifications for new/up nodes a bit early (it is triggered once + // gossip is up, but that is before the client-side server is up), so we add a delay + // (otherwise the connection will likely fail and have to be retry which is wasteful). + // This has been fixed by CASSANDRA-8236 and does not apply to protocol versions >= 4 + // and C* versions >= 2.2.0 + if (protocolVersion().compareTo(ProtocolVersion.V4) < 0) { + final SettableFuture future = SettableFuture.create(); + scheduledTasksExecutor.schedule( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws Exception { + ListenableFuture f = execute(task); + GuavaCompatibility.INSTANCE.addCallback( + f, + new FutureCallback() { + @Override + public void onSuccess(Object result) { + future.set(null); } - } - }; - } - - private ExceptionCatchingRunnable hostDown(final Host host) { - return new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws Exception { - onDown(host, false, true); - } - }; - } - private ExceptionCatchingRunnable hostRemoved(final Host host) { - return new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws Exception { - if (metadata.remove(host)) { - logger.info("Cassandra host {} removed", host); - onRemove(host); - submitNodeListRefresh(); + @Override + public void onFailure(Throwable t) { + future.setException(t); } - } - }; - } - + }); + } + }, + NEW_NODE_DELAY_SECONDS, + TimeUnit.SECONDS); + return future; + } else { + return execute(task); } + } - private class NodeListRefreshRequest { - @Override - public String toString() { - return "Refresh node list and token map"; + private ExceptionCatchingRunnable hostUp(final Host host) { + return new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws Exception { + // Make sure we call controlConnection.refreshNodeInfo(host) + // so that we have up-to-date infos on that host before recreating the pools (so we + // typically catch that an upgraded node uses a new cassandra version). 
+ if (controlConnection.refreshNodeInfo(host)) { + onUp(host, null); + } else { + logger.debug("Not enough info for {}, ignoring host", host); } - } - - private class NodeListRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback { + } + }; + } + + private ExceptionCatchingRunnable hostDown(final Host host) { + return new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws Exception { + onDown(host, false, true); + } + }; + } - @Override - public ListenableFuture deliver(List events) { - // The number of received requests does not matter - // as long as one request is made, refresh the entire node list - return executor.submit(new ExceptionCatchingRunnable() { - @Override - public void runMayThrow() throws InterruptedException, ExecutionException { - controlConnection.refreshNodeListAndTokenMap(); - } - }); + private ExceptionCatchingRunnable hostRemoved(final Host host) { + return new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws Exception { + if (metadata.remove(host)) { + logger.info("Cassandra host {} removed", host); + onRemove(host); + submitNodeListRefresh(); } - } - + } + }; + } } - private enum HostEvent { - UP, DOWN, ADDED, REMOVED + private class NodeListRefreshRequest { + @Override + public String toString() { + return "Refresh node list and token map"; + } } - /** - * Periodically ensures that closed connections are properly terminated once they have no more pending requests. - *

- * This is normally done when the connection errors out, or when the last request is processed; this class acts as - * a last-effort protection since unterminated connections can lead to deadlocks. If it terminates a connection, - * this indicates a bug; warnings are logged so that this can be reported. - * - * @see Connection#tryTerminate(boolean) - */ - static class ConnectionReaper { - private static final int INTERVAL_MS = 15000; - - private final ScheduledExecutorService executor; - @VisibleForTesting - final Map connections = new ConcurrentHashMap(); - - private volatile boolean shutdown; + private class NodeListRefreshRequestDeliveryCallback + implements EventDebouncer.DeliveryCallback { + + @Override + public ListenableFuture deliver(List events) { + // The number of received requests does not matter + // as long as one request is made, refresh the entire node list + return executor.submit( + new ExceptionCatchingRunnable() { + @Override + public void runMayThrow() throws InterruptedException, ExecutionException { + controlConnection.refreshNodeListAndTokenMap(); + } + }); + } + } + } + + private enum HostEvent { + UP, + DOWN, + REMOVED + } + + /** + * Periodically ensures that closed connections are properly terminated once they have no more + * pending requests. + * + *

This is normally done when the connection errors out, or when the last request is processed; + * this class acts as a last-effort protection since unterminated connections can lead to + * deadlocks. If it terminates a connection, this indicates a bug; warnings are logged so that + * this can be reported. + * + * @see Connection#tryTerminate(boolean) + */ + static class ConnectionReaper { + private static final int INTERVAL_MS = 15000; + + private final ScheduledExecutorService executor; - private final Runnable reaperTask = new Runnable() { - @Override - public void run() { - long now = System.currentTimeMillis(); - Iterator> iterator = connections.entrySet().iterator(); - while (iterator.hasNext()) { - Entry entry = iterator.next(); - Connection connection = entry.getKey(); - Long terminateTime = entry.getValue(); - if (terminateTime <= now) { - boolean terminated = connection.tryTerminate(true); - if (terminated) - iterator.remove(); - } - } + @VisibleForTesting + final Map connections = new ConcurrentHashMap(); + + private volatile boolean shutdown; + + private final Runnable reaperTask = + new Runnable() { + @Override + public void run() { + long now = System.currentTimeMillis(); + Iterator> iterator = connections.entrySet().iterator(); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + Connection connection = entry.getKey(); + Long terminateTime = entry.getValue(); + if (terminateTime <= now) { + boolean terminated = connection.tryTerminate(true); + if (terminated) iterator.remove(); + } } + } }; - ConnectionReaper(ScheduledExecutorService executor) { - this.executor = executor; - this.executor.scheduleWithFixedDelay(reaperTask, INTERVAL_MS, INTERVAL_MS, TimeUnit.MILLISECONDS); - } + ConnectionReaper(ScheduledExecutorService executor) { + this.executor = executor; + this.executor.scheduleWithFixedDelay( + reaperTask, INTERVAL_MS, INTERVAL_MS, TimeUnit.MILLISECONDS); + } - void register(Connection connection, long terminateTime) { - if (shutdown) { - // This should not happen since the reaper is shut down after all sessions. - logger.warn("Connection registered after reaper shutdown: {}", connection); - connection.tryTerminate(true); - } else { - connections.put(connection, terminateTime); - } - } + void register(Connection connection, long terminateTime) { + if (shutdown) { + // This should not happen since the reaper is shut down after all sessions. + logger.warn("Connection registered after reaper shutdown: {}", connection); + connection.tryTerminate(true); + } else { + connections.put(connection, terminateTime); + } + } - void shutdown() { - shutdown = true; - // Force shutdown to avoid waiting for the interval, and run the task manually one last time - executor.shutdownNow(); - reaperTask.run(); - } + void shutdown() { + shutdown = true; + // Force shutdown to avoid waiting for the interval, and run the task manually one last time + executor.shutdownNow(); + reaperTask.run(); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java index d467d346f25..b7154cf5b68 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterNameMismatchException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,24 +17,26 @@ */ package com.datastax.driver.core; -import java.net.InetSocketAddress; - /** - * Indicates that we've attempted to connect to a node which cluster name doesn't match that of the other nodes known to the driver. + * Indicates that we've attempted to connect to a node which cluster name doesn't match that of the + * other nodes known to the driver. */ class ClusterNameMismatchException extends Exception { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public final InetSocketAddress address; - public final String expectedClusterName; - public final String actualClusterName; + public final EndPoint endPoint; + public final String expectedClusterName; + public final String actualClusterName; - public ClusterNameMismatchException(InetSocketAddress address, String actualClusterName, String expectedClusterName) { - super(String.format("[%s] Host %s reports cluster name '%s' that doesn't match our cluster name '%s'. This host will be ignored.", - address, address, actualClusterName, expectedClusterName)); - this.address = address; - this.expectedClusterName = expectedClusterName; - this.actualClusterName = actualClusterName; - } + public ClusterNameMismatchException( + EndPoint endPoint, String actualClusterName, String expectedClusterName) { + super( + String.format( + "[%s] Host %s reports cluster name '%s' that doesn't match our cluster name '%s'. This host will be ignored.", + endPoint, endPoint, actualClusterName, expectedClusterName)); + this.endPoint = endPoint; + this.expectedClusterName = expectedClusterName; + this.actualClusterName = actualClusterName; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterWidePercentileTracker.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterWidePercentileTracker.java index 8766106aaa1..dca97e12300 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterWidePercentileTracker.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterWidePercentileTracker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,60 +19,67 @@ /** * A {@code PercentileTracker} that aggregates all measurements into a single histogram. - *

- * This gives you global latency percentiles for the whole cluster, meaning that latencies of slower hosts will tend to - * appear in higher percentiles. + * + *

This gives you global latency percentiles for the whole cluster, meaning that latencies of + * slower hosts will tend to appear in higher percentiles. */ public class ClusterWidePercentileTracker extends PercentileTracker { - private volatile Cluster cluster; + private volatile Cluster cluster; - private ClusterWidePercentileTracker(long highestTrackableLatencyMillis, - int numberOfSignificantValueDigits, - int minRecordedValues, - long intervalMs) { - super(highestTrackableLatencyMillis, numberOfSignificantValueDigits, minRecordedValues, intervalMs); - } + private ClusterWidePercentileTracker( + long highestTrackableLatencyMillis, + int numberOfSignificantValueDigits, + int minRecordedValues, + long intervalMs) { + super( + highestTrackableLatencyMillis, + numberOfSignificantValueDigits, + minRecordedValues, + intervalMs); + } - @Override - public void onRegister(Cluster cluster) { - this.cluster = cluster; - } + @Override + public void onRegister(Cluster cluster) { + this.cluster = cluster; + } - @Override - protected Cluster computeKey(Host host, Statement statement, Exception exception) { - return cluster; - } + @Override + protected Cluster computeKey(Host host, Statement statement, Exception exception) { + return cluster; + } - /** - * Returns a builder to create a new instance. - * - * @param highestTrackableLatencyMillis the highest expected latency. If a higher value is reported, it will be - * ignored and a warning will be logged. A good rule of thumb is to set it - * slightly higher than {@link SocketOptions#getReadTimeoutMillis()}. - * @return the builder. - */ - public static Builder builder(long highestTrackableLatencyMillis) { - return new Builder(highestTrackableLatencyMillis); - } + /** + * Returns a builder to create a new instance. + * + * @param highestTrackableLatencyMillis the highest expected latency. If a higher value is + * reported, it will be ignored and a warning will be logged. A good rule of thumb is to set + * it slightly higher than {@link SocketOptions#getReadTimeoutMillis()}. + * @return the builder. + */ + public static Builder builder(long highestTrackableLatencyMillis) { + return new Builder(highestTrackableLatencyMillis); + } - /** - * Helper class to build {@code PerHostPercentileTracker} instances with a fluent interface. - */ - public static class Builder extends PercentileTracker.Builder { + /** Helper class to build {@code PerHostPercentileTracker} instances with a fluent interface. 
*/ + public static class Builder + extends PercentileTracker.Builder { - Builder(long highestTrackableLatencyMillis) { - super(highestTrackableLatencyMillis); - } + Builder(long highestTrackableLatencyMillis) { + super(highestTrackableLatencyMillis); + } - @Override - protected Builder self() { - return this; - } + @Override + protected Builder self() { + return this; + } - @Override - public ClusterWidePercentileTracker build() { - return new ClusterWidePercentileTracker(highestTrackableLatencyMillis, numberOfSignificantValueDigits, - minRecordedValues, intervalMs); - } + @Override + public ClusterWidePercentileTracker build() { + return new ClusterWidePercentileTracker( + highestTrackableLatencyMillis, + numberOfSignificantValueDigits, + minRecordedValues, + intervalMs); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusteringOrder.java b/driver-core/src/main/java/com/datastax/driver/core/ClusteringOrder.java index 50cd74bc962..38309ee64af 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusteringOrder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusteringOrder.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +19,11 @@ /** * Clustering orders. - *

- * This is used by metadata classes to indicate the clustering - * order of a clustering column in a table or materialized view. + * + *

This is used by metadata classes to indicate the clustering order of a clustering column in a + * table or materialized view. */ public enum ClusteringOrder { - - ASC, DESC; - -} \ No newline at end of file + ASC, + DESC; +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/CodecRegistry.java b/driver-core/src/main/java/com/datastax/driver/core/CodecRegistry.java index c32f0ab607b..04208576924 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CodecRegistry.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CodecRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +17,21 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.DataType.Name.LIST; +import static com.datastax.driver.core.DataType.Name.MAP; +import static com.datastax.driver.core.DataType.Name.SET; +import static com.google.common.base.Preconditions.checkNotNull; + import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.utils.MoreObjects; -import com.google.common.cache.*; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; import com.google.common.reflect.TypeToken; import com.google.common.util.concurrent.UncheckedExecutionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.nio.ByteBuffer; @@ -32,717 +41,774 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; - -import static com.datastax.driver.core.DataType.Name.*; -import static com.google.common.base.Preconditions.checkNotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A registry for {@link TypeCodec}s. When the driver needs to serialize or deserialize a Java type to/from CQL, - * it will lookup in the registry for a suitable codec. The registry is initialized with default codecs that handle - * basic conversions (e.g. CQL {@code text} to {@code java.lang.String}), and users can add their own. Complex - * codecs can also be generated on-the-fly from simpler ones (more details below). - *

- * Creating a registry - *

- * By default, the driver uses {@link CodecRegistry#DEFAULT_INSTANCE}, a shareable, JVM-wide instance initialized with - * built-in codecs for all the base CQL types. - * The only reason to create your own instances is if you have multiple {@code Cluster} objects that use different - * sets of codecs. In that case, use {@link com.datastax.driver.core.Cluster.Builder#withCodecRegistry(CodecRegistry)} - * to associate the registry with the cluster: + * A registry for {@link TypeCodec}s. When the driver needs to serialize or deserialize a Java type + * to/from CQL, it will lookup in the registry for a suitable codec. The registry is initialized + * with default codecs that handle basic conversions (e.g. CQL {@code text} to {@code + * java.lang.String}), and users can add their own. Complex codecs can also be generated on-the-fly + * from simpler ones (more details below). + * + *

Creating a registry

+ * + * By default, the driver uses {@link CodecRegistry#DEFAULT_INSTANCE}, a shareable, JVM-wide + * instance initialized with built-in codecs for all the base CQL types. The only reason to create + * your own instances is if you have multiple {@code Cluster} objects that use different sets of + * codecs. In that case, use {@link + * com.datastax.driver.core.Cluster.Builder#withCodecRegistry(CodecRegistry)} to associate the + * registry with the cluster: + * *
{@code
  * CodecRegistry myCodecRegistry = new CodecRegistry();
  * myCodecRegistry.register(myCodec1, myCodec2, myCodec3);
  * Cluster cluster = Cluster.builder().withCodecRegistry(myCodecRegistry).build();
  *
  * // To retrieve the registry later:
- * CodecRegistry registry = cluster.getConfiguration().getCodecRegistry();}
- * 
+ * CodecRegistry registry = cluster.getConfiguration().getCodecRegistry(); + * } + * * {@code CodecRegistry} instances are thread-safe. - *

- * It is possible to turn on log messages by setting the {@code com.datastax.driver.core.CodecRegistry} logger - * level to {@code TRACE}. Beware that the registry can be very verbose at this log level. - *

- * Registering and using custom codecs - *

- * To create a custom codec, write a class that extends {@link TypeCodec}, create an instance, and pass it to one of - * the {@link #register(TypeCodec) register} methods; for example, one could create a codec that maps CQL - * timestamps to JDK8's {@code java.time.LocalDate}: + * + *

It is possible to turn on log messages by setting the {@code + * com.datastax.driver.core.CodecRegistry} logger level to {@code TRACE}. Beware that the registry + * can be very verbose at this log level. + * + *

Registering and using custom codecs

+ * + * To create a custom codec, write a class that extends {@link TypeCodec}, create an instance, and + * pass it to one of the {@link #register(TypeCodec) register} methods; for example, one could + * create a codec that maps CQL timestamps to JDK8's {@code java.time.LocalDate}: + * *
{@code
  * class LocalDateCodec extends TypeCodec<java.time.LocalDate> {
  *    ...
  * }
- * myCodecRegistry.register(new LocalDateCodec());}
- * 
+ * myCodecRegistry.register(new LocalDateCodec()); + * } + * * The conversion will be available to: + * *
    - *
  • all driver types that implement {@link GettableByIndexData}, {@link GettableByNameData}, - * {@link SettableByIndexData} and/or {@link SettableByNameData}. Namely: {@link Row}, - * {@link BoundStatement}, {@link UDTValue} and {@link TupleValue};
  • - *
  • {@link SimpleStatement#SimpleStatement(String, Object...) simple statements};
  • - *
  • statements created with the {@link com.datastax.driver.core.querybuilder.QueryBuilder Query builder}.
  • + *
  • all driver types that implement {@link GettableByIndexData}, {@link GettableByNameData}, + * {@link SettableByIndexData} and/or {@link SettableByNameData}. Namely: {@link Row}, {@link + * BoundStatement}, {@link UDTValue} and {@link TupleValue}; + *
  • {@link SimpleStatement#SimpleStatement(String, Object...) simple statements}; + *
  • statements created with the {@link com.datastax.driver.core.querybuilder.QueryBuilder Query + * builder}. *
- *

- * Example: + * + *

Example: + * *

{@code
  * Row row = session.execute("select date from some_table where pk = 1").one();
- * java.time.LocalDate date = row.get(0, java.time.LocalDate.class); // uses LocalDateCodec registered above}
- * 
+ * java.time.LocalDate date = row.get(0, java.time.LocalDate.class); // uses LocalDateCodec registered above + * } + * * You can also bypass the codec registry by passing a standalone codec instance to methods such as * {@link GettableByIndexData#get(int, TypeCodec)}. - *
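For readers following this Javadoc, here is a minimal sketch of what such a codec might look like against the 3.x `TypeCodec` API. It is illustrative only and not part of this patch: it delegates to the driver's built-in timestamp codec, assumes Java 8+, and the class name simply mirrors the `LocalDateCodec` mentioned above.

```java
// Illustrative sketch: maps CQL timestamp to java.time.LocalDate by delegating to the
// built-in timestamp codec (java.util.Date). Not part of this change.
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.exceptions.InvalidTypeException;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.util.Date;

class LocalDateCodec extends TypeCodec<LocalDate> {

  private final TypeCodec<Date> inner = TypeCodec.timestamp();

  LocalDateCodec() {
    super(DataType.timestamp(), LocalDate.class);
  }

  @Override
  public ByteBuffer serialize(LocalDate value, ProtocolVersion protocolVersion)
      throws InvalidTypeException {
    return value == null
        ? null
        : inner.serialize(Date.from(value.atStartOfDay(ZoneOffset.UTC).toInstant()), protocolVersion);
  }

  @Override
  public LocalDate deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion)
      throws InvalidTypeException {
    Date date = inner.deserialize(bytes, protocolVersion);
    return date == null
        ? null
        : Instant.ofEpochMilli(date.getTime()).atZone(ZoneOffset.UTC).toLocalDate();
  }

  @Override
  public LocalDate parse(String value) throws InvalidTypeException {
    Date date = inner.parse(value);
    return date == null
        ? null
        : Instant.ofEpochMilli(date.getTime()).atZone(ZoneOffset.UTC).toLocalDate();
  }

  @Override
  public String format(LocalDate value) throws InvalidTypeException {
    return inner.format(
        value == null ? null : Date.from(value.atStartOfDay(ZoneOffset.UTC).toInstant()));
  }
}
```

Such a codec would then be registered exactly as shown above, e.g. `myCodecRegistry.register(new LocalDateCodec())`.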

- * Codec generation - *

- * When a {@code CodecRegistry} cannot find a suitable codec among existing ones, it will attempt to create it on-the-fly. - * It can manage: + * + *

Codec generation

+ * + * When a {@code CodecRegistry} cannot find a suitable codec among existing ones, it will attempt to + * create it on-the-fly. It can manage: + * *
    - *
  • collections (lists, sets and maps) of known types. For example, - * if you registered a codec for JDK8's {@code java.time.LocalDate} like in the example above, you get - * {@code List<LocalDate>} and {@code Set<LocalDate>} handled for free, - * as well as all {@code Map} types whose keys and/or values are {@code java.time.LocalDate}. - * This works recursively for nested collections;
  • - *
  • {@link UserType user types}, mapped to {@link UDTValue} objects. Custom codecs are available recursively - * to the UDT's fields, so if one of your fields is a {@code timestamp} you can use your {@code LocalDateCodec} to retrieve - * it as a {@code java.time.LocalDate};
  • - *
  • {@link TupleType tuple types}, mapped to {@link TupleValue} (with the same rules for nested fields);
  • - *
  • {@link com.datastax.driver.core.DataType.CustomType custom types}, mapped to {@code ByteBuffer}.
  • + *
  • collections (lists, sets and maps) of known types. For example, if you registered a codec + * for JDK8's {@code java.time.LocalDate} like in the example above, you get {@code + * List<LocalDate>} and {@code Set<LocalDate>} handled for free, as well as all {@code Map} + * types whose keys and/or values are {@code java.time.LocalDate}. This works recursively for + * nested collections;
  • {@link UserType user types}, mapped to {@link UDTValue} objects. Custom codecs are + * available recursively to the UDT's fields, so if one of your fields is a {@code timestamp} + * you can use your {@code LocalDateCodec} to retrieve it as a {@code java.time.LocalDate}; + *
  • {@link TupleType tuple types}, mapped to {@link TupleValue} (with the same rules for nested + * fields); + *
  • {@link com.datastax.driver.core.DataType.CustomType custom types}, mapped to {@code + * ByteBuffer}. *
- * If the codec registry encounters a mapping that it can't handle automatically, a {@link CodecNotFoundException} is thrown; - * you'll need to register a custom codec for it. - *
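To make the on-the-fly generation described above concrete, here is a hedged sketch (again not part of this patch): it assumes the hypothetical `LocalDateCodec` above has been registered, and the table and column names are made up for the example.

```java
// Sketch: reading a list<timestamp> column as List<LocalDate>. The registry derives and caches
// a TypeCodec<List<LocalDate>> from the registered LocalDateCodec; no extra registration needed.
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.google.common.reflect.TypeToken;
import java.time.LocalDate;
import java.util.List;

class GeneratedCodecExample {
  static List<LocalDate> readDates(Session session) {
    Row row = session.execute("SELECT dates FROM some_table WHERE pk = 1").one();
    return row.get("dates", new TypeToken<List<LocalDate>>() {});
  }
}
```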

- * Performance and caching - *

- * Whenever possible, the registry will cache the result of a codec lookup for a specific type mapping, including any generated - * codec. For example, if you registered {@code LocalDateCodec} and ask the registry for a codec to convert a CQL - * {@code list} to a Java {@code List}: + * + * If the codec registry encounters a mapping that it can't handle automatically, a {@link + * CodecNotFoundException} is thrown; you'll need to register a custom codec for it. + * + *

Performance and caching

+ * + * Whenever possible, the registry will cache the result of a codec lookup for a specific type + * mapping, including any generated codec. For example, if you registered {@code LocalDateCodec} and + * ask the registry for a codec to convert a CQL {@code list} to a Java {@code + * List}: + * *
    - *
  1. the first lookup will generate a {@code TypeCodec<List<LocalDate>>} from {@code LocalDateCodec}, and put it in - * the cache;
  2. - *
  3. the second lookup will hit the cache directly, and reuse the previously generated instance.
  4. + *
  5. the first lookup will generate a {@code TypeCodec<List<LocalDate>>} from {@code + * LocalDateCodec}, and put it in the cache;
  6. the second lookup will hit the cache directly, and reuse the previously generated instance. *
- * The javadoc for each {@link #codecFor(DataType) codecFor} variant specifies whether the result can be cached or not. - *

- * Codec order - *

+ * + * The javadoc for each {@link #codecFor(DataType) codecFor} variant specifies whether the result + * can be cached or not. + * + *

Codec order

+ * * When the registry looks up a codec, the rules of precedence are: + * *
    - *
  • if a result was previously cached for that mapping, it is returned;
  • - *
  • otherwise, the registry checks the list of built-in codecs – the default ones – and the ones that were explicitly - * registered (in the order that they were registered). It calls each codec's {@code accepts} methods to determine if - * it can handle the mapping, and if so returns it;
  • - *
  • otherwise, the registry tries to generate a codec, according to the rules outlined above.
  • + *
  • if a result was previously cached for that mapping, it is returned; + *
  • otherwise, the registry checks the list of built-in codecs – the default ones – and the + * ones that were explicitly registered (in the order that they were registered). It calls + * each codec's {@code accepts} methods to determine if it can handle the mapping, and if so + * returns it; + *
  • otherwise, the registry tries to generate a codec, according to the rules outlined above. *
- * It is currently impossible to override an existing codec. If you try to do so, {@link #register(TypeCodec)} will log a - * warning and ignore it. + * + * It is currently impossible to override an existing codec. If you try to do so, {@link + * #register(TypeCodec)} will log a warning and ignore it. */ public final class CodecRegistry { - private static final Logger logger = LoggerFactory.getLogger(CodecRegistry.class); - - private static final Map> BUILT_IN_CODECS_MAP = new EnumMap>(DataType.Name.class); - - static { - BUILT_IN_CODECS_MAP.put(DataType.Name.ASCII, TypeCodec.ascii()); - BUILT_IN_CODECS_MAP.put(DataType.Name.BIGINT, TypeCodec.bigint()); - BUILT_IN_CODECS_MAP.put(DataType.Name.BLOB, TypeCodec.blob()); - BUILT_IN_CODECS_MAP.put(DataType.Name.BOOLEAN, TypeCodec.cboolean()); - BUILT_IN_CODECS_MAP.put(DataType.Name.COUNTER, TypeCodec.counter()); - BUILT_IN_CODECS_MAP.put(DataType.Name.DECIMAL, TypeCodec.decimal()); - BUILT_IN_CODECS_MAP.put(DataType.Name.DOUBLE, TypeCodec.cdouble()); - BUILT_IN_CODECS_MAP.put(DataType.Name.FLOAT, TypeCodec.cfloat()); - BUILT_IN_CODECS_MAP.put(DataType.Name.INET, TypeCodec.inet()); - BUILT_IN_CODECS_MAP.put(DataType.Name.INT, TypeCodec.cint()); - BUILT_IN_CODECS_MAP.put(DataType.Name.TEXT, TypeCodec.varchar()); - BUILT_IN_CODECS_MAP.put(DataType.Name.TIMESTAMP, TypeCodec.timestamp()); - BUILT_IN_CODECS_MAP.put(DataType.Name.UUID, TypeCodec.uuid()); - BUILT_IN_CODECS_MAP.put(DataType.Name.VARCHAR, TypeCodec.varchar()); - BUILT_IN_CODECS_MAP.put(DataType.Name.VARINT, TypeCodec.varint()); - BUILT_IN_CODECS_MAP.put(DataType.Name.TIMEUUID, TypeCodec.timeUUID()); - BUILT_IN_CODECS_MAP.put(DataType.Name.SMALLINT, TypeCodec.smallInt()); - BUILT_IN_CODECS_MAP.put(DataType.Name.TINYINT, TypeCodec.tinyInt()); - BUILT_IN_CODECS_MAP.put(DataType.Name.DATE, TypeCodec.date()); - BUILT_IN_CODECS_MAP.put(DataType.Name.TIME, TypeCodec.time()); - BUILT_IN_CODECS_MAP.put(DataType.Name.DURATION, TypeCodec.duration()); + private static final Logger logger = LoggerFactory.getLogger(CodecRegistry.class); + + private static final Map> BUILT_IN_CODECS_MAP = + new EnumMap>(DataType.Name.class); + + static { + BUILT_IN_CODECS_MAP.put(DataType.Name.ASCII, TypeCodec.ascii()); + BUILT_IN_CODECS_MAP.put(DataType.Name.BIGINT, TypeCodec.bigint()); + BUILT_IN_CODECS_MAP.put(DataType.Name.BLOB, TypeCodec.blob()); + BUILT_IN_CODECS_MAP.put(DataType.Name.BOOLEAN, TypeCodec.cboolean()); + BUILT_IN_CODECS_MAP.put(DataType.Name.COUNTER, TypeCodec.counter()); + BUILT_IN_CODECS_MAP.put(DataType.Name.DECIMAL, TypeCodec.decimal()); + BUILT_IN_CODECS_MAP.put(DataType.Name.DOUBLE, TypeCodec.cdouble()); + BUILT_IN_CODECS_MAP.put(DataType.Name.FLOAT, TypeCodec.cfloat()); + BUILT_IN_CODECS_MAP.put(DataType.Name.INET, TypeCodec.inet()); + BUILT_IN_CODECS_MAP.put(DataType.Name.INT, TypeCodec.cint()); + BUILT_IN_CODECS_MAP.put(DataType.Name.TEXT, TypeCodec.varchar()); + BUILT_IN_CODECS_MAP.put(DataType.Name.TIMESTAMP, TypeCodec.timestamp()); + BUILT_IN_CODECS_MAP.put(DataType.Name.UUID, TypeCodec.uuid()); + BUILT_IN_CODECS_MAP.put(DataType.Name.VARCHAR, TypeCodec.varchar()); + BUILT_IN_CODECS_MAP.put(DataType.Name.VARINT, TypeCodec.varint()); + BUILT_IN_CODECS_MAP.put(DataType.Name.TIMEUUID, TypeCodec.timeUUID()); + BUILT_IN_CODECS_MAP.put(DataType.Name.SMALLINT, TypeCodec.smallInt()); + BUILT_IN_CODECS_MAP.put(DataType.Name.TINYINT, TypeCodec.tinyInt()); + BUILT_IN_CODECS_MAP.put(DataType.Name.DATE, TypeCodec.date()); + BUILT_IN_CODECS_MAP.put(DataType.Name.TIME, TypeCodec.time()); + 
BUILT_IN_CODECS_MAP.put(DataType.Name.DURATION, TypeCodec.duration()); + } + + // roughly sorted by popularity + private static final TypeCodec[] BUILT_IN_CODECS = + new TypeCodec[] { + TypeCodec + .varchar(), // must be declared before AsciiCodec so it gets chosen when CQL type not + // available + TypeCodec + .uuid(), // must be declared before TimeUUIDCodec so it gets chosen when CQL type not + // available + TypeCodec.timeUUID(), + TypeCodec.timestamp(), + TypeCodec.cint(), + TypeCodec.bigint(), + TypeCodec.blob(), + TypeCodec.cdouble(), + TypeCodec.cfloat(), + TypeCodec.decimal(), + TypeCodec.varint(), + TypeCodec.inet(), + TypeCodec.cboolean(), + TypeCodec.smallInt(), + TypeCodec.tinyInt(), + TypeCodec.date(), + TypeCodec.time(), + TypeCodec.duration(), + TypeCodec.counter(), + TypeCodec.ascii() + }; + + /** + * The default {@code CodecRegistry} instance. + * + *

It will be shared among all {@link Cluster} instances that were not explicitly built with a + * different instance. + */ + public static final CodecRegistry DEFAULT_INSTANCE = new CodecRegistry(); + + /** Cache key for the codecs cache. */ + private static final class CacheKey { + + private final DataType cqlType; + + private final TypeToken javaType; + + CacheKey(DataType cqlType, TypeToken javaType) { + this.javaType = javaType; + this.cqlType = cqlType; } - // roughly sorted by popularity - private static final TypeCodec[] BUILT_IN_CODECS = new TypeCodec[]{ - TypeCodec.varchar(), // must be declared before AsciiCodec so it gets chosen when CQL type not available - TypeCodec.uuid(), // must be declared before TimeUUIDCodec so it gets chosen when CQL type not available - TypeCodec.timeUUID(), - TypeCodec.timestamp(), - TypeCodec.cint(), - TypeCodec.bigint(), - TypeCodec.blob(), - TypeCodec.cdouble(), - TypeCodec.cfloat(), - TypeCodec.decimal(), - TypeCodec.varint(), - TypeCodec.inet(), - TypeCodec.cboolean(), - TypeCodec.smallInt(), - TypeCodec.tinyInt(), - TypeCodec.date(), - TypeCodec.time(), - TypeCodec.duration(), - TypeCodec.counter(), - TypeCodec.ascii() - }; - - /** - * The default {@code CodecRegistry} instance. - *

- * It will be shared among all {@link Cluster} instances that were not explicitly built with a different instance. - */ - public static final CodecRegistry DEFAULT_INSTANCE = new CodecRegistry(); - - /** - * Cache key for the codecs cache. - */ - private static final class CacheKey { - - private final DataType cqlType; - - private final TypeToken javaType; - - CacheKey(DataType cqlType, TypeToken javaType) { - this.javaType = javaType; - this.cqlType = cqlType; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - CacheKey cacheKey = (CacheKey) o; - return MoreObjects.equal(cqlType, cacheKey.cqlType) && MoreObjects.equal(javaType, cacheKey.javaType); - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(cqlType, javaType); - } - + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CacheKey cacheKey = (CacheKey) o; + return MoreObjects.equal(cqlType, cacheKey.cqlType) + && MoreObjects.equal(javaType, cacheKey.javaType); } - /** - * Cache loader for the codecs cache. - */ - private class TypeCodecCacheLoader extends CacheLoader> { - @Override - public TypeCodec load(CacheKey cacheKey) { - checkNotNull(cacheKey.cqlType, "Parameter cqlType cannot be null"); - if (logger.isTraceEnabled()) - logger.trace("Loading codec into cache: [{} <-> {}]", - CodecRegistry.toString(cacheKey.cqlType), - CodecRegistry.toString(cacheKey.javaType)); - for (TypeCodec codec : codecs) { - if (codec.accepts(cacheKey.cqlType) && (cacheKey.javaType == null || codec.accepts(cacheKey.javaType))) { - logger.trace("Already existing codec found: {}", codec); - return codec; - } - } - return createCodec(cacheKey.cqlType, cacheKey.javaType); - } + @Override + public int hashCode() { + return MoreObjects.hashCode(cqlType, javaType); } - - /** - * A complexity-based weigher for the codecs cache. - * Weights are computed mainly according to the CQL type: - *

    - *
  1. Manually-registered codecs always weigh 0; - *
  2. Codecs for primitive types weigh 0; - *
  3. Codecs for collections weigh the total weight of their inner types + the weight of their level of deepness; - *
  4. Codecs for UDTs and tuples weigh the total weight of their inner types + the weight of their level of deepness, but cannot weigh less than 1; - *
  5. Codecs for custom (non-CQL) types weigh 1. - *
- * A consequence of this algorithm is that codecs for primitive types and codecs for all "shallow" collections thereof - * are never evicted. - */ - private class TypeCodecWeigher implements Weigher> { - - @Override - public int weigh(CacheKey key, TypeCodec value) { - return codecs.contains(value) ? 0 : weigh(value.cqlType, 0); + } + + /** Cache loader for the codecs cache. */ + private class TypeCodecCacheLoader extends CacheLoader> { + @Override + public TypeCodec load(CacheKey cacheKey) { + checkNotNull(cacheKey.cqlType, "Parameter cqlType cannot be null"); + if (logger.isTraceEnabled()) + logger.trace( + "Loading codec into cache: [{} <-> {}]", + CodecRegistry.toString(cacheKey.cqlType), + CodecRegistry.toString(cacheKey.javaType)); + for (TypeCodec codec : codecs) { + if (codec.accepts(cacheKey.cqlType) + && (cacheKey.javaType == null || codec.accepts(cacheKey.javaType))) { + logger.trace("Already existing codec found: {}", codec); + return codec; } - - private int weigh(DataType cqlType, int level) { - switch (cqlType.getName()) { - case LIST: - case SET: - case MAP: { - int weight = level; - for (DataType eltType : cqlType.getTypeArguments()) { - weight += weigh(eltType, level + 1); - } - return weight; - } - case UDT: { - int weight = level; - for (UserType.Field field : ((UserType) cqlType)) { - weight += weigh(field.getType(), level + 1); - } - return weight == 0 ? 1 : weight; - } - case TUPLE: { - int weight = level; - for (DataType componentType : ((TupleType) cqlType).getComponentTypes()) { - weight += weigh(componentType, level + 1); - } - return weight == 0 ? 1 : weight; - } - case CUSTOM: - return 1; - default: - return 0; - } - } - } - - /** - * Simple removal listener for the codec cache (can be used for debugging purposes - * by setting the {@code com.datastax.driver.core.CodecRegistry} logger level to {@code TRACE}. - */ - private class TypeCodecRemovalListener implements RemovalListener> { - @Override - public void onRemoval(RemovalNotification> notification) { - logger.trace("Evicting codec from cache: {} (cause: {})", notification.getValue(), notification.getCause()); - } - } - - /** - * The list of user-registered codecs. - */ - private final CopyOnWriteArrayList> codecs; - - /** - * A LoadingCache to serve requests for codecs whenever possible. - * The cache can be used as long as at least the CQL type is known. - */ - private final LoadingCache> cache; - - /** - * Creates a new instance initialized with built-in codecs for all the base CQL types. - */ - public CodecRegistry() { - this.codecs = new CopyOnWriteArrayList>(); - this.cache = defaultCacheBuilder().build(new TypeCodecCacheLoader()); + } + return createCodec(cacheKey.cqlType, cacheKey.javaType); } - - private CacheBuilder> defaultCacheBuilder() { - CacheBuilder> builder = CacheBuilder.newBuilder() - // lists, sets and maps of 20 primitive types = 20 + 20 + 20*20 = 440 codecs, - // so let's start with roughly 1/4 of that - .initialCapacity(100) - .maximumWeight(1000) - .weigher(new TypeCodecWeigher()); - if (logger.isTraceEnabled()) - // do not bother adding a listener if it will be ineffective - builder = builder.removalListener(new TypeCodecRemovalListener()); - return builder; + } + + /** + * A complexity-based weigher for the codecs cache. Weights are computed mainly according to the + * CQL type: + * + *
    + *
  1. Manually-registered codecs always weigh 0; + *
  2. Codecs for primitive types weigh 0; + *
  3. Codecs for collections weigh the total weight of their inner types + the weight of their + * level of deepness; + *
  4. Codecs for UDTs and tuples weigh the total weight of their inner types + the weight of + * their level of deepness, but cannot weigh less than 1; + *
  5. Codecs for custom (non-CQL) types weigh 1. + *
+ * + * A consequence of this algorithm is that codecs for primitive types and codecs for all "shallow" + * collections thereof are never evicted. + */ + private class TypeCodecWeigher implements Weigher> { + + @Override + public int weigh(CacheKey key, TypeCodec value) { + return codecs.contains(value) ? 0 : weigh(value.cqlType, 0); } - /** - * Register the given codec with this registry. - *

- * This method will log a warning and ignore the codec if it collides with a previously registered one. - * Note that this check is not done in a completely thread-safe manner; codecs should typically be registered - * at application startup, not in a highly concurrent context (if a race condition occurs, the worst possible - * outcome is that no warning gets logged, and the codec gets registered but will never actually be used). - * - * @param newCodec The codec to add to the registry. - * @return this CodecRegistry (for method chaining). - */ - public CodecRegistry register(TypeCodec newCodec) { - for (TypeCodec oldCodec : BUILT_IN_CODECS) { - if (oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType())) { - logger.warn("Ignoring codec {} because it collides with previously registered codec {}", newCodec, oldCodec); - return this; + private int weigh(DataType cqlType, int level) { + switch (cqlType.getName()) { + case LIST: + case SET: + case MAP: + { + int weight = level; + for (DataType eltType : cqlType.getTypeArguments()) { + weight += weigh(eltType, level + 1); } - } - for (TypeCodec oldCodec : codecs) { - if (oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType())) { - logger.warn("Ignoring codec {} because it collides with previously registered codec {}", newCodec, oldCodec); - return this; + return weight; + } + case UDT: + { + int weight = level; + for (UserType.Field field : ((UserType) cqlType)) { + weight += weigh(field.getType(), level + 1); } - } - CacheKey key = new CacheKey(newCodec.getCqlType(), newCodec.getJavaType()); - TypeCodec existing = cache.getIfPresent(key); - if (existing != null) { - logger.warn("Ignoring codec {} because it collides with previously generated codec {}", newCodec, existing); - return this; - } - this.codecs.add(newCodec); - return this; + return weight == 0 ? 1 : weight; + } + case TUPLE: + { + int weight = level; + for (DataType componentType : ((TupleType) cqlType).getComponentTypes()) { + weight += weigh(componentType, level + 1); + } + return weight == 0 ? 1 : weight; + } + case CUSTOM: + return 1; + default: + return 0; + } } - - /** - * Register the given codecs with this registry. - * - * @param codecs The codecs to add to the registry. - * @return this CodecRegistry (for method chaining). - * @see #register(TypeCodec) - */ - public CodecRegistry register(TypeCodec... codecs) { - for (TypeCodec codec : codecs) - register(codec); + } + + /** + * Simple removal listener for the codec cache (can be used for debugging purposes by setting the + * {@code com.datastax.driver.core.CodecRegistry} logger level to {@code TRACE}. + */ + private class TypeCodecRemovalListener implements RemovalListener> { + @Override + public void onRemoval(RemovalNotification> notification) { + logger.trace( + "Evicting codec from cache: {} (cause: {})", + notification.getValue(), + notification.getCause()); + } + } + + /** The list of user-registered codecs. */ + private final CopyOnWriteArrayList> codecs; + + /** + * A LoadingCache to serve requests for codecs whenever possible. The cache can be used as long as + * at least the CQL type is known. + */ + private final LoadingCache> cache; + + /** Creates a new instance initialized with built-in codecs for all the base CQL types. 
*/ + public CodecRegistry() { + this.codecs = new CopyOnWriteArrayList>(); + this.cache = defaultCacheBuilder().build(new TypeCodecCacheLoader()); + } + + private CacheBuilder> defaultCacheBuilder() { + CacheBuilder> builder = + CacheBuilder.newBuilder() + // lists, sets and maps of 20 primitive types = 20 + 20 + 20*20 = 440 codecs, + // so let's start with roughly 1/4 of that + .initialCapacity(100) + .maximumWeight(1000) + .weigher(new TypeCodecWeigher()); + if (logger.isTraceEnabled()) + // do not bother adding a listener if it will be ineffective + builder = builder.removalListener(new TypeCodecRemovalListener()); + return builder; + } + + /** + * Register the given codec with this registry. + * + *

This method will log a warning and ignore the codec if it collides with a previously + * registered one. Note that this check is not done in a completely thread-safe manner; codecs + * should typically be registered at application startup, not in a highly concurrent context (if a + * race condition occurs, the worst possible outcome is that no warning gets logged, and the codec + * gets registered but will never actually be used). + * + * @param newCodec The codec to add to the registry. + * @return this CodecRegistry (for method chaining). + */ + public CodecRegistry register(TypeCodec newCodec) { + for (TypeCodec oldCodec : BUILT_IN_CODECS) { + if (oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType())) { + logger.warn( + "Ignoring codec {} because it collides with previously registered codec {}", + newCodec, + oldCodec); return this; + } } - - /** - * Register the given codecs with this registry. - * - * @param codecs The codecs to add to the registry. - * @return this CodecRegistry (for method chaining). - * @see #register(TypeCodec) - */ - public CodecRegistry register(Iterable> codecs) { - for (TypeCodec codec : codecs) - register(codec); + for (TypeCodec oldCodec : codecs) { + if (oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType())) { + logger.warn( + "Ignoring codec {} because it collides with previously registered codec {}", + newCodec, + oldCodec); return this; + } } - - /** - * Returns a {@link TypeCodec codec} that accepts the given value. - *

- * This method takes an arbitrary Java object and tries to locate a suitable codec for it. - * Codecs must perform a {@link TypeCodec#accepts(Object) runtime inspection} of the object to determine - * if they can accept it or not, which, depending on the implementations, can be expensive; besides, the - * resulting codec cannot be cached. - * Therefore there might be a performance penalty when using this method. - *

- * Furthermore, this method returns the first matching codec, regardless of its accepted CQL type. - * It should be reserved for situations where the target CQL type is not available or unknown. - * In the Java driver, this happens mainly when serializing a value in a - * {@link SimpleStatement#SimpleStatement(String, Object...) SimpleStatement} or in the - * {@link com.datastax.driver.core.querybuilder.QueryBuilder}, where no CQL type information is available. - *

- * Codecs returned by this method are NOT cached (see the {@link CodecRegistry top-level documentation} - * of this class for more explanations about caching). - * - * @param value The value the codec should accept; must not be {@code null}. - * @return A suitable codec. - * @throws CodecNotFoundException if a suitable codec cannot be found. - */ - public TypeCodec codecFor(T value) { - return findCodec(null, value); + CacheKey key = new CacheKey(newCodec.getCqlType(), newCodec.getJavaType()); + TypeCodec existing = cache.getIfPresent(key); + if (existing != null) { + logger.warn( + "Ignoring codec {} because it collides with previously generated codec {}", + newCodec, + existing); + return this; } - - /** - * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type}. - *

- * This method returns the first matching codec, regardless of its accepted Java type. - * It should be reserved for situations where the Java type is not available or unknown. - * In the Java driver, this happens mainly when deserializing a value using the - * {@link GettableByIndexData#getObject(int) getObject} method. - *

- * Codecs returned by this method are cached (see the {@link CodecRegistry top-level documentation} - * of this class for more explanations about caching). - * - * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. - * @return A suitable codec. - * @throws CodecNotFoundException if a suitable codec cannot be found. - */ - public TypeCodec codecFor(DataType cqlType) throws CodecNotFoundException { - return lookupCodec(cqlType, null); + this.codecs.add(newCodec); + return this; + } + + /** + * Register the given codecs with this registry. + * + * @param codecs The codecs to add to the registry. + * @return this CodecRegistry (for method chaining). + * @see #register(TypeCodec) + */ + public CodecRegistry register(TypeCodec... codecs) { + for (TypeCodec codec : codecs) register(codec); + return this; + } + + /** + * Register the given codecs with this registry. + * + * @param codecs The codecs to add to the registry. + * @return this CodecRegistry (for method chaining). + * @see #register(TypeCodec) + */ + public CodecRegistry register(Iterable> codecs) { + for (TypeCodec codec : codecs) register(codec); + return this; + } + + /** + * Returns a {@link TypeCodec codec} that accepts the given value. + * + *
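As a usage sketch for `register` against the 3.x `TypeCodec` API (the codec class below is hypothetical application code, not part of the driver):

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.exceptions.InvalidTypeException;
import java.nio.ByteBuffer;

/** Hypothetical codec mapping CQL int columns to java.lang.String, delegating to the built-in int codec. */
class CqlIntToStringCodec extends TypeCodec<String> {

  CqlIntToStringCodec() {
    super(DataType.cint(), String.class);
  }

  @Override
  public ByteBuffer serialize(String value, ProtocolVersion version) throws InvalidTypeException {
    return value == null ? null : TypeCodec.cint().serialize(Integer.parseInt(value), version);
  }

  @Override
  public String deserialize(ByteBuffer bytes, ProtocolVersion version) throws InvalidTypeException {
    Integer i = TypeCodec.cint().deserialize(bytes, version);
    return i == null ? null : i.toString();
  }

  @Override
  public String parse(String value) throws InvalidTypeException {
    return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : value;
  }

  @Override
  public String format(String value) throws InvalidTypeException {
    return value == null ? "NULL" : value;
  }
}
```

Registration is typically a one-time, startup operation: `CodecRegistry registry = new CodecRegistry().register(new CqlIntToStringCodec());`. Registering a second codec for the same `[int <-> String]` mapping afterwards would only log the warning described above and leave the first one in place.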

This method takes an arbitrary Java object and tries to locate a suitable codec for it. + * Codecs must perform a {@link TypeCodec#accepts(Object) runtime inspection} of the object to + * determine if they can accept it or not, which, depending on the implementations, can be + * expensive; besides, the resulting codec cannot be cached. Therefore there might be a + * performance penalty when using this method. + * + *

Furthermore, this method returns the first matching codec, regardless of its accepted CQL + * type. It should be reserved for situations where the target CQL type is not available or + * unknown. In the Java driver, this happens mainly when serializing a value in a {@link + * SimpleStatement#SimpleStatement(String, Object...) SimpleStatement} or in the {@link + * com.datastax.driver.core.querybuilder.QueryBuilder}, where no CQL type information is + * available. + * + *

Codecs returned by this method are NOT cached (see the {@link CodecRegistry + * top-level documentation} of this class for more explanations about caching). + * + * @param value The value the codec should accept; must not be {@code null}. + * @return A suitable codec. + * @throws CodecNotFoundException if a suitable codec cannot be found. + */ + public TypeCodec codecFor(T value) { + return findCodec(null, value); + } + + /** + * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type}. + * + *
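A minimal sketch of the value-only lookup (names are illustrative; `DEFAULT_INSTANCE` is the registry used when no custom one is configured):

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.TypeCodec;
import java.util.Arrays;
import java.util.List;

public class ValueLookupExample {
  public static void main(String[] args) {
    CodecRegistry registry = CodecRegistry.DEFAULT_INSTANCE;

    // No CQL type is known here, so the registry inspects the runtime values;
    // the results are correct but never cached, so avoid this in hot code paths.
    TypeCodec<String> textCodec = registry.codecFor("hello");
    TypeCodec<List<Long>> listCodec = registry.codecFor(Arrays.asList(1L, 2L, 3L));

    System.out.println(textCodec + " / " + listCodec);
  }
}
```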

This method returns the first matching codec, regardless of its accepted Java type. It + * should be reserved for situations where the Java type is not available or unknown. In the Java + * driver, this happens mainly when deserializing a value using the {@link + * GettableByIndexData#getObject(int) getObject} method. + * + *

Codecs returned by this method are cached (see the {@link CodecRegistry top-level + * documentation} of this class for more explanations about caching). + * + * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. + * @return A suitable codec. + * @throws CodecNotFoundException if a suitable codec cannot be found. + */ + public TypeCodec codecFor(DataType cqlType) throws CodecNotFoundException { + return lookupCodec(cqlType, null); + } + + /** + * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} and the + * given Java class. + * + *

This method can only handle raw (non-parameterized) Java types. For parameterized types, use + * {@link #codecFor(DataType, TypeToken)} instead. + * + *

Codecs returned by this method are cached (see the {@link CodecRegistry top-level + * documentation} of this class for more explanations about caching). + * + * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. + * @param javaType The Java type the codec should accept; can be {@code null}. + * @return A suitable codec. + * @throws CodecNotFoundException if a suitable codec cannot be found. + */ + public TypeCodec codecFor(DataType cqlType, Class javaType) + throws CodecNotFoundException { + return codecFor(cqlType, TypeToken.of(javaType)); + } + + /** + * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} and the + * given Java type. + * + *

This method handles parameterized types thanks to Guava's {@link TypeToken} API. + * + *

Codecs returned by this method are cached (see the {@link CodecRegistry top-level + * documentation} of this class for more explanations about caching). + * + * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. + * @param javaType The {@link TypeToken Java type} the codec should accept; can be {@code null}. + * @return A suitable codec. + * @throws CodecNotFoundException if a suitable codec cannot be found. + */ + public TypeCodec codecFor(DataType cqlType, TypeToken javaType) + throws CodecNotFoundException { + return lookupCodec(cqlType, javaType); + } + + /** + * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} and the + * given value. + * + *
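A sketch of the class and `TypeToken` overloads side by side (assuming the default registry; the anonymous `TypeToken` subclass is the usual Guava idiom for capturing a parameterized type):

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.TypeCodec;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;

public class TypeTokenLookupExample {
  public static void main(String[] args) {
    CodecRegistry registry = CodecRegistry.DEFAULT_INSTANCE;

    // A raw class is enough for a non-parameterized column type...
    TypeCodec<String> textCodec = registry.codecFor(DataType.text(), String.class);

    // ...but parameterized types need a TypeToken to keep their element types.
    TypeCodec<List<String>> listCodec =
        registry.codecFor(DataType.list(DataType.text()), new TypeToken<List<String>>() {});
    TypeCodec<Map<String, Integer>> mapCodec =
        registry.codecFor(
            DataType.map(DataType.text(), DataType.cint()),
            new TypeToken<Map<String, Integer>>() {});

    System.out.println(textCodec + " / " + listCodec + " / " + mapCodec);
  }
}
```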

This method takes an arbitrary Java object and tries to locate a suitable codec for it. + * Codecs must perform a {@link TypeCodec#accepts(Object) runtime inspection} of the object to + * determine if they can accept it or not, which, depending on the implementations, can be + * expensive; besides, the resulting codec cannot be cached. Therefore there might be a + * performance penalty when using this method. + * + *

Codecs returned by this method are NOT cached (see the {@link CodecRegistry + * top-level documentation} of this class for more explanations about caching). + * + * @param cqlType The {@link DataType CQL type} the codec should accept; can be {@code null}. + * @param value The value the codec should accept; must not be {@code null}. + * @return A suitable codec. + * @throws CodecNotFoundException if a suitable codec cannot be found. + */ + public TypeCodec codecFor(DataType cqlType, T value) { + return findCodec(cqlType, value); + } + + @SuppressWarnings("unchecked") + private TypeCodec lookupCodec(DataType cqlType, TypeToken javaType) { + checkNotNull(cqlType, "Parameter cqlType cannot be null"); + TypeCodec codec = BUILT_IN_CODECS_MAP.get(cqlType.getName()); + if (codec != null && (javaType == null || codec.accepts(javaType))) { + logger.trace("Returning built-in codec {}", codec); + return (TypeCodec) codec; } - - /** - * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} - * and the given Java class. - *

- * This method can only handle raw (non-parameterized) Java types. - * For parameterized types, use {@link #codecFor(DataType, TypeToken)} instead. - *

- * Codecs returned by this method are cached (see the {@link CodecRegistry top-level documentation} - * of this class for more explanations about caching). - * - * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. - * @param javaType The Java type the codec should accept; can be {@code null}. - * @return A suitable codec. - * @throws CodecNotFoundException if a suitable codec cannot be found. - */ - public TypeCodec codecFor(DataType cqlType, Class javaType) throws CodecNotFoundException { - return codecFor(cqlType, TypeToken.of(javaType)); + if (logger.isTraceEnabled()) + logger.trace("Querying cache for codec [{} <-> {}]", toString(cqlType), toString(javaType)); + try { + CacheKey cacheKey = new CacheKey(cqlType, javaType); + codec = cache.get(cacheKey); + } catch (UncheckedExecutionException e) { + if (e.getCause() instanceof CodecNotFoundException) { + throw (CodecNotFoundException) e.getCause(); + } + throw new CodecNotFoundException(e.getCause(), cqlType, javaType); + } catch (RuntimeException e) { + throw new CodecNotFoundException(e.getCause(), cqlType, javaType); + } catch (ExecutionException e) { + throw new CodecNotFoundException(e.getCause(), cqlType, javaType); } - - /** - * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} - * and the given Java type. - *

- * This method handles parameterized types thanks to Guava's {@link TypeToken} API. - *

- * Codecs returned by this method are cached (see the {@link CodecRegistry top-level documentation} - * of this class for more explanations about caching). - * - * @param cqlType The {@link DataType CQL type} the codec should accept; must not be {@code null}. - * @param javaType The {@link TypeToken Java type} the codec should accept; can be {@code null}. - * @return A suitable codec. - * @throws CodecNotFoundException if a suitable codec cannot be found. - */ - public TypeCodec codecFor(DataType cqlType, TypeToken javaType) throws CodecNotFoundException { - return lookupCodec(cqlType, javaType); + logger.trace("Returning cached codec {}", codec); + return (TypeCodec) codec; + } + + @SuppressWarnings("unchecked") + private TypeCodec findCodec(DataType cqlType, TypeToken javaType) { + checkNotNull(cqlType, "Parameter cqlType cannot be null"); + if (logger.isTraceEnabled()) + logger.trace("Looking for codec [{} <-> {}]", toString(cqlType), toString(javaType)); + + // Look at the built-in codecs first + for (TypeCodec codec : BUILT_IN_CODECS) { + if (codec.accepts(cqlType) && (javaType == null || codec.accepts(javaType))) { + logger.trace("Built-in codec found: {}", codec); + return (TypeCodec) codec; + } } - /** - * Returns a {@link TypeCodec codec} that accepts the given {@link DataType CQL type} - * and the given value. - *

- * This method takes an arbitrary Java object and tries to locate a suitable codec for it. - * Codecs must perform a {@link TypeCodec#accepts(Object) runtime inspection} of the object to determine - * if they can accept it or not, which, depending on the implementations, can be expensive; besides, the - * resulting codec cannot be cached. - * Therefore there might be a performance penalty when using this method. - *

- * Codecs returned by this method are NOT cached (see the {@link CodecRegistry top-level documentation} - * of this class for more explanations about caching). - * - * @param cqlType The {@link DataType CQL type} the codec should accept; can be {@code null}. - * @param value The value the codec should accept; must not be {@code null}. - * @return A suitable codec. - * @throws CodecNotFoundException if a suitable codec cannot be found. - */ - public TypeCodec codecFor(DataType cqlType, T value) { - return findCodec(cqlType, value); + // Look at the user-registered codecs next + for (TypeCodec codec : codecs) { + if (codec.accepts(cqlType) && (javaType == null || codec.accepts(javaType))) { + logger.trace("Already registered codec found: {}", codec); + return (TypeCodec) codec; + } } - - @SuppressWarnings("unchecked") - private TypeCodec lookupCodec(DataType cqlType, TypeToken javaType) { - checkNotNull(cqlType, "Parameter cqlType cannot be null"); - TypeCodec codec = BUILT_IN_CODECS_MAP.get(cqlType.getName()); - if (codec != null && (javaType == null || codec.accepts(javaType))) { - logger.trace("Returning built-in codec {}", codec); - return (TypeCodec) codec; - } - if (logger.isTraceEnabled()) - logger.trace("Querying cache for codec [{} <-> {}]", toString(cqlType), toString(javaType)); - try { - CacheKey cacheKey = new CacheKey(cqlType, javaType); - codec = cache.get(cacheKey); - } catch (UncheckedExecutionException e) { - if (e.getCause() instanceof CodecNotFoundException) { - throw (CodecNotFoundException) e.getCause(); - } - throw new CodecNotFoundException(e.getCause(), cqlType, javaType); - } catch (RuntimeException e) { - throw new CodecNotFoundException(e.getCause(), cqlType, javaType); - } catch (ExecutionException e) { - throw new CodecNotFoundException(e.getCause(), cqlType, javaType); - } - logger.trace("Returning cached codec {}", codec); + return createCodec(cqlType, javaType); + } + + @SuppressWarnings("unchecked") + private TypeCodec findCodec(DataType cqlType, T value) { + checkNotNull(value, "Parameter value cannot be null"); + if (logger.isTraceEnabled()) + logger.trace("Looking for codec [{} <-> {}]", toString(cqlType), value.getClass()); + + // Look at the built-in codecs first + for (TypeCodec codec : BUILT_IN_CODECS) { + if ((cqlType == null || codec.accepts(cqlType)) && codec.accepts(value)) { + logger.trace("Built-in codec found: {}", codec); return (TypeCodec) codec; + } } - @SuppressWarnings("unchecked") - private TypeCodec findCodec(DataType cqlType, TypeToken javaType) { - checkNotNull(cqlType, "Parameter cqlType cannot be null"); - if (logger.isTraceEnabled()) - logger.trace("Looking for codec [{} <-> {}]", toString(cqlType), toString(javaType)); - - // Look at the built-in codecs first - for (TypeCodec codec : BUILT_IN_CODECS) { - if (codec.accepts(cqlType) && (javaType == null || codec.accepts(javaType))) { - logger.trace("Built-in codec found: {}", codec); - return (TypeCodec) codec; - } - } - - // Look at the user-registered codecs next - for (TypeCodec codec : codecs) { - if (codec.accepts(cqlType) && (javaType == null || codec.accepts(javaType))) { - logger.trace("Already registered codec found: {}", codec); - return (TypeCodec) codec; - } - } - return createCodec(cqlType, javaType); + // Look at the user-registered codecs next + for (TypeCodec codec : codecs) { + if ((cqlType == null || codec.accepts(cqlType)) && codec.accepts(value)) { + logger.trace("Already registered codec found: {}", codec); + return (TypeCodec) codec; + } } - - 
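Putting the lookup chain together, a small sketch (assuming a freshly created registry): a request is served by the built-in codecs first, then by user-registered ones, and only then does the registry generate and cache a new codec, which is what happens the first time a collection type is requested:

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.TypeCodec;

public class LookupChainExample {
  public static void main(String[] args) {
    CodecRegistry registry = new CodecRegistry();

    // Served directly from the built-in codecs, without touching the cache.
    TypeCodec<Object> intCodec = registry.codecFor(DataType.cint());

    // No built-in or user-registered codec accepts list<text>, so the registry
    // generates one on the fly (a list codec wrapping the varchar codec), caches it,
    // and lets the weigher decide whether it may later be evicted.
    TypeCodec<Object> listCodec = registry.codecFor(DataType.list(DataType.text()));

    System.out.println(intCodec + " / " + listCodec);
  }
}
```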
@SuppressWarnings("unchecked") - private TypeCodec findCodec(DataType cqlType, T value) { - checkNotNull(value, "Parameter value cannot be null"); - if (logger.isTraceEnabled()) - logger.trace("Looking for codec [{} <-> {}]", toString(cqlType), value.getClass()); - - // Look at the built-in codecs first - for (TypeCodec codec : BUILT_IN_CODECS) { - if ((cqlType == null || codec.accepts(cqlType)) && codec.accepts(value)) { - logger.trace("Built-in codec found: {}", codec); - return (TypeCodec) codec; - } - } - - // Look at the user-registered codecs next - for (TypeCodec codec : codecs) { - if ((cqlType == null || codec.accepts(cqlType)) && codec.accepts(value)) { - logger.trace("Already registered codec found: {}", codec); - return (TypeCodec) codec; - } - } - return createCodec(cqlType, value); + return createCodec(cqlType, value); + } + + private TypeCodec createCodec(DataType cqlType, TypeToken javaType) { + TypeCodec codec = maybeCreateCodec(cqlType, javaType); + if (codec == null) throw notFound(cqlType, javaType); + // double-check that the created codec satisfies the initial request + // this check can fail specially when creating codecs for collections + // e.g. if B extends A and there is a codec registered for A and + // we request a codec for List, the registry would generate a codec for List + if (!codec.accepts(cqlType) || (javaType != null && !codec.accepts(javaType))) + throw notFound(cqlType, javaType); + logger.trace("Codec created: {}", codec); + return codec; + } + + private TypeCodec createCodec(DataType cqlType, T value) { + TypeCodec codec = maybeCreateCodec(cqlType, value); + if (codec == null) throw notFound(cqlType, TypeToken.of(value.getClass())); + // double-check that the created codec satisfies the initial request + if ((cqlType != null && !codec.accepts(cqlType)) || !codec.accepts(value)) + throw notFound(cqlType, TypeToken.of(value.getClass())); + logger.trace("Codec created: {}", codec); + return codec; + } + + @SuppressWarnings("unchecked") + private TypeCodec maybeCreateCodec(DataType cqlType, TypeToken javaType) { + checkNotNull(cqlType); + + if (cqlType.getName() == LIST + && (javaType == null || List.class.isAssignableFrom(javaType.getRawType()))) { + TypeToken elementType = null; + if (javaType != null && javaType.getType() instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); + elementType = TypeToken.of(typeArguments[0]); + } + TypeCodec eltCodec = findCodec(cqlType.getTypeArguments().get(0), elementType); + return (TypeCodec) TypeCodec.list(eltCodec); } - private TypeCodec createCodec(DataType cqlType, TypeToken javaType) { - TypeCodec codec = maybeCreateCodec(cqlType, javaType); - if (codec == null) - throw notFound(cqlType, javaType); - // double-check that the created codec satisfies the initial request - // this check can fail specially when creating codecs for collections - // e.g. 
if B extends A and there is a codec registered for A and - // we request a codec for List, the registry would generate a codec for List - if (!codec.accepts(cqlType) || (javaType != null && !codec.accepts(javaType))) - throw notFound(cqlType, javaType); - logger.trace("Codec created: {}", codec); - return codec; + if (cqlType.getName() == SET + && (javaType == null || Set.class.isAssignableFrom(javaType.getRawType()))) { + TypeToken elementType = null; + if (javaType != null && javaType.getType() instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); + elementType = TypeToken.of(typeArguments[0]); + } + TypeCodec eltCodec = findCodec(cqlType.getTypeArguments().get(0), elementType); + return (TypeCodec) TypeCodec.set(eltCodec); } - private TypeCodec createCodec(DataType cqlType, T value) { - TypeCodec codec = maybeCreateCodec(cqlType, value); - if (codec == null) - throw notFound(cqlType, TypeToken.of(value.getClass())); - // double-check that the created codec satisfies the initial request - if ((cqlType != null && !codec.accepts(cqlType)) || !codec.accepts(value)) - throw notFound(cqlType, TypeToken.of(value.getClass())); - logger.trace("Codec created: {}", codec); - return codec; + if (cqlType.getName() == MAP + && (javaType == null || Map.class.isAssignableFrom(javaType.getRawType()))) { + TypeToken keyType = null; + TypeToken valueType = null; + if (javaType != null && javaType.getType() instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); + keyType = TypeToken.of(typeArguments[0]); + valueType = TypeToken.of(typeArguments[1]); + } + TypeCodec keyCodec = findCodec(cqlType.getTypeArguments().get(0), keyType); + TypeCodec valueCodec = findCodec(cqlType.getTypeArguments().get(1), valueType); + return (TypeCodec) TypeCodec.map(keyCodec, valueCodec); } - @SuppressWarnings("unchecked") - private TypeCodec maybeCreateCodec(DataType cqlType, TypeToken javaType) { - checkNotNull(cqlType); - - if (cqlType.getName() == LIST && (javaType == null || List.class.isAssignableFrom(javaType.getRawType()))) { - TypeToken elementType = null; - if (javaType != null && javaType.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); - elementType = TypeToken.of(typeArguments[0]); - } - TypeCodec eltCodec = findCodec(cqlType.getTypeArguments().get(0), elementType); - return (TypeCodec) TypeCodec.list(eltCodec); - } - - if (cqlType.getName() == SET && (javaType == null || Set.class.isAssignableFrom(javaType.getRawType()))) { - TypeToken elementType = null; - if (javaType != null && javaType.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); - elementType = TypeToken.of(typeArguments[0]); - } - TypeCodec eltCodec = findCodec(cqlType.getTypeArguments().get(0), elementType); - return (TypeCodec) TypeCodec.set(eltCodec); - } - - if (cqlType.getName() == MAP && (javaType == null || Map.class.isAssignableFrom(javaType.getRawType()))) { - TypeToken keyType = null; - TypeToken valueType = null; - if (javaType != null && javaType.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) javaType.getType()).getActualTypeArguments(); - keyType = TypeToken.of(typeArguments[0]); - valueType = TypeToken.of(typeArguments[1]); - } - TypeCodec keyCodec = findCodec(cqlType.getTypeArguments().get(0), 
keyType); - TypeCodec valueCodec = findCodec(cqlType.getTypeArguments().get(1), valueType); - return (TypeCodec) TypeCodec.map(keyCodec, valueCodec); - } - - if (cqlType instanceof TupleType && (javaType == null || TupleValue.class.isAssignableFrom(javaType.getRawType()))) { - return (TypeCodec) TypeCodec.tuple((TupleType) cqlType); - } - - if (cqlType instanceof UserType && (javaType == null || UDTValue.class.isAssignableFrom(javaType.getRawType()))) { - return (TypeCodec) TypeCodec.userType((UserType) cqlType); - } - - if (cqlType instanceof DataType.CustomType && (javaType == null || ByteBuffer.class.isAssignableFrom(javaType.getRawType()))) { - return (TypeCodec) TypeCodec.custom((DataType.CustomType) cqlType); - } - - return null; + if (cqlType instanceof TupleType + && (javaType == null || TupleValue.class.isAssignableFrom(javaType.getRawType()))) { + return (TypeCodec) TypeCodec.tuple((TupleType) cqlType); } - @SuppressWarnings({"unchecked", "rawtypes"}) - private TypeCodec maybeCreateCodec(DataType cqlType, T value) { - checkNotNull(value); - - if ((cqlType == null || cqlType.getName() == LIST) && value instanceof List) { - List list = (List) value; - if (list.isEmpty()) { - DataType elementType = (cqlType == null || cqlType.getTypeArguments().isEmpty()) - ? DataType.blob() - : cqlType.getTypeArguments().get(0); - return TypeCodec.list(findCodec(elementType, (TypeToken) null)); - } else { - DataType elementType = (cqlType == null || cqlType.getTypeArguments().isEmpty()) - ? null - : cqlType.getTypeArguments().get(0); - return (TypeCodec) TypeCodec.list(findCodec(elementType, list.iterator().next())); - } - } - - if ((cqlType == null || cqlType.getName() == SET) && value instanceof Set) { - Set set = (Set) value; - if (set.isEmpty()) { - DataType elementType = (cqlType == null || cqlType.getTypeArguments().isEmpty()) - ? DataType.blob() - : cqlType.getTypeArguments().get(0); - return TypeCodec.set(findCodec(elementType, (TypeToken) null)); - } else { - DataType elementType = (cqlType == null || cqlType.getTypeArguments().isEmpty()) - ? null - : cqlType.getTypeArguments().get(0); - return (TypeCodec) TypeCodec.set(findCodec(elementType, set.iterator().next())); - } - } + if (cqlType instanceof UserType + && (javaType == null || UDTValue.class.isAssignableFrom(javaType.getRawType()))) { + return (TypeCodec) TypeCodec.userType((UserType) cqlType); + } - if ((cqlType == null || cqlType.getName() == MAP) && value instanceof Map) { - Map map = (Map) value; - if (map.isEmpty()) { - DataType keyType = (cqlType == null || cqlType.getTypeArguments().size() < 1) - ? DataType.blob() - : cqlType.getTypeArguments().get(0); - DataType valueType = (cqlType == null || cqlType.getTypeArguments().size() < 2) - ? DataType.blob() : - cqlType.getTypeArguments().get(1); - return TypeCodec.map( - findCodec(keyType, (TypeToken) null), - findCodec(valueType, (TypeToken) null)); - } else { - DataType keyType = (cqlType == null || cqlType.getTypeArguments().size() < 1) - ? null - : cqlType.getTypeArguments().get(0); - DataType valueType = (cqlType == null || cqlType.getTypeArguments().size() < 2) - ? 
null - : cqlType.getTypeArguments().get(1); - Map.Entry entry = (Map.Entry) map.entrySet().iterator().next(); - return (TypeCodec) TypeCodec.map( - findCodec(keyType, entry.getKey()), - findCodec(valueType, entry.getValue())); - } - } + if (cqlType instanceof DataType.CustomType + && (javaType == null || ByteBuffer.class.isAssignableFrom(javaType.getRawType()))) { + return (TypeCodec) TypeCodec.custom((DataType.CustomType) cqlType); + } - if ((cqlType == null || cqlType.getName() == DataType.Name.TUPLE) && value instanceof TupleValue) { - return (TypeCodec) TypeCodec.tuple(cqlType == null ? ((TupleValue) value).getType() : (TupleType) cqlType); - } + return null; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private TypeCodec maybeCreateCodec(DataType cqlType, T value) { + checkNotNull(value); + + if ((cqlType == null || cqlType.getName() == LIST) && value instanceof List) { + List list = (List) value; + if (list.isEmpty()) { + DataType elementType = + (cqlType == null || cqlType.getTypeArguments().isEmpty()) + ? DataType.blob() + : cqlType.getTypeArguments().get(0); + return TypeCodec.list(findCodec(elementType, (TypeToken) null)); + } else { + DataType elementType = + (cqlType == null || cqlType.getTypeArguments().isEmpty()) + ? null + : cqlType.getTypeArguments().get(0); + return (TypeCodec) TypeCodec.list(findCodec(elementType, list.iterator().next())); + } + } - if ((cqlType == null || cqlType.getName() == DataType.Name.UDT) && value instanceof UDTValue) { - return (TypeCodec) TypeCodec.userType(cqlType == null ? ((UDTValue) value).getType() : (UserType) cqlType); - } + if ((cqlType == null || cqlType.getName() == SET) && value instanceof Set) { + Set set = (Set) value; + if (set.isEmpty()) { + DataType elementType = + (cqlType == null || cqlType.getTypeArguments().isEmpty()) + ? DataType.blob() + : cqlType.getTypeArguments().get(0); + return TypeCodec.set(findCodec(elementType, (TypeToken) null)); + } else { + DataType elementType = + (cqlType == null || cqlType.getTypeArguments().isEmpty()) + ? null + : cqlType.getTypeArguments().get(0); + return (TypeCodec) TypeCodec.set(findCodec(elementType, set.iterator().next())); + } + } - if ((cqlType != null && cqlType instanceof DataType.CustomType) && value instanceof ByteBuffer) { - return (TypeCodec) TypeCodec.custom((DataType.CustomType) cqlType); - } + if ((cqlType == null || cqlType.getName() == MAP) && value instanceof Map) { + Map map = (Map) value; + if (map.isEmpty()) { + DataType keyType = + (cqlType == null || cqlType.getTypeArguments().size() < 1) + ? DataType.blob() + : cqlType.getTypeArguments().get(0); + DataType valueType = + (cqlType == null || cqlType.getTypeArguments().size() < 2) + ? DataType.blob() + : cqlType.getTypeArguments().get(1); + return TypeCodec.map( + findCodec(keyType, (TypeToken) null), findCodec(valueType, (TypeToken) null)); + } else { + DataType keyType = + (cqlType == null || cqlType.getTypeArguments().size() < 1) + ? null + : cqlType.getTypeArguments().get(0); + DataType valueType = + (cqlType == null || cqlType.getTypeArguments().size() < 2) + ? null + : cqlType.getTypeArguments().get(1); + Map.Entry entry = (Map.Entry) map.entrySet().iterator().next(); + return (TypeCodec) + TypeCodec.map( + findCodec(keyType, entry.getKey()), findCodec(valueType, entry.getValue())); + } + } - return null; + if ((cqlType == null || cqlType.getName() == DataType.Name.TUPLE) + && value instanceof TupleValue) { + return (TypeCodec) + TypeCodec.tuple(cqlType == null ? 
((TupleValue) value).getType() : (TupleType) cqlType); } - private static CodecNotFoundException notFound(DataType cqlType, TypeToken javaType) { - String msg = String.format("Codec not found for requested operation: [%s <-> %s]", - toString(cqlType), - toString(javaType)); - return new CodecNotFoundException(msg, cqlType, javaType); + if ((cqlType == null || cqlType.getName() == DataType.Name.UDT) && value instanceof UDTValue) { + return (TypeCodec) + TypeCodec.userType(cqlType == null ? ((UDTValue) value).getType() : (UserType) cqlType); } - private static String toString(Object value) { - return value == null ? "ANY" : value.toString(); + if ((cqlType != null && cqlType instanceof DataType.CustomType) + && value instanceof ByteBuffer) { + return (TypeCodec) TypeCodec.custom((DataType.CustomType) cqlType); } + return null; + } + + private static CodecNotFoundException notFound(DataType cqlType, TypeToken javaType) { + String msg = + String.format( + "Codec not found for requested operation: [%s <-> %s]", + toString(cqlType), toString(javaType)); + return new CodecNotFoundException(msg, cqlType, javaType); + } + + private static String toString(Object value) { + return value == null ? "ANY" : value.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java b/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java index 2499b226bc5..6f9ee1e58a8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,233 +19,239 @@ import java.nio.ByteBuffer; -/** - * A set of utility methods to deal with type conversion and serialization. - */ +/** A set of utility methods to deal with type conversion and serialization. */ public final class CodecUtils { - private static final long MAX_CQL_LONG_VALUE = ((1L << 32) - 1); + private static final long MAX_CQL_LONG_VALUE = ((1L << 32) - 1); - private static final long EPOCH_AS_CQL_LONG = (1L << 31); + private static final long EPOCH_AS_CQL_LONG = (1L << 31); - private CodecUtils() { - } + private CodecUtils() {} - /** - * Utility method that "packs" together a list of {@link ByteBuffer}s containing - * serialized collection elements. - * Mainly intended for use with collection codecs when serializing collections. 
- * - * @param buffers the collection elements - * @param elements the total number of elements - * @param version the protocol version to use - * @return The serialized collection - */ - public static ByteBuffer pack(ByteBuffer[] buffers, int elements, ProtocolVersion version) { - int size = 0; - for (ByteBuffer bb : buffers) { - int elemSize = sizeOfValue(bb, version); - size += elemSize; - } - ByteBuffer result = ByteBuffer.allocate(sizeOfCollectionSize(version) + size); - writeSize(result, elements, version); - for (ByteBuffer bb : buffers) - writeValue(result, bb, version); - return (ByteBuffer) result.flip(); + /** + * Utility method that "packs" together a list of {@link ByteBuffer}s containing serialized + * collection elements. Mainly intended for use with collection codecs when serializing + * collections. + * + * @param buffers the collection elements + * @param elements the total number of elements + * @param version the protocol version to use + * @return The serialized collection + */ + public static ByteBuffer pack(ByteBuffer[] buffers, int elements, ProtocolVersion version) { + int size = 0; + for (ByteBuffer bb : buffers) { + int elemSize = sizeOfValue(bb, version); + size += elemSize; } + ByteBuffer result = ByteBuffer.allocate(sizeOfCollectionSize(version) + size); + writeSize(result, elements, version); + for (ByteBuffer bb : buffers) writeValue(result, bb, version); + return (ByteBuffer) result.flip(); + } - /** - * Utility method that reads a size value. - * Mainly intended for collection codecs when deserializing CQL collections. - * - * @param input The ByteBuffer to read from. - * @param version The protocol version to use. - * @return The size value. - */ - public static int readSize(ByteBuffer input, ProtocolVersion version) { - switch (version) { - case V1: - case V2: - return getUnsignedShort(input); - case V3: - case V4: - case V5: - return input.getInt(); - default: - throw version.unsupported(); - } + /** + * Utility method that reads a size value. Mainly intended for collection codecs when + * deserializing CQL collections. + * + * @param input The ByteBuffer to read from. + * @param version The protocol version to use. + * @return The size value. + */ + public static int readSize(ByteBuffer input, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return getUnsignedShort(input); + case V3: + case V4: + case V5: + case V6: + return input.getInt(); + default: + throw version.unsupported(); } + } - /** - * Utility method that writes a size value. - * Mainly intended for collection codecs when serializing CQL collections. - * - * @param output The ByteBuffer to write to. - * @param size The collection size. - * @param version The protocol version to use. - */ - public static void writeSize(ByteBuffer output, int size, ProtocolVersion version) { - switch (version) { - case V1: - case V2: - if (size > 65535) - throw new IllegalArgumentException(String.format("Native protocol version %d supports up to 65535 elements in any collection - but collection contains %d elements", version.toInt(), size)); - output.putShort((short) size); - break; - case V3: - case V4: - case V5: - output.putInt(size); - break; - default: - throw version.unsupported(); - } + /** + * Utility method that writes a size value. Mainly intended for collection codecs when serializing + * CQL collections. + * + * @param output The ByteBuffer to write to. + * @param size The collection size. + * @param version The protocol version to use. 
+ */ + public static void writeSize(ByteBuffer output, int size, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + if (size > 65535) + throw new IllegalArgumentException( + String.format( + "Native protocol version %d supports up to 65535 elements in any collection - but collection contains %d elements", + version.toInt(), size)); + output.putShort((short) size); + break; + case V3: + case V4: + case V5: + case V6: + output.putInt(size); + break; + default: + throw version.unsupported(); } + } - /** - * Utility method that reads a value. - * Mainly intended for collection codecs when deserializing CQL collections. - * - * @param input The ByteBuffer to read from. - * @param version The protocol version to use. - * @return The collection element. - */ - public static ByteBuffer readValue(ByteBuffer input, ProtocolVersion version) { - int size = readSize(input, version); - return size < 0 ? null : readBytes(input, size); - } + /** + * Utility method that reads a value. Mainly intended for collection codecs when deserializing CQL + * collections. + * + * @param input The ByteBuffer to read from. + * @param version The protocol version to use. + * @return The collection element. + */ + public static ByteBuffer readValue(ByteBuffer input, ProtocolVersion version) { + int size = readSize(input, version); + return size < 0 ? null : readBytes(input, size); + } - /** - * Utility method that writes a value. - * Mainly intended for collection codecs when deserializing CQL collections. - * - * @param output The ByteBuffer to write to. - * @param value The value to write. - * @param version The protocol version to use. - */ - public static void writeValue(ByteBuffer output, ByteBuffer value, ProtocolVersion version) { - switch (version) { - case V1: - case V2: - assert value != null; - output.putShort((short) value.remaining()); - output.put(value.duplicate()); - break; - case V3: - case V4: - case V5: - if (value == null) { - output.putInt(-1); - } else { - output.putInt(value.remaining()); - output.put(value.duplicate()); - } - break; - default: - throw version.unsupported(); + /** + * Utility method that writes a value. Mainly intended for collection codecs when deserializing + * CQL collections. + * + * @param output The ByteBuffer to write to. + * @param value The value to write. + * @param version The protocol version to use. + */ + public static void writeValue(ByteBuffer output, ByteBuffer value, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + assert value != null; + output.putShort((short) value.remaining()); + output.put(value.duplicate()); + break; + case V3: + case V4: + case V5: + case V6: + if (value == null) { + output.putInt(-1); + } else { + output.putInt(value.remaining()); + output.put(value.duplicate()); } + break; + default: + throw version.unsupported(); } + } - /** - * Read {@code length} bytes from {@code bb} into a new ByteBuffer. - * - * @param bb The ByteBuffer to read. - * @param length The number of bytes to read. - * @return The read bytes. - */ - public static ByteBuffer readBytes(ByteBuffer bb, int length) { - ByteBuffer copy = bb.duplicate(); - copy.limit(copy.position() + length); - bb.position(bb.position() + length); - return copy; - } - - /** - * Converts an "unsigned" int read from a DATE value into a signed int. - *
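A sketch of how these helpers fit together when encoding and decoding a collection by hand (purely illustrative; regular application code would go through `TypeCodec.list` instead):

```
import com.datastax.driver.core.CodecUtils;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import java.nio.ByteBuffer;

public class PackUnpackExample {
  public static void main(String[] args) {
    ProtocolVersion v = ProtocolVersion.V4;

    // Serialize each element, then "pack" them behind an element-count header.
    ByteBuffer[] elements = {
      TypeCodec.varchar().serialize("foo", v), TypeCodec.varchar().serialize("bar", v)
    };
    ByteBuffer packed = CodecUtils.pack(elements, elements.length, v);

    // Decoding walks the same layout: a size header followed by length-prefixed values.
    int n = CodecUtils.readSize(packed, v);
    for (int i = 0; i < n; i++) {
      ByteBuffer raw = CodecUtils.readValue(packed, v);
      System.out.println(TypeCodec.varchar().deserialize(raw, v));
    }
  }
}
```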

- * The protocol encodes DATE values as unsigned ints with the Epoch in the middle of the range (2^31). - * This method handles the conversion from an "unsigned" to a signed int. - */ - public static int fromUnsignedToSignedInt(int unsigned) { - return unsigned + Integer.MIN_VALUE; // this relies on overflow for "negative" values - } + /** + * Read {@code length} bytes from {@code bb} into a new ByteBuffer. + * + * @param bb The ByteBuffer to read. + * @param length The number of bytes to read. + * @return The read bytes. + */ + public static ByteBuffer readBytes(ByteBuffer bb, int length) { + ByteBuffer copy = bb.duplicate(); + copy.limit(copy.position() + length); + bb.position(bb.position() + length); + return copy; + } - /** - * Converts an int into an "unsigned" int suitable to be written as a DATE value. - *

- * The protocol encodes DATE values as unsigned ints with the Epoch in the middle of the range (2^31). - * This method handles the conversion from a signed to an "unsigned" int. - */ - public static int fromSignedToUnsignedInt(int signed) { - return signed - Integer.MIN_VALUE; - } + /** + * Converts an "unsigned" int read from a DATE value into a signed int. + * + *

The protocol encodes DATE values as unsigned ints with the Epoch in the middle of + * the range (2^31). This method handles the conversion from an "unsigned" to a signed int. + */ + public static int fromUnsignedToSignedInt(int unsigned) { + return unsigned + Integer.MIN_VALUE; // this relies on overflow for "negative" values + } - /** - * Convert from a raw CQL long representing a numeric DATE literal - * to the number of days since the Epoch. - * In CQL, numeric DATE literals are longs (unsigned integers actually) - * between 0 and 2^32 - 1, with the epoch in the middle; - * this method re-centers the epoch at 0. - * - * @param raw The CQL date value to convert. - * @return The number of days since the Epoch corresponding to the given raw value. - * @throws IllegalArgumentException if the value is out of range. - */ - public static int fromCqlDateToDaysSinceEpoch(long raw) { - if (raw < 0 || raw > MAX_CQL_LONG_VALUE) - throw new IllegalArgumentException(String.format("Numeric literals for DATE must be between 0 and %d (got %d)", MAX_CQL_LONG_VALUE, raw)); - return (int) (raw - EPOCH_AS_CQL_LONG); - } + /** + * Converts an int into an "unsigned" int suitable to be written as a DATE value. + * + *

The protocol encodes DATE values as unsigned ints with the Epoch in the middle of + * the range (2^31). This method handles the conversion from a signed to an "unsigned" int. + */ + public static int fromSignedToUnsignedInt(int signed) { + return signed - Integer.MIN_VALUE; + } - /** - * Convert the number of days since the Epoch into - * a raw CQL long representing a numeric DATE literal. - *

- * In CQL, numeric DATE literals are longs (unsigned integers actually) - * between 0 and 2^32 - 1, with the epoch in the middle; - * this method re-centers the epoch at 2^31. - * - * @param days The number of days since the Epoch convert. - * @return The CQL date value corresponding to the given value. - */ - public static long fromDaysSinceEpochToCqlDate(int days) { - return ((long) days + EPOCH_AS_CQL_LONG); - } + /** + * Convert from a raw CQL long representing a numeric DATE literal to the number of days since the + * Epoch. In CQL, numeric DATE literals are longs (unsigned integers actually) between 0 and 2^32 + * - 1, with the epoch in the middle; this method re-centers the epoch at 0. + * + * @param raw The CQL date value to convert. + * @return The number of days since the Epoch corresponding to the given raw value. + * @throws IllegalArgumentException if the value is out of range. + */ + public static int fromCqlDateToDaysSinceEpoch(long raw) { + if (raw < 0 || raw > MAX_CQL_LONG_VALUE) + throw new IllegalArgumentException( + String.format( + "Numeric literals for DATE must be between 0 and %d (got %d)", + MAX_CQL_LONG_VALUE, raw)); + return (int) (raw - EPOCH_AS_CQL_LONG); + } - private static int sizeOfCollectionSize(ProtocolVersion version) { - switch (version) { - case V1: - case V2: - return 2; - case V3: - case V4: - case V5: - return 4; - default: - throw version.unsupported(); - } - } + /** + * Convert the number of days since the Epoch into a raw CQL long representing a numeric DATE + * literal. + * + *
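A small worked example of the DATE re-centering described here, using the two conversion methods together with the unsigned-int helpers (the printed values are computed directly from the constants above):

```
import com.datastax.driver.core.CodecUtils;

public class DateEncodingExample {
  public static void main(String[] args) {
    long epochAsCqlLong = 1L << 31; // 2147483648: the CQL literal for 1970-01-01

    System.out.println(CodecUtils.fromCqlDateToDaysSinceEpoch(epochAsCqlLong));     // 0   (1970-01-01)
    System.out.println(CodecUtils.fromCqlDateToDaysSinceEpoch(epochAsCqlLong + 1)); // 1   (1970-01-02)
    System.out.println(CodecUtils.fromDaysSinceEpochToCqlDate(-3));                 // 2147483645

    // On the wire the same value travels as an "unsigned" int: bit pattern 0x80000000 is the epoch.
    System.out.println(CodecUtils.fromUnsignedToSignedInt(0x80000000)); // 0
    System.out.println(CodecUtils.fromSignedToUnsignedInt(0));          // -2147483648 (0x80000000)
  }
}
```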

In CQL, numeric DATE literals are longs (unsigned integers actually) between 0 and 2^32 - 1, + * with the epoch in the middle; this method re-centers the epoch at 2^31. + * + * @param days The number of days since the Epoch convert. + * @return The CQL date value corresponding to the given value. + */ + public static long fromDaysSinceEpochToCqlDate(int days) { + return ((long) days + EPOCH_AS_CQL_LONG); + } - private static int sizeOfValue(ByteBuffer value, ProtocolVersion version) { - switch (version) { - case V1: - case V2: - int elemSize = value.remaining(); - if (elemSize > 65535) - throw new IllegalArgumentException(String.format("Native protocol version %d supports only elements with size up to 65535 bytes - but element size is %d bytes", version.toInt(), elemSize)); - return 2 + elemSize; - case V3: - case V4: - case V5: - return value == null ? 4 : 4 + value.remaining(); - default: - throw version.unsupported(); - } + private static int sizeOfCollectionSize(ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return 2; + case V3: + case V4: + case V5: + case V6: + return 4; + default: + throw version.unsupported(); } + } - private static int getUnsignedShort(ByteBuffer bb) { - int length = (bb.get() & 0xFF) << 8; - return length | (bb.get() & 0xFF); + private static int sizeOfValue(ByteBuffer value, ProtocolVersion version) { + switch (version) { + case V1: + case V2: + int elemSize = value.remaining(); + if (elemSize > 65535) + throw new IllegalArgumentException( + String.format( + "Native protocol version %d supports only elements with size up to 65535 bytes - but element size is %d bytes", + version.toInt(), elemSize)); + return 2 + elemSize; + case V3: + case V4: + case V5: + case V6: + return value == null ? 4 : 4 + value.remaining(); + default: + throw version.unsupported(); } + } + private static int getUnsignedShort(ByteBuffer bb) { + int length = (bb.get() & 0xFF) << 8; + return length | (bb.get() & 0xFF); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java index 7b71ed799fe..0b3317e3caf 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,339 +17,334 @@ */ package com.datastax.driver.core; -import java.util.*; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; /** - * Metadata describing the columns returned in a {@link ResultSet} or a - * {@link PreparedStatement}. - *

- * A {@code columnDefinitions}} instance is mainly a list of - * {@code ColumnsDefinitions.Definition}. The definitions or metadata for a column - * can be accessed either by: + * Metadata describing the columns returned in a {@link ResultSet} or a {@link PreparedStatement}. + * + *

A {@code ColumnDefinitions} instance is mainly a list of {@code + * ColumnDefinitions.Definition}. The definitions or metadata for a column can be accessed either + * by: + * *

- *

- * When accessed by name, column selection is case insensitive. In case multiple - * columns only differ by the case of their name, then the column returned with - * be the first column that has been defined in CQL without forcing case sensitivity - * (that is, it has either been defined without quotes or is fully lowercase). - * If none of the columns have been defined in this manner, the first column matching - * (with case insensitivity) is returned. You can force the case of a selection - * by double quoting the name. - *

- * For example: - *

    - *
  • If {@code cd} contains column {@code fOO}, then {@code cd.contains("foo")}, - * {@code cd.contains("fOO")} and {@code cd.contains("Foo")} will return {@code true}.
  • - *
  • If {@code cd} contains both {@code foo} and {@code FOO} then: + * + *

When accessed by name, column selection is case insensitive. In case multiple columns only + * differ by the case of their name, then the column returned will be the first column that has been + * defined in CQL without forcing case sensitivity (that is, it has either been defined without + * quotes or is fully lowercase). If none of the columns have been defined in this manner, the first + * column matching (with case insensitivity) is returned. You can force the case of a selection by + * double quoting the name. + * + *

    For example: + * *

      - *
    • {@code cd.getType("foo")}, {@code cd.getType("fOO")} and {@code cd.getType("FOO")} - * will all match column {@code foo}.
    • - *
    • {@code cd.getType("\"FOO\"")} will match column {@code FOO}
    • - *
    + *
  • If {@code cd} contains column {@code fOO}, then {@code cd.contains("foo")}, {@code + * cd.contains("fOO")} and {@code cd.contains("Foo")} will return {@code true}. + *
  • If {@code cd} contains both {@code foo} and {@code FOO} then: + *
      + *
    • {@code cd.getType("foo")}, {@code cd.getType("fOO")} and {@code cd.getType("FOO")} + * will all match column {@code foo}. + *
    • {@code cd.getType("\"FOO\"")} will match column {@code FOO} + *
    *
- * Note that the preceding rules mean that if a {@code ColumnDefinitions} object - * contains multiple occurrences of the exact same name (be it the same column - * multiple times or columns from different tables with the same name), you - * will have to use selection by index to disambiguate. + * + * Note that the preceding rules mean that if a {@code ColumnDefinitions} object contains multiple + * occurrences of the exact same name (be it the same column multiple times or columns from + * different tables with the same name), you will have to use selection by index to disambiguate. */ public class ColumnDefinitions implements Iterable { - static final ColumnDefinitions EMPTY = new ColumnDefinitions(new Definition[0], CodecRegistry.DEFAULT_INSTANCE); + static final ColumnDefinitions EMPTY = + new ColumnDefinitions(new Definition[0], CodecRegistry.DEFAULT_INSTANCE); - private final Definition[] byIdx; - private final Map byName; - final CodecRegistry codecRegistry; + private final Definition[] byIdx; + private final Map byName; + final CodecRegistry codecRegistry; - ColumnDefinitions(Definition[] defs, CodecRegistry codecRegistry) { + ColumnDefinitions(Definition[] defs, CodecRegistry codecRegistry) { - this.byIdx = defs; - this.codecRegistry = codecRegistry; - this.byName = new HashMap(defs.length); + this.byIdx = defs; + this.codecRegistry = codecRegistry; + this.byName = new HashMap(defs.length); - for (int i = 0; i < defs.length; i++) { - // Be optimistic, 99% of the time, previous will be null. - int[] previous = this.byName.put(defs[i].name.toLowerCase(), new int[]{i}); - if (previous != null) { - int[] indexes = new int[previous.length + 1]; - System.arraycopy(previous, 0, indexes, 0, previous.length); - indexes[indexes.length - 1] = i; - this.byName.put(defs[i].name.toLowerCase(), indexes); - } - } + for (int i = 0; i < defs.length; i++) { + // Be optimistic, 99% of the time, previous will be null. + int[] previous = this.byName.put(defs[i].name.toLowerCase(), new int[] {i}); + if (previous != null) { + int[] indexes = new int[previous.length + 1]; + System.arraycopy(previous, 0, indexes, 0, previous.length); + indexes[indexes.length - 1] = i; + this.byName.put(defs[i].name.toLowerCase(), indexes); + } } - - /** - * Returns the number of columns described by this {@code Columns} - * instance. - * - * @return the number of columns described by this metadata. - */ - public int size() { - return byIdx.length; + } + + /** + * Returns the number of columns described by this {@code Columns} instance. + * + * @return the number of columns described by this metadata. + */ + public int size() { + return byIdx.length; + } + + /** + * Returns whether this metadata contains a given name. + * + * @param name the name to check. + * @return {@code true} if this metadata contains the column named {@code name}, {@code false} + * otherwise. + */ + public boolean contains(String name) { + return findAllIdx(name) != null; + } + + /** + * The first index in this metadata of the provided name, if present. + * + * @param name the name of the column. + * @return the index of the first occurrence of {@code name} in this metadata if {@code + * contains(name)}, -1 otherwise. + */ + public int getIndexOf(String name) { + return findFirstIdx(name); + } + + /** + * Returns an iterator over the {@link Definition} contained in this metadata. + * + *
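The same rules as a quick usage sketch (the keyspace, table and columns are hypothetical; `cd` would normally come from a {@code ResultSet}):

```
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;

public class ColumnLookupExample {
  // Assumes a table whose columns differ only by case: foo and "FOO".
  static void inspect(Session session) {
    ResultSet rs = session.execute("SELECT foo, \"FOO\" FROM ks.tbl");
    ColumnDefinitions cd = rs.getColumnDefinitions();

    DataType unquoted = cd.getType("FOO");   // case-insensitive: matches column foo
    DataType quoted = cd.getType("\"FOO\""); // quoted: forces an exact match on FOO
    DataType byIndex = cd.getType(1);        // unambiguous: the second selected column

    System.out.println(unquoted + " / " + quoted + " / " + byIndex);
  }
}
```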

The order of the iterator will be the one of this metadata. + * + * @return an iterator over the {@link Definition} contained in this metadata. + */ + @Override + public Iterator iterator() { + return Arrays.asList(byIdx).iterator(); + } + + /** + * Returns a list containing all the definitions of this metadata in order. + * + * @return a list of the {@link Definition} contained in this metadata. + */ + public List asList() { + return Arrays.asList(byIdx); + } + + /** + * Returns the name of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the name of the {@code i}th column in this metadata. + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getName(int i) { + return byIdx[i].name; + } + + /** + * Returns the type of the {@code i}th column in this metadata. + * + *

Note that this method does not set the {@link DataType#isFrozen()} flag on the returned + * object, it will always default to {@code false}. Use {@link Cluster#getMetadata()} to determine + * if a column is frozen. + * + * @param i the index in this metadata. + * @return the type of the {@code i}th column in this metadata. + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public DataType getType(int i) { + return byIdx[i].type; + } + + /** + * Returns the type of the first occurrence of {@code name} in this metadata. + * + *

Note that this method does not set the {@link DataType#isFrozen()} flag on the returned + * object, it will always default to {@code false}. Use {@link Cluster#getMetadata()} to determine + * if a column is frozen. + * + * @param name the name of the column. + * @return the type of (the first occurrence of) {@code name} in this metadata. + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public DataType getType(String name) { + return getType(getFirstIdx(name)); + } + + /** + * Returns the keyspace of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the keyspace of the {@code i}th column in this metadata. + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getKeyspace(int i) { + return byIdx[i].keyspace; + } + + /** + * Returns the keyspace of the first occurrence of {@code name} in this metadata. + * + * @param name the name of the column. + * @return the keyspace of (the first occurrence of) column {@code name} in this metadata. + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public String getKeyspace(String name) { + return getKeyspace(getFirstIdx(name)); + } + + /** + * Returns the table of the {@code i}th column in this metadata. + * + * @param i the index in this metadata. + * @return the table of the {@code i}th column in this metadata. + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + */ + public String getTable(int i) { + return byIdx[i].table; + } + + /** + * Returns the table of first occurrence of {@code name} in this metadata. + * + * @param name the name of the column. + * @return the table of (the first occurrence of) column {@code name} in this metadata. + * @throws IllegalArgumentException if {@code name} is not in this metadata. + */ + public String getTable(String name) { + return getTable(getFirstIdx(name)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Columns["); + for (int i = 0; i < size(); i++) { + if (i != 0) sb.append(", "); + Definition def = byIdx[i]; + sb.append(def.name).append('(').append(def.type).append(')'); } + sb.append(']'); + return sb.toString(); + } - /** - * Returns whether this metadata contains a given name. - * - * @param name the name to check. - * @return {@code true} if this metadata contains the column named {@code name}, - * {@code false} otherwise. - */ - public boolean contains(String name) { - return findAllIdx(name) != null; - } + int findFirstIdx(String name) { - /** - * The first index in this metadata of the provided name, if present. - * - * @param name the name of the column. - * @return the index of the first occurrence of {@code name} in this metadata if - * {@code contains(name)}, -1 otherwise. - */ - public int getIndexOf(String name) { - return findFirstIdx(name); - } + int[] indexes = findAllIdx(name); + return indexes == null ? -1 : indexes[0]; + } - /** - * Returns an iterator over the {@link Definition} contained in this metadata. - *

- * The order of the iterator will be the one of this metadata. - * - * @return an iterator over the {@link Definition} contained in this metadata. - */ - @Override - public Iterator iterator() { - return Arrays.asList(byIdx).iterator(); + int[] findAllIdx(String name) { + boolean caseSensitive = false; + if (name.length() >= 2 && name.charAt(0) == '"' && name.charAt(name.length() - 1) == '"') { + name = name.substring(1, name.length() - 1); + caseSensitive = true; } - /** - * Returns a list containing all the definitions of this metadata in order. - * - * @return a list of the {@link Definition} contained in this metadata. - */ - public List asList() { - return Arrays.asList(byIdx); - } + int[] indexes = byName.get(name.toLowerCase()); + if (!caseSensitive || indexes == null) return indexes; - /** - * Returns the name of the {@code i}th column in this metadata. - * - * @param i the index in this metadata. - * @return the name of the {@code i}th column in this metadata. - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} - */ - public String getName(int i) { - return byIdx[i].name; - } + // First, optimistic and assume all are matching + int nbMatch = 0; + for (int i = 0; i < indexes.length; i++) if (name.equals(byIdx[indexes[i]].name)) nbMatch++; - /** - * Returns the type of the {@code i}th column in this metadata. - *

- * Note that this method does not set the {@link DataType#isFrozen()} flag on the returned - * object, it will always default to {@code false}. Use {@link Cluster#getMetadata()} to - * determine if a column is frozen. - * - * @param i the index in this metadata. - * @return the type of the {@code i}th column in this metadata. - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} - */ - public DataType getType(int i) { - return byIdx[i].type; + if (nbMatch == indexes.length) return indexes; + + int[] result = new int[nbMatch]; + int j = 0; + for (int i = 0; i < indexes.length; i++) { + int idx = indexes[i]; + if (name.equals(byIdx[idx].name)) result[j++] = idx; } - /** - * Returns the type of the first occurrence of {@code name} in this metadata. - *
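To make the lookup rule being introduced in findAllIdx above concrete: unquoted names are matched case-insensitively, while double-quoted names are matched exactly. A self-contained sketch of that rule with hypothetical column data (the class below is illustrative only, not driver code):

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class NameLookupRule {

  // Column names in declaration order, as if returned by the server.
  static final String[] COLUMNS = {"FirstName", "firstname", "age"};

  // Lower-cased name -> positions, mimicking the byName index.
  static final Map<String, int[]> BY_NAME = new HashMap<String, int[]>();

  static {
    BY_NAME.put("firstname", new int[] {0, 1});
    BY_NAME.put("age", new int[] {2});
  }

  static int[] findAllIdx(String name) {
    boolean caseSensitive = false;
    if (name.length() >= 2 && name.charAt(0) == '"' && name.charAt(name.length() - 1) == '"') {
      name = name.substring(1, name.length() - 1);
      caseSensitive = true;
    }
    int[] indexes = BY_NAME.get(name.toLowerCase());
    if (!caseSensitive || indexes == null) {
      return indexes;
    }
    // Quoted lookup: keep only the positions whose declared name matches exactly.
    final String exact = name;
    return Arrays.stream(indexes).filter(i -> exact.equals(COLUMNS[i])).toArray();
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(findAllIdx("FIRSTNAME")));     // [0, 1]
    System.out.println(Arrays.toString(findAllIdx("\"FirstName\""))); // [0]
    System.out.println(Arrays.toString(findAllIdx("age")));           // [2]
  }
}
```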

- * Note that this method does not set the {@link DataType#isFrozen()} flag on the returned - * object, it will always default to {@code false}. Use {@link Cluster#getMetadata()} to - * determine if a column is frozen. - * - * @param name the name of the column. - * @return the type of (the first occurrence of) {@code name} in this metadata. - * @throws IllegalArgumentException if {@code name} is not in this metadata. - */ - public DataType getType(String name) { - return getType(getFirstIdx(name)); + return result; + } + + int[] getAllIdx(String name) { + int[] indexes = findAllIdx(name); + if (indexes == null) + throw new IllegalArgumentException(name + " is not a column defined in this metadata"); + + return indexes; + } + + int getFirstIdx(String name) { + return getAllIdx(name)[0]; + } + + /** A column definition. */ + public static class Definition { + + private final String keyspace; + private final String table; + private final String name; + private final DataType type; + + Definition(String keyspace, String table, String name, DataType type) { + this.keyspace = keyspace; + this.table = table; + this.name = name; + this.type = type; } /** - * Returns the keyspace of the {@code i}th column in this metadata. + * The name of the keyspace this column is part of. * - * @param i the index in this metadata. - * @return the keyspace of the {@code i}th column in this metadata. - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + * @return the name of the keyspace this column is part of. */ - public String getKeyspace(int i) { - return byIdx[i].keyspace; + public String getKeyspace() { + return keyspace; } /** - * Returns the keyspace of the first occurrence of {@code name} in this metadata. + * Returns the name of the table this column is part of. * - * @param name the name of the column. - * @return the keyspace of (the first occurrence of) column {@code name} in this metadata. - * @throws IllegalArgumentException if {@code name} is not in this metadata. + * @return the name of the table this column is part of. */ - public String getKeyspace(String name) { - return getKeyspace(getFirstIdx(name)); + public String getTable() { + return table; } /** - * Returns the table of the {@code i}th column in this metadata. + * Returns the name of the column. * - * @param i the index in this metadata. - * @return the table of the {@code i}th column in this metadata. - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} + * @return the name of the column. */ - public String getTable(int i) { - return byIdx[i].table; + public String getName() { + return name; } /** - * Returns the table of first occurrence of {@code name} in this metadata. + * Returns the type of the column. * - * @param name the name of the column. - * @return the table of (the first occurrence of) column {@code name} in this metadata. - * @throws IllegalArgumentException if {@code name} is not in this metadata. + * @return the type of the column. */ - public String getTable(String name) { - return getTable(getFirstIdx(name)); + public DataType getType() { + return type; } @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Columns["); - for (int i = 0; i < size(); i++) { - if (i != 0) - sb.append(", "); - Definition def = byIdx[i]; - sb.append(def.name).append('(').append(def.type).append(')'); - } - sb.append(']'); - return sb.toString(); - } - - int findFirstIdx(String name) { - - int[] indexes = findAllIdx(name); - return indexes == null ? 
-1 : indexes[0]; - } - - int[] findAllIdx(String name) { - boolean caseSensitive = false; - if (name.length() >= 2 && name.charAt(0) == '"' && name.charAt(name.length() - 1) == '"') { - name = name.substring(1, name.length() - 1); - caseSensitive = true; - } - - int[] indexes = byName.get(name.toLowerCase()); - if (!caseSensitive || indexes == null) - return indexes; - - // First, optimistic and assume all are matching - int nbMatch = 0; - for (int i = 0; i < indexes.length; i++) - if (name.equals(byIdx[indexes[i]].name)) - nbMatch++; - - if (nbMatch == indexes.length) - return indexes; - - int[] result = new int[nbMatch]; - int j = 0; - for (int i = 0; i < indexes.length; i++) { - int idx = indexes[i]; - if (name.equals(byIdx[idx].name)) - result[j++] = idx; - } - - return result; - } - - int[] getAllIdx(String name) { - int[] indexes = findAllIdx(name); - if (indexes == null) - throw new IllegalArgumentException(name + " is not a column defined in this metadata"); - - return indexes; - } - - int getFirstIdx(String name) { - return getAllIdx(name)[0]; + public final int hashCode() { + return Arrays.hashCode(new Object[] {keyspace, table, name, type}); } - /** - * A column definition. - */ - public static class Definition { - - private final String keyspace; - private final String table; - private final String name; - private final DataType type; - - Definition(String keyspace, String table, String name, DataType type) { - this.keyspace = keyspace; - this.table = table; - this.name = name; - this.type = type; - } - - /** - * The name of the keyspace this column is part of. - * - * @return the name of the keyspace this column is part of. - */ - public String getKeyspace() { - return keyspace; - } - - /** - * Returns the name of the table this column is part of. - * - * @return the name of the table this column is part of. - */ - public String getTable() { - return table; - } - - /** - * Returns the name of the column. - * - * @return the name of the column. - */ - public String getName() { - return name; - } - - /** - * Returns the type of the column. - * - * @return the type of the column. - */ - public DataType getType() { - return type; - } - - @Override - public final int hashCode() { - return Arrays.hashCode(new Object[]{keyspace, table, name, type}); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof Definition)) - return false; - - Definition other = (Definition) o; - return keyspace.equals(other.keyspace) - && table.equals(other.table) - && name.equals(other.name) - && type.equals(other.type); - } + @Override + public final boolean equals(Object o) { + if (!(o instanceof Definition)) return false; + + Definition other = (Definition) o; + return keyspace.equals(other.keyspace) + && table.equals(other.table) + && name.equals(other.name) + && type.equals(other.type); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index ad1c503beb7..67e05ecdc43 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,209 +18,205 @@ package com.datastax.driver.core; import com.datastax.driver.core.utils.MoreObjects; - import java.util.Arrays; import java.util.HashMap; import java.util.Map; -/** - * Describes a Column. - */ +/** Describes a Column. */ public class ColumnMetadata { - static final String COLUMN_NAME = "column_name"; - - static final String VALIDATOR = "validator"; // v2 only - static final String TYPE = "type"; // replaces validator, v3 onwards - - static final String COMPONENT_INDEX = "component_index"; // v2 only - static final String POSITION = "position"; // replaces component_index, v3 onwards - - static final String KIND_V2 = "type"; // v2 only - static final String KIND_V3 = "kind"; // replaces type, v3 onwards - - static final String CLUSTERING_ORDER = "clustering_order"; - static final String DESC = "desc"; - - static final String INDEX_TYPE = "index_type"; - static final String INDEX_OPTIONS = "index_options"; - static final String INDEX_NAME = "index_name"; - - private final AbstractTableMetadata parent; - private final String name; - private final DataType type; - private final boolean isStatic; - - private ColumnMetadata(AbstractTableMetadata parent, String name, DataType type, boolean isStatic) { - this.parent = parent; - this.name = name; - this.type = type; - this.isStatic = isStatic; - } - - static ColumnMetadata fromRaw(AbstractTableMetadata tm, Raw raw, DataType dataType) { - return new ColumnMetadata(tm, raw.name, dataType, raw.kind == Raw.Kind.STATIC); - } - - static ColumnMetadata forAlias(TableMetadata tm, String name, DataType type) { - return new ColumnMetadata(tm, name, type, false); - } - - /** - * Returns the name of the column. - * - * @return the name of the column. - */ - public String getName() { - return name; - } - - /** - * Returns the parent object of this column. This can be a {@link TableMetadata} - * or a {@link MaterializedViewMetadata} object. - * - * @return the parent object of this column. - */ - public AbstractTableMetadata getParent() { - return parent; - } - - /** - * Returns the type of the column. - * - * @return the type of the column. 
- */ - public DataType getType() { - return type; - } + static final String COLUMN_NAME = "column_name"; + + static final String VALIDATOR = "validator"; // v2 only + static final String TYPE = "type"; // replaces validator, v3 onwards + + static final String COMPONENT_INDEX = "component_index"; // v2 only + static final String POSITION = "position"; // replaces component_index, v3 onwards + + static final String KIND_V2 = "type"; // v2 only + static final String KIND_V3 = "kind"; // replaces type, v3 onwards + + static final String CLUSTERING_ORDER = "clustering_order"; + static final String DESC = "desc"; + + static final String INDEX_TYPE = "index_type"; + static final String INDEX_OPTIONS = "index_options"; + static final String INDEX_NAME = "index_name"; + + private final AbstractTableMetadata parent; + private final String name; + private final DataType type; + private final boolean isStatic; + + private ColumnMetadata( + AbstractTableMetadata parent, String name, DataType type, boolean isStatic) { + this.parent = parent; + this.name = name; + this.type = type; + this.isStatic = isStatic; + } + + static ColumnMetadata fromRaw(AbstractTableMetadata tm, Raw raw, DataType dataType) { + return new ColumnMetadata(tm, raw.name, dataType, raw.kind == Raw.Kind.STATIC); + } + + static ColumnMetadata forAlias(TableMetadata tm, String name, DataType type) { + return new ColumnMetadata(tm, name, type, false); + } + + /** + * Returns the name of the column. + * + * @return the name of the column. + */ + public String getName() { + return name; + } + + /** + * Returns the parent object of this column. This can be a {@link TableMetadata} or a {@link + * MaterializedViewMetadata} object. + * + * @return the parent object of this column. + */ + public AbstractTableMetadata getParent() { + return parent; + } + + /** + * Returns the type of the column. + * + * @return the type of the column. + */ + public DataType getType() { + return type; + } + + /** + * Whether this column is a static column. + * + * @return Whether this column is a static column or not. + */ + public boolean isStatic() { + return isStatic; + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof ColumnMetadata)) return false; + + ColumnMetadata that = (ColumnMetadata) other; + return this.name.equals(that.name) + && this.isStatic == that.isStatic + && this.type.equals(that.type); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode(name, isStatic, type); + } + + @Override + public String toString() { + String str = Metadata.quoteIfNecessary(name) + ' ' + type; + return isStatic ? str + " static" : str; + } + + // Temporary class that is used to make building the schema easier. Not meant to be + // exposed publicly at all. + static class Raw { + + public enum Kind { + PARTITION_KEY("PARTITION_KEY", "PARTITION_KEY"), + CLUSTERING_COLUMN("CLUSTERING_KEY", "CLUSTERING"), + REGULAR("REGULAR", "REGULAR"), + COMPACT_VALUE("COMPACT_VALUE", ""), // v2 only + STATIC("STATIC", "STATIC"); + + final String v2; + final String v3; + + Kind(String v2, String v3) { + this.v2 = v2; + this.v3 = v3; + } + + static Kind fromStringV2(String s) { + for (Kind kind : Kind.values()) { + if (kind.v2.equalsIgnoreCase(s)) return kind; + } + throw new IllegalArgumentException(s); + } - /** - * Whether this column is a static column. - * - * @return Whether this column is a static column or not. 
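For context on how the accessors above are consumed, a minimal sketch that walks a table's columns through the schema metadata (contact point, keyspace and table names are hypothetical):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.TableMetadata;

public class DescribeColumns {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      cluster.init(); // fetches the schema metadata
      TableMetadata table = cluster.getMetadata().getKeyspace("my_ks").getTable("my_table");
      for (ColumnMetadata column : table.getColumns()) {
        // getName()/getType()/isStatic() are the accessors defined above;
        // getParent() would give back the enclosing table or materialized view.
        System.out.printf(
            "%s %s%s%n", column.getName(), column.getType(), column.isStatic() ? " static" : "");
      }
    } finally {
      cluster.close();
    }
  }
}
```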
- */ - public boolean isStatic() { - return isStatic; + static Kind fromStringV3(String s) { + for (Kind kind : Kind.values()) { + if (kind.v3.equalsIgnoreCase(s)) return kind; + } + throw new IllegalArgumentException(s); + } } - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (!(other instanceof ColumnMetadata)) - return false; - - ColumnMetadata that = (ColumnMetadata) other; - return this.name.equals(that.name) && - this.isStatic == that.isStatic && - this.type.equals(that.type); - } + public final String name; + public Kind kind; + public final int position; + public final String dataType; + public final boolean isReversed; - @Override - public int hashCode() { - return MoreObjects.hashCode(name, isStatic, type); - } + public final Map indexColumns = new HashMap(); - @Override - public String toString() { - String str = Metadata.quoteIfNecessary(name) + ' ' + type; - return isStatic ? str + " static" : str; + Raw(String name, Kind kind, int position, String dataType, boolean isReversed) { + this.name = name; + this.kind = kind; + this.position = position; + this.dataType = dataType; + this.isReversed = isReversed; } - // Temporary class that is used to make building the schema easier. Not meant to be - // exposed publicly at all. - static class Raw { - - public enum Kind { - - PARTITION_KEY("PARTITION_KEY", "PARTITION_KEY"), - CLUSTERING_COLUMN("CLUSTERING_KEY", "CLUSTERING"), - REGULAR("REGULAR", "REGULAR"), - COMPACT_VALUE("COMPACT_VALUE", ""), // v2 only - STATIC("STATIC", "STATIC"); - - final String v2; - final String v3; - - Kind(String v2, String v3) { - this.v2 = v2; - this.v3 = v3; - } - - static Kind fromStringV2(String s) { - for (Kind kind : Kind.values()) { - if (kind.v2.equalsIgnoreCase(s)) - return kind; - } - throw new IllegalArgumentException(s); - } - - static Kind fromStringV3(String s) { - for (Kind kind : Kind.values()) { - if (kind.v3.equalsIgnoreCase(s)) - return kind; - } - throw new IllegalArgumentException(s); - } - } - - public final String name; - public Kind kind; - public final int position; - public final String dataType; - public final boolean isReversed; - - public final Map indexColumns = new HashMap(); - - Raw(String name, Kind kind, int position, String dataType, boolean isReversed) { - this.name = name; - this.kind = kind; - this.position = position; - this.dataType = dataType; - this.isReversed = isReversed; - } - - static Raw fromRow(Row row, VersionNumber version) { - String name = row.getString(COLUMN_NAME); - - Kind kind; - if (version.getMajor() < 2) { - kind = Kind.REGULAR; - } else if (version.getMajor() < 3) { - kind = row.isNull(KIND_V2) ? Kind.REGULAR : Kind.fromStringV2(row.getString(KIND_V2)); - } else { - kind = row.isNull(KIND_V3) ? Kind.REGULAR : Kind.fromStringV3(row.getString(KIND_V3)); - } - - int position; - if (version.getMajor() >= 3) { - position = row.getInt(POSITION); // cannot be null, -1 is used as a special value instead of null to avoid tombstones - if (position == -1) position = 0; - } else { - position = row.isNull(COMPONENT_INDEX) ? 
0 : row.getInt(COMPONENT_INDEX); - } - - String dataType; - boolean reversed; - if (version.getMajor() >= 3) { - dataType = row.getString(TYPE); - String clusteringOrderStr = row.getString(CLUSTERING_ORDER); - reversed = clusteringOrderStr.equals(DESC); - } else { - dataType = row.getString(VALIDATOR); - reversed = DataTypeClassNameParser.isReversed(dataType); - } - - Raw c = new Raw(name, kind, position, dataType, reversed); - - // secondary indexes (C* < 3.0.0) - // from C* 3.0 onwards 2i are defined in a separate table - if (version.getMajor() < 3) { - for (String str : Arrays.asList(INDEX_TYPE, INDEX_NAME, INDEX_OPTIONS)) - if (row.getColumnDefinitions().contains(str) && !row.isNull(str)) - c.indexColumns.put(str, row.getString(str)); - } - return c; - } + static Raw fromRow(Row row, VersionNumber version) { + String name = row.getString(COLUMN_NAME); + + Kind kind; + if (version.getMajor() < 2) { + kind = Kind.REGULAR; + } else if (version.getMajor() < 3) { + kind = row.isNull(KIND_V2) ? Kind.REGULAR : Kind.fromStringV2(row.getString(KIND_V2)); + } else { + kind = row.isNull(KIND_V3) ? Kind.REGULAR : Kind.fromStringV3(row.getString(KIND_V3)); + } + + int position; + if (version.getMajor() >= 3) { + position = + row.getInt( + POSITION); // cannot be null, -1 is used as a special value instead of null to avoid + // tombstones + if (position == -1) position = 0; + } else { + position = row.isNull(COMPONENT_INDEX) ? 0 : row.getInt(COMPONENT_INDEX); + } + + String dataType; + boolean reversed; + if (version.getMajor() >= 3) { + dataType = row.getString(TYPE); + String clusteringOrderStr = row.getString(CLUSTERING_ORDER); + reversed = clusteringOrderStr.equals(DESC); + } else { + dataType = row.getString(VALIDATOR); + reversed = DataTypeClassNameParser.isReversed(dataType); + } + + Raw c = new Raw(name, kind, position, dataType, reversed); + + // secondary indexes (C* < 3.0.0) + // from C* 3.0 onwards 2i are defined in a separate table + if (version.getMajor() < 3) { + for (String str : Arrays.asList(INDEX_TYPE, INDEX_NAME, INDEX_OPTIONS)) + if (row.getColumnDefinitions().contains(str) && !row.isNull(str)) + c.indexColumns.put(str, row.getString(str)); + } + return c; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java index fc0e77a6888..e500e3c3167 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,313 +18,357 @@ package com.datastax.driver.core; import com.datastax.driver.core.policies.Policies; +import com.google.common.base.Joiner; +import java.util.ArrayList; +import java.util.List; /** - * The configuration of the cluster. - * It configures the following: + * The configuration of the cluster. It configures the following: + * *

- * <ul>
- * <li>Cassandra protocol level configuration (compression).</li>
- * <li>Connection pooling configurations.</li>
- * <li>low-level TCP configuration options (tcpNoDelay, keepAlive, ...).</li>
- * <li>Metrics related options.</li>
- * <li>Query related options (default consistency level, fetchSize, ...).</li>
- * <li>Netty layer customization options.</li>
- * </ul>
+ * <ul>
+ *   <li>Cassandra protocol level configuration (compression).
+ *   <li>Connection pooling configurations.
+ *   <li>low-level TCP configuration options (tcpNoDelay, keepAlive, ...).
+ *   <li>Metrics related options.
+ *   <li>Query related options (default consistency level, fetchSize, ...).
+ *   <li>Netty layer customization options.
+ * </ul>
- * This is also where you get the configured policies, though those cannot be changed - * (they are set during the built of the Cluster object). + * + * This is also where you get the configured policies, though those cannot be changed (they are set + * during the build of the Cluster object). */ public class Configuration { - /** - * Returns a builder to create a new {@code Configuration} object. - *
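A usage note for the class comment above: every option group it lists is reachable from the built Cluster. A minimal sketch (contact point hypothetical; no connection is needed just to read the configuration):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;

public class InspectConfiguration {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Configuration config = cluster.getConfiguration();
      // One accessor per option group listed in the class javadoc.
      System.out.println(config.getProtocolOptions().getCompression());
      System.out.println(config.getPoolingOptions().getHeartbeatIntervalSeconds());
      System.out.println(config.getSocketOptions().getConnectTimeoutMillis());
      System.out.println(config.getMetricsOptions().isEnabled());
      System.out.println(config.getQueryOptions().getFetchSize());
      System.out.println(config.getNettyOptions());
    } finally {
      cluster.close();
    }
  }
}
```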

- * You only need this if you are building the configuration yourself. If you - * use {@link Cluster#builder()}, it will be done under the hood for you. - * - * @return the builder. - */ - public static Builder builder() { - return new Builder(); - } + /** + * Returns a builder to create a new {@code Configuration} object. + * + *

You only need this if you are building the configuration yourself. If you use {@link + * Cluster#builder()}, it will be done under the hood for you. + * + * @return the builder. + */ + public static Builder builder() { + return new Builder(); + } - private final Policies policies; - private final ProtocolOptions protocolOptions; - private final PoolingOptions poolingOptions; - private final SocketOptions socketOptions; - private final MetricsOptions metricsOptions; - private final QueryOptions queryOptions; - private final ThreadingOptions threadingOptions; - private final NettyOptions nettyOptions; - private final CodecRegistry codecRegistry; + private final Policies policies; + private final ProtocolOptions protocolOptions; + private final PoolingOptions poolingOptions; + private final SocketOptions socketOptions; + private final MetricsOptions metricsOptions; + private final QueryOptions queryOptions; + private final ThreadingOptions threadingOptions; + private final NettyOptions nettyOptions; + private final CodecRegistry codecRegistry; + private final String defaultKeyspace; - private Configuration(Policies policies, - ProtocolOptions protocolOptions, - PoolingOptions poolingOptions, - SocketOptions socketOptions, - MetricsOptions metricsOptions, - QueryOptions queryOptions, - ThreadingOptions threadingOptions, - NettyOptions nettyOptions, - CodecRegistry codecRegistry) { - this.policies = policies; - this.protocolOptions = protocolOptions; - this.poolingOptions = poolingOptions; - this.socketOptions = socketOptions; - this.metricsOptions = metricsOptions; - this.queryOptions = queryOptions; - this.threadingOptions = threadingOptions; - this.nettyOptions = nettyOptions; - this.codecRegistry = codecRegistry; - } + private Configuration( + Policies policies, + ProtocolOptions protocolOptions, + PoolingOptions poolingOptions, + SocketOptions socketOptions, + MetricsOptions metricsOptions, + QueryOptions queryOptions, + ThreadingOptions threadingOptions, + NettyOptions nettyOptions, + CodecRegistry codecRegistry, + String defaultKeyspace) { + this.policies = policies; + this.protocolOptions = protocolOptions; + this.poolingOptions = poolingOptions; + this.socketOptions = socketOptions; + this.metricsOptions = metricsOptions; + this.queryOptions = queryOptions; + this.threadingOptions = threadingOptions; + this.nettyOptions = nettyOptions; + this.codecRegistry = codecRegistry; + this.defaultKeyspace = defaultKeyspace; + } - /** - * Copy constructor. - * - * @param toCopy the object to copy from. - */ - protected Configuration(Configuration toCopy) { - this( - toCopy.getPolicies(), - toCopy.getProtocolOptions(), - toCopy.getPoolingOptions(), - toCopy.getSocketOptions(), - toCopy.getMetricsOptions(), - toCopy.getQueryOptions(), - toCopy.getThreadingOptions(), - toCopy.getNettyOptions(), - toCopy.getCodecRegistry() - ); - } + /** + * Copy constructor. + * + * @param toCopy the object to copy from. 
+ */ + protected Configuration(Configuration toCopy) { + this( + toCopy.getPolicies(), + toCopy.getProtocolOptions(), + toCopy.getPoolingOptions(), + toCopy.getSocketOptions(), + toCopy.getMetricsOptions(), + toCopy.getQueryOptions(), + toCopy.getThreadingOptions(), + toCopy.getNettyOptions(), + toCopy.getCodecRegistry(), + toCopy.getDefaultKeyspace()); + } + + void register(Cluster.Manager manager) { + protocolOptions.register(manager); + poolingOptions.register(manager); + queryOptions.register(manager); + policies.getEndPointFactory().init(manager.getCluster()); - void register(Cluster.Manager manager) { - protocolOptions.register(manager); - poolingOptions.register(manager); - queryOptions.register(manager); + checkPoliciesIfSni(); + } + + // If using SNI endpoints, the SSL options and auth provider MUST be the "extended" versions, the + // base versions work with IP addresses that might not be unique to a node. + // Throw now since that's probably a configuration error. + private void checkPoliciesIfSni() { + if (policies.getEndPointFactory() instanceof SniEndPointFactory) { + SSLOptions sslOptions = protocolOptions.getSSLOptions(); + List errors = new ArrayList(); + if (sslOptions != null && !(sslOptions instanceof ExtendedRemoteEndpointAwareSslOptions)) { + errors.add( + String.format( + "the configured %s must implement %s", + SSLOptions.class.getSimpleName(), + ExtendedRemoteEndpointAwareSslOptions.class.getSimpleName())); + } + AuthProvider authProvider = protocolOptions.getAuthProvider(); + if (authProvider != null && !(authProvider instanceof ExtendedAuthProvider)) { + errors.add( + String.format( + "the configured %s must implement %s", + AuthProvider.class.getSimpleName(), ExtendedAuthProvider.class.getSimpleName())); + } + if (!errors.isEmpty()) { + throw new IllegalStateException( + "Configuration error: if SNI endpoints are in use, " + Joiner.on(',').join(errors)); + } } + } + + /** + * Returns the policies set for the cluster. + * + * @return the policies set for the cluster. + */ + public Policies getPolicies() { + return policies; + } + + /** + * Returns the low-level TCP configuration options used (tcpNoDelay, keepAlive, ...). + * + * @return the socket options. + */ + public SocketOptions getSocketOptions() { + return socketOptions; + } + + /** + * Returns the Cassandra binary protocol level configuration (compression). + * + * @return the protocol options. + */ + public ProtocolOptions getProtocolOptions() { + return protocolOptions; + } + + /** + * Returns the connection pooling configuration. + * + * @return the pooling options. + */ + public PoolingOptions getPoolingOptions() { + return poolingOptions; + } + + /** + * Returns the metrics configuration, if metrics are enabled. + * + *

Metrics collection is enabled by default but can be disabled at cluster construction time + * through {@link Cluster.Builder#withoutMetrics}. + * + * @return the metrics options or {@code null} if metrics are not enabled. + */ + public MetricsOptions getMetricsOptions() { + return metricsOptions; + } + + /** + * Returns the queries configuration. + * + * @return the queries options. + */ + public QueryOptions getQueryOptions() { + return queryOptions; + } + + /** @return the threading options for this configuration. */ + public ThreadingOptions getThreadingOptions() { + return threadingOptions; + } + + /** + * Returns the {@link NettyOptions} instance for this configuration. + * + * @return the {@link NettyOptions} instance for this configuration. + */ + public NettyOptions getNettyOptions() { + return nettyOptions; + } + + public String getDefaultKeyspace() { + return defaultKeyspace; + } + /** + * Returns the {@link CodecRegistry} instance for this configuration. + * + *

Note that this method could return {@link CodecRegistry#DEFAULT_INSTANCE} if no specific + * codec registry has been set on the {@link Cluster}. In this case, care should be taken when + * registering new codecs as they would be immediately available to other {@link Cluster} + * instances sharing the same default instance. + * + * @return the {@link CodecRegistry} instance for this configuration. + */ + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + /** A builder to create a new {@code Configuration} object. */ + public static class Builder { + private Policies policies; + private ProtocolOptions protocolOptions; + private PoolingOptions poolingOptions; + private SocketOptions socketOptions; + private MetricsOptions metricsOptions; + private QueryOptions queryOptions; + private ThreadingOptions threadingOptions; + private NettyOptions nettyOptions; + private CodecRegistry codecRegistry; + private String defaultKeyspace; /** - * Returns the policies set for the cluster. + * Sets the policies for this cluster. * - * @return the policies set for the cluster. + * @param policies the policies. + * @return this builder. */ - public Policies getPolicies() { - return policies; + public Builder withPolicies(Policies policies) { + this.policies = policies; + return this; } /** - * Returns the low-level TCP configuration options used (tcpNoDelay, keepAlive, ...). + * Sets the protocol options for this cluster. * - * @return the socket options. + * @param protocolOptions the protocol options. + * @return this builder. */ - public SocketOptions getSocketOptions() { - return socketOptions; + public Builder withProtocolOptions(ProtocolOptions protocolOptions) { + this.protocolOptions = protocolOptions; + return this; } /** - * Returns the Cassandra binary protocol level configuration (compression). + * Sets the pooling options for this cluster. * - * @return the protocol options. + * @param poolingOptions the pooling options. + * @return this builder. */ - public ProtocolOptions getProtocolOptions() { - return protocolOptions; + public Builder withPoolingOptions(PoolingOptions poolingOptions) { + this.poolingOptions = poolingOptions; + return this; } /** - * Returns the connection pooling configuration. + * Sets the socket options for this cluster. * - * @return the pooling options. + * @param socketOptions the socket options. + * @return this builder. */ - public PoolingOptions getPoolingOptions() { - return poolingOptions; + public Builder withSocketOptions(SocketOptions socketOptions) { + this.socketOptions = socketOptions; + return this; } /** - * Returns the metrics configuration, if metrics are enabled. - *
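Given the sharing caveat above, a common pattern is to give a Cluster its own registry instead of registering codecs on the shared default. A minimal sketch (contact point hypothetical; the commented-out codec stands in for a real TypeCodec implementation):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;

public class DedicatedCodecRegistry {
  public static void main(String[] args) {
    // A dedicated registry keeps custom codecs scoped to this Cluster instance instead of
    // mutating CodecRegistry.DEFAULT_INSTANCE, which is shared by every Cluster built
    // without an explicit registry.
    CodecRegistry registry = new CodecRegistry();
    // registry.register(myCustomCodec); // hypothetical TypeCodec implementation

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withCodecRegistry(registry)
            .build();
    System.out.println(cluster.getConfiguration().getCodecRegistry() == registry); // true
    cluster.close();
  }
}
```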

- * Metrics collection is enabled by default but can be disabled at cluster - * construction time through {@link Cluster.Builder#withoutMetrics}. + * Sets the metrics options for this cluster. + * + *

If this method doesn't get called, the configuration will use the defaults: metrics + * enabled with JMX reporting enabled. To disable metrics, call this method with an instance + * where {@link MetricsOptions#isEnabled() isEnabled()} returns false. * - * @return the metrics options or {@code null} if metrics are not enabled. + * @param metricsOptions the metrics options. + * @return this builder. */ - public MetricsOptions getMetricsOptions() { - return metricsOptions; + public Builder withMetricsOptions(MetricsOptions metricsOptions) { + this.metricsOptions = metricsOptions; + return this; } /** - * Returns the queries configuration. + * Sets the query options for this cluster. * - * @return the queries options. + * @param queryOptions the query options. + * @return this builder. */ - public QueryOptions getQueryOptions() { - return queryOptions; + public Builder withQueryOptions(QueryOptions queryOptions) { + this.queryOptions = queryOptions; + return this; } /** - * @return the threading options for this configuration. + * Sets the threading options for this cluster. + * + * @param threadingOptions the threading options to set. + * @return this builder. */ - public ThreadingOptions getThreadingOptions() { - return threadingOptions; + public Builder withThreadingOptions(ThreadingOptions threadingOptions) { + this.threadingOptions = threadingOptions; + return this; } /** - * Returns the {@link NettyOptions} instance for this configuration. + * Sets the Netty options for this cluster. * - * @return the {@link NettyOptions} instance for this configuration. + * @param nettyOptions the Netty options. + * @return this builder. */ - public NettyOptions getNettyOptions() { - return nettyOptions; + public Builder withNettyOptions(NettyOptions nettyOptions) { + this.nettyOptions = nettyOptions; + return this; } /** - * Returns the {@link CodecRegistry} instance for this configuration. - *
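To make the default above concrete: metrics and JMX reporting are on unless explicitly disabled. A minimal sketch of turning them off through the Cluster builder (contact point hypothetical):

```java
import com.datastax.driver.core.Cluster;

public class DisableMetrics {
  public static void main(String[] args) {
    // Metrics are enabled by default; after withoutMetrics() the configured
    // MetricsOptions reports isEnabled() == false.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withoutMetrics()
            .build();
    System.out.println(cluster.getConfiguration().getMetricsOptions().isEnabled()); // false
    cluster.close();
  }
}
```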

- * Note that this method could return {@link CodecRegistry#DEFAULT_INSTANCE} - * if no specific codec registry has been set on the {@link Cluster}. - * In this case, care should be taken when registering new codecs as they would be - * immediately available to other {@link Cluster} instances sharing the same default instance. + * Sets the codec registry for this cluster. * - * @return the {@link CodecRegistry} instance for this configuration. + * @param codecRegistry the codec registry. + * @return this builder. */ - public CodecRegistry getCodecRegistry() { - return codecRegistry; + public Builder withCodecRegistry(CodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + return this; + } + + public Builder withDefaultKeyspace(String keyspace) { + this.defaultKeyspace = keyspace; + return this; } /** - * A builder to create a new {@code Configuration} object. + * Builds the final object from this builder. + * + *

Any field that hasn't been set explicitly will get its default value. + * + * @return the object. */ - public static class Builder { - private Policies policies; - private ProtocolOptions protocolOptions; - private PoolingOptions poolingOptions; - private SocketOptions socketOptions; - private MetricsOptions metricsOptions; - private QueryOptions queryOptions; - private ThreadingOptions threadingOptions; - private NettyOptions nettyOptions; - private CodecRegistry codecRegistry; - - /** - * Sets the policies for this cluster. - * - * @param policies the policies. - * @return this builder. - */ - public Builder withPolicies(Policies policies) { - this.policies = policies; - return this; - } - - /** - * Sets the protocol options for this cluster. - * - * @param protocolOptions the protocol options. - * @return this builder. - */ - public Builder withProtocolOptions(ProtocolOptions protocolOptions) { - this.protocolOptions = protocolOptions; - return this; - } - - /** - * Sets the pooling options for this cluster. - * - * @param poolingOptions the pooling options. - * @return this builder. - */ - public Builder withPoolingOptions(PoolingOptions poolingOptions) { - this.poolingOptions = poolingOptions; - return this; - } - - /** - * Sets the socket options for this cluster. - * - * @param socketOptions the socket options. - * @return this builder. - */ - public Builder withSocketOptions(SocketOptions socketOptions) { - this.socketOptions = socketOptions; - return this; - } - - /** - * Sets the metrics options for this cluster. - *

- * If this method doesn't get called, the configuration will use the - * defaults: metrics enabled with JMX reporting enabled. - * To disable metrics, call this method with an instance where - * {@link MetricsOptions#isEnabled() isEnabled()} returns false. - * - * @param metricsOptions the metrics options. - * @return this builder. - */ - public Builder withMetricsOptions(MetricsOptions metricsOptions) { - this.metricsOptions = metricsOptions; - return this; - } - - /** - * Sets the query options for this cluster. - * - * @param queryOptions the query options. - * @return this builder. - */ - public Builder withQueryOptions(QueryOptions queryOptions) { - this.queryOptions = queryOptions; - return this; - } - - /** - * Sets the threading options for this cluster. - * - * @param threadingOptions the threading options to set. - * @return this builder. - */ - public Builder withThreadingOptions(ThreadingOptions threadingOptions) { - this.threadingOptions = threadingOptions; - return this; - } - - /** - * Sets the Netty options for this cluster. - * - * @param nettyOptions the Netty options. - * @return this builder. - */ - public Builder withNettyOptions(NettyOptions nettyOptions) { - this.nettyOptions = nettyOptions; - return this; - } - - /** - * Sets the codec registry for this cluster. - * - * @param codecRegistry the codec registry. - * @return this builder. - */ - public Builder withCodecRegistry(CodecRegistry codecRegistry) { - this.codecRegistry = codecRegistry; - return this; - } - - /** - * Builds the final object from this builder. - *

- * Any field that hasn't been set explicitly will get its default value. - * - * @return the object. - */ - public Configuration build() { - return new Configuration( - policies != null ? policies : Policies.builder().build(), - protocolOptions != null ? protocolOptions : new ProtocolOptions(), - poolingOptions != null ? poolingOptions : new PoolingOptions(), - socketOptions != null ? socketOptions : new SocketOptions(), - metricsOptions != null ? metricsOptions : new MetricsOptions(), - queryOptions != null ? queryOptions : new QueryOptions(), - threadingOptions != null ? threadingOptions : new ThreadingOptions(), - nettyOptions != null ? nettyOptions : NettyOptions.DEFAULT_INSTANCE, - codecRegistry != null ? codecRegistry : CodecRegistry.DEFAULT_INSTANCE); - } + public Configuration build() { + return new Configuration( + policies != null ? policies : Policies.builder().build(), + protocolOptions != null ? protocolOptions : new ProtocolOptions(), + poolingOptions != null ? poolingOptions : new PoolingOptions(), + socketOptions != null ? socketOptions : new SocketOptions(), + metricsOptions != null ? metricsOptions : new MetricsOptions(), + queryOptions != null ? queryOptions : new QueryOptions(), + threadingOptions != null ? threadingOptions : new ThreadingOptions(), + nettyOptions != null ? nettyOptions : NettyOptions.DEFAULT_INSTANCE, + codecRegistry != null ? codecRegistry : CodecRegistry.DEFAULT_INSTANCE, + defaultKeyspace); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 73cff42a968..b75d31dc29c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
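Tying the builder together: anything left unset falls back to its default in build(), so a standalone Configuration takes only a couple of calls. A minimal sketch (the fetch size is an arbitrary illustrative value):

```java
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.QueryOptions;

public class BuildConfiguration {
  public static void main(String[] args) {
    // Only the query options are set explicitly; every other field falls back to its
    // default in build(), as described in the javadoc above.
    Configuration configuration =
        Configuration.builder()
            .withQueryOptions(new QueryOptions().setFetchSize(1000))
            .build();

    System.out.println(configuration.getQueryOptions().getFetchSize()); // 1000
    System.out.println(
        configuration.getCodecRegistry() == CodecRegistry.DEFAULT_INSTANCE); // true
  }
}
```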
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +17,45 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Message.Response.Type.ERROR; +import static io.netty.handler.timeout.IdleState.READER_IDLE; + import com.datastax.driver.core.Responses.Result.SetKeyspace; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.BusyConnectionException; +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.CrcMismatchException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.FrameTooLongException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.TransportException; +import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; import com.datastax.driver.core.utils.MoreFutures; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.MapMaker; -import com.google.common.util.concurrent.*; +import com.google.common.util.concurrent.AbstractFuture; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; import io.netty.bootstrap.Bootstrap; -import io.netty.channel.*; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.group.ChannelGroup; import io.netty.channel.group.DefaultChannelGroup; import io.netty.channel.socket.SocketChannel; @@ -36,1449 +67,1709 @@ import io.netty.util.Timer; import io.netty.util.TimerTask; import io.netty.util.concurrent.GlobalEventExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.lang.ref.WeakReference; -import java.net.InetSocketAddress; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Queue; -import java.util.concurrent.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - -import static 
com.datastax.driver.core.Message.Response.Type.ERROR; -import static io.netty.handler.timeout.IdleState.READER_IDLE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; // For LoggingHandler -//import org.jboss.netty.handler.logging.LoggingHandler; -//import org.jboss.netty.logging.InternalLogLevel; +// import org.jboss.netty.handler.logging.LoggingHandler; +// import org.jboss.netty.logging.InternalLogLevel; -/** - * A connection to a Cassandra Node. - */ +/** A connection to a Cassandra Node. */ class Connection { - private static final Logger logger = LoggerFactory.getLogger(Connection.class); - private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; - - private static final boolean DISABLE_COALESCING = SystemProperties.getBoolean("com.datastax.driver.DISABLE_COALESCING", false); - - enum State {OPEN, TRASHED, RESURRECTING, GONE} - - final AtomicReference state = new AtomicReference(State.OPEN); - - volatile long maxIdleTime; - - final InetSocketAddress address; - private final String name; - - @VisibleForTesting - volatile Channel channel; - private final Factory factory; - - @VisibleForTesting - final Dispatcher dispatcher; - - // Used by connection pooling to count how many requests are "in flight" on that connection. - final AtomicInteger inFlight = new AtomicInteger(0); - - private final AtomicInteger writer = new AtomicInteger(0); - - private final AtomicReference targetKeyspace; - private final SetKeyspaceAttempt defaultKeyspaceAttempt; - - private volatile boolean isInitialized; - private final AtomicBoolean isDefunct = new AtomicBoolean(); - private final AtomicBoolean signaled = new AtomicBoolean(); - - private final AtomicReference closeFuture = new AtomicReference(); - - private final AtomicReference ownerRef = new AtomicReference(); - - /** - * Create a new connection to a Cassandra node and associate it with the given pool. - * - * @param name the connection name - * @param address the remote address - * @param factory the connection factory to use - * @param owner the component owning this connection (may be null). - * Note that an existing connection can also be associated to an owner later with {@link #setOwner(Owner)}. - */ - protected Connection(String name, InetSocketAddress address, Factory factory, Owner owner) { - this.address = address; - this.factory = factory; - this.dispatcher = new Dispatcher(); - this.name = name; - this.ownerRef.set(owner); - ListenableFuture thisFuture = Futures.immediateFuture(this); - this.defaultKeyspaceAttempt = new SetKeyspaceAttempt(null, thisFuture); - this.targetKeyspace = new AtomicReference(defaultKeyspaceAttempt); - } - - /** - * Create a new connection to a Cassandra node. - */ - Connection(String name, InetSocketAddress address, Factory factory) { - this(name, address, factory, null); - } - - ListenableFuture initAsync() { - if (factory.isShutdown) - return Futures.immediateFailedFuture(new ConnectionException(address, "Connection factory is shut down")); - - ProtocolVersion protocolVersion = factory.protocolVersion == null ? 
ProtocolVersion.NEWEST_SUPPORTED : factory.protocolVersion; - final SettableFuture channelReadyFuture = SettableFuture.create(); - - try { - Bootstrap bootstrap = factory.newBootstrap(); - ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); - bootstrap.handler( - new Initializer(this, protocolVersion, protocolOptions.getCompression().compressor(), protocolOptions.getSSLOptions(), - factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds(), - factory.configuration.getNettyOptions(), - factory.configuration.getCodecRegistry())); - - ChannelFuture future = bootstrap.connect(address); - - writer.incrementAndGet(); - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - writer.decrementAndGet(); - channel = future.channel(); - if (isClosed()) { - channel.close().addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - channelReadyFuture.setException(new TransportException(Connection.this.address, "Connection closed during initialization.")); - } + private static final Logger logger = LoggerFactory.getLogger(Connection.class); + private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + + private static final boolean DISABLE_COALESCING = + SystemProperties.getBoolean("com.datastax.driver.DISABLE_COALESCING", false); + private static final int FLUSHER_SCHEDULE_PERIOD_NS = + SystemProperties.getInt("com.datastax.driver.FLUSHER_SCHEDULE_PERIOD_NS", 10000); + + enum State { + OPEN, + TRASHED, + RESURRECTING, + GONE + } + + final AtomicReference state = new AtomicReference(State.OPEN); + + volatile long maxIdleTime; + + final EndPoint endPoint; + private final String name; + + @VisibleForTesting volatile Channel channel; + private final Factory factory; + + @VisibleForTesting final Dispatcher dispatcher; + + // Used by connection pooling to count how many requests are "in flight" on that connection. + final AtomicInteger inFlight = new AtomicInteger(0); + + private final AtomicInteger writer = new AtomicInteger(0); + + private final AtomicReference targetKeyspace; + private final SetKeyspaceAttempt defaultKeyspaceAttempt; + + private volatile boolean isInitialized; + private final AtomicBoolean isDefunct = new AtomicBoolean(); + private final AtomicBoolean signaled = new AtomicBoolean(); + + private final AtomicReference closeFuture = + new AtomicReference(); + + private final AtomicReference ownerRef = new AtomicReference(); + + /** + * Create a new connection to a Cassandra node and associate it with the given pool. + * + * @param name the connection name + * @param endPoint the information to connect to the node + * @param factory the connection factory to use + * @param owner the component owning this connection (may be null). Note that an existing + * connection can also be associated to an owner later with {@link #setOwner(Owner)}. + */ + protected Connection(String name, EndPoint endPoint, Factory factory, Owner owner) { + this.endPoint = endPoint; + this.factory = factory; + this.dispatcher = new Dispatcher(); + this.name = name; + this.ownerRef.set(owner); + ListenableFuture thisFuture = Futures.immediateFuture(this); + this.defaultKeyspaceAttempt = new SetKeyspaceAttempt(null, thisFuture); + this.targetKeyspace = new AtomicReference(defaultKeyspaceAttempt); + } + + /** Create a new connection to a Cassandra node. 
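When no version has been forced, initialization above starts from ProtocolVersion.NEWEST_SUPPORTED. Applications can also pin the version up front; a minimal sketch (contact point and the chosen version are hypothetical):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolVersion;

public class PinProtocolVersion {
  public static void main(String[] args) {
    // Forces the native protocol version instead of letting the driver start from
    // ProtocolVersion.NEWEST_SUPPORTED and negotiate down.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withProtocolVersion(ProtocolVersion.V4) // hypothetical choice
            .build();
    // Reflects the pinned version.
    System.out.println(cluster.getConfiguration().getProtocolOptions().getProtocolVersion());
    cluster.close();
  }
}
```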
*/ + Connection(String name, EndPoint endPoint, Factory factory) { + this(name, endPoint, factory, null); + } + + ListenableFuture initAsync() { + if (factory.isShutdown) + return Futures.immediateFailedFuture( + new ConnectionException(endPoint, "Connection factory is shut down")); + + ProtocolVersion protocolVersion = + factory.protocolVersion == null + ? ProtocolVersion.NEWEST_SUPPORTED + : factory.protocolVersion; + final SettableFuture channelReadyFuture = SettableFuture.create(); + + try { + Bootstrap bootstrap = factory.newBootstrap(); + ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); + bootstrap.handler( + new Initializer( + this, + protocolVersion, + protocolOptions.getCompression().compressor(), + protocolOptions.getSSLOptions(), + factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds(), + factory.configuration.getNettyOptions(), + factory.configuration.getCodecRegistry(), + factory.configuration.getMetricsOptions().isEnabled() + ? factory.manager.metrics + : null)); + + ChannelFuture future = bootstrap.connect(endPoint.resolve()); + + writer.incrementAndGet(); + future.addListener( + new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + writer.decrementAndGet(); + // Note: future.channel() can be null in some error cases, so we need to guard against + // it in the rest of the code below. + channel = future.channel(); + if (isClosed() && channel != null) { + channel + .close() + .addListener( + new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + channelReadyFuture.setException( + new TransportException( + Connection.this.endPoint, + "Connection closed during initialization.")); + } }); - } else { - Connection.this.factory.allChannels.add(channel); - if (!future.isSuccess()) { - if (logger.isDebugEnabled()) - logger.debug(String.format("%s Error connecting to %s%s", Connection.this, Connection.this.address, extractMessage(future.cause()))); - channelReadyFuture.setException(new TransportException(Connection.this.address, "Cannot connect", future.cause())); - } else { - logger.debug("{} Connection established, initializing transport", Connection.this); - channel.closeFuture().addListener(new ChannelCloseListener()); - channelReadyFuture.set(null); - } - } + } else { + if (channel != null) { + Connection.this.factory.allChannels.add(channel); } - }); - } catch (RuntimeException e) { - closeAsync().force(); - throw e; - } + if (!future.isSuccess()) { + if (logger.isDebugEnabled()) + logger.debug( + String.format( + "%s Error connecting to %s%s", + Connection.this, + Connection.this.endPoint, + extractMessage(future.cause()))); + channelReadyFuture.setException( + new TransportException( + Connection.this.endPoint, "Cannot connect", future.cause())); + } else { + assert channel != null; + logger.debug( + "{} Connection established, initializing transport", Connection.this); + channel.closeFuture().addListener(new ChannelCloseListener()); + channelReadyFuture.set(null); + } + } + } + }); + } catch (RuntimeException e) { + closeAsync().force(); + throw e; + } - Executor initExecutor = factory.manager.configuration.getPoolingOptions().getInitializationExecutor(); + Executor initExecutor = + factory.manager.configuration.getPoolingOptions().getInitializationExecutor(); - ListenableFuture initializeTransportFuture = GuavaCompatibility.INSTANCE.transformAsync(channelReadyFuture, - onChannelReady(protocolVersion, 
initExecutor), initExecutor); + ListenableFuture initializeTransportFuture = + GuavaCompatibility.INSTANCE.transformAsync( + channelReadyFuture, onChannelReady(protocolVersion, initExecutor), initExecutor); - // Fallback on initializeTransportFuture so we can properly propagate specific exceptions. - ListenableFuture initFuture = GuavaCompatibility.INSTANCE.withFallback(initializeTransportFuture, new AsyncFunction() { - @Override - public ListenableFuture apply(Throwable t) throws Exception { + // Fallback on initializeTransportFuture so we can properly propagate specific exceptions. + ListenableFuture initFuture = + GuavaCompatibility.INSTANCE.withFallback( + initializeTransportFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(Throwable t) throws Exception { SettableFuture future = SettableFuture.create(); // Make sure the connection gets properly closed. - if (t instanceof ClusterNameMismatchException || t instanceof UnsupportedProtocolVersionException) { - // Just propagate - closeAsync().force(); - future.setException(t); + if (t instanceof ClusterNameMismatchException + || t instanceof UnsupportedProtocolVersionException) { + // Just propagate + closeAsync().force(); + future.setException(t); } else { - // Defunct to ensure that the error will be signaled (marking the host down) - Throwable e = (t instanceof ConnectionException || t instanceof DriverException || t instanceof InterruptedException || t instanceof Error) - ? t - : new ConnectionException(Connection.this.address, - String.format("Unexpected error during transport initialization (%s)", t), - t); - future.setException(defunct(e)); + // Defunct to ensure that the error will be signaled (marking the host down) + Throwable e = + (t instanceof ConnectionException + || t instanceof DriverException + || t instanceof InterruptedException + || t instanceof Error) + ? t + : new ConnectionException( + Connection.this.endPoint, + String.format( + "Unexpected error during transport initialization (%s)", t), + t); + future.setException(defunct(e)); } return future; + } + }, + initExecutor); + + // Ensure the connection gets closed if the caller cancels the returned future. + GuavaCompatibility.INSTANCE.addCallback( + initFuture, + new MoreFutures.FailureCallback() { + @Override + public void onFailure(Throwable t) { + if (!isClosed()) { + closeAsync().force(); } - }, initExecutor); - - // Ensure the connection gets closed if the caller cancels the returned future. - Futures.addCallback(initFuture, new MoreFutures.FailureCallback() { - @Override - public void onFailure(Throwable t) { - if (!isClosed()) { - closeAsync().force(); - } + } + }, + initExecutor); + + return initFuture; + } + + private static String extractMessage(Throwable t) { + if (t == null) return ""; + String msg = t.getMessage() == null || t.getMessage().isEmpty() ? 
t.toString() : t.getMessage(); + return " (" + msg + ')'; + } + + public ListenableFuture optionsQuery() { + Future startupOptionsFuture = write(new Requests.Options()); + + return GuavaCompatibility.INSTANCE.transformAsync(startupOptionsFuture, onSupportedResponse()); + } + + private AsyncFunction onChannelReady( + final ProtocolVersion protocolVersion, final Executor initExecutor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Void input) throws Exception { + ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); + Future startupResponseFuture = + write( + new Requests.Startup( + protocolOptions.getCompression(), protocolOptions.isNoCompact())); + return GuavaCompatibility.INSTANCE.transformAsync( + startupResponseFuture, onStartupResponse(protocolVersion, initExecutor), initExecutor); + } + }; + } + + private AsyncFunction onSupportedResponse() { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response response) throws Exception { + switch (response.type) { + case SUPPORTED: + return getProductType((Responses.Supported) response); + case ERROR: + Responses.Error error = (Responses.Error) response; + throw new TransportException( + endPoint, String.format("Error initializing connection: %s", error.message)); + default: + throw new TransportException( + endPoint, + String.format( + "Unexpected %s response message from server to a STARTUP message", + response.type)); + } + } + }; + } + + private AsyncFunction onStartupResponse( + final ProtocolVersion protocolVersion, final Executor initExecutor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response response) throws Exception { + switch (response.type) { + case READY: + return checkClusterName(protocolVersion, initExecutor); + case ERROR: + Responses.Error error = (Responses.Error) response; + if (isUnsupportedProtocolVersion(error)) + throw unsupportedProtocolVersionException( + protocolVersion, error.serverProtocolVersion); + throw new TransportException( + endPoint, String.format("Error initializing connection: %s", error.message)); + case AUTHENTICATE: + Responses.Authenticate authenticate = (Responses.Authenticate) response; + Authenticator authenticator; + try { + if (factory.authProvider instanceof ExtendedAuthProvider) { + authenticator = + ((ExtendedAuthProvider) factory.authProvider) + .newAuthenticator(endPoint, authenticate.authenticator); + } else { + authenticator = + factory.authProvider.newAuthenticator( + endPoint.resolve(), authenticate.authenticator); + } + } catch (AuthenticationException e) { + incrementAuthErrorMetric(); + throw e; } - }, initExecutor); - - return initFuture; - } - - private static String extractMessage(Throwable t) { - if (t == null) - return ""; - String msg = t.getMessage() == null || t.getMessage().isEmpty() - ? 
t.toString() - : t.getMessage(); - return " (" + msg + ')'; - } - - private AsyncFunction onChannelReady(final ProtocolVersion protocolVersion, final Executor initExecutor) { - return new AsyncFunction() { - @Override - public ListenableFuture apply(Void input) throws Exception { - ProtocolOptions.Compression compression = factory.configuration.getProtocolOptions().getCompression(); - Future startupResponseFuture = write(new Requests.Startup(compression)); - return GuavaCompatibility.INSTANCE.transformAsync(startupResponseFuture, - onStartupResponse(protocolVersion, initExecutor), initExecutor); + switch (protocolVersion) { + case V1: + if (authenticator instanceof ProtocolV1Authenticator) + return authenticateV1(authenticator, protocolVersion, initExecutor); + else + // DSE 3.x always uses SASL authentication backported from protocol v2 + return authenticateV2(authenticator, protocolVersion, initExecutor); + case V2: + case V3: + case V4: + case V5: + case V6: + return authenticateV2(authenticator, protocolVersion, initExecutor); + default: + throw defunct(protocolVersion.unsupported()); } - }; + default: + throw new TransportException( + endPoint, + String.format( + "Unexpected %s response message from server to a STARTUP message", + response.type)); + } + } + }; + } + + // Due to C* gossip bugs, system.peers may report nodes that are gone from the cluster. + // If these nodes have been recommissionned to another cluster and are up, nothing prevents the + // driver from connecting + // to them. So we check that the cluster the node thinks it belongs to is our cluster (JAVA-397). + private ListenableFuture checkClusterName( + ProtocolVersion protocolVersion, final Executor executor) { + final String expected = factory.manager.metadata.clusterName; + + // At initialization, the cluster is not known yet + if (expected == null) { + markInitialized(); + return MoreFutures.VOID_SUCCESS; } - private AsyncFunction onStartupResponse(final ProtocolVersion protocolVersion, final Executor initExecutor) { - return new AsyncFunction() { + DefaultResultSetFuture clusterNameFuture = + new DefaultResultSetFuture( + null, + protocolVersion, + new Requests.Query("select cluster_name from system.local where key = 'local'")); + try { + write(clusterNameFuture); + return GuavaCompatibility.INSTANCE.transformAsync( + clusterNameFuture, + new AsyncFunction() { @Override - public ListenableFuture apply(Message.Response response) throws Exception { - switch (response.type) { - case READY: - return checkClusterName(protocolVersion, initExecutor); - case ERROR: - Responses.Error error = (Responses.Error) response; - if (isUnsupportedProtocolVersion(error)) - throw unsupportedProtocolVersionException(protocolVersion, error.serverProtocolVersion); - throw new TransportException(address, String.format("Error initializing connection: %s", error.message)); - case AUTHENTICATE: - Responses.Authenticate authenticate = (Responses.Authenticate) response; - Authenticator authenticator; - try { - authenticator = factory.authProvider.newAuthenticator(address, authenticate.authenticator); - } catch (AuthenticationException e) { - incrementAuthErrorMetric(); - throw e; - } - switch (protocolVersion) { - case V1: - if (authenticator instanceof ProtocolV1Authenticator) - return authenticateV1(authenticator, protocolVersion, initExecutor); - else - // DSE 3.x always uses SASL authentication backported from protocol v2 - return authenticateV2(authenticator, protocolVersion, initExecutor); - case V2: - case V3: - case V4: - case V5: 
- return authenticateV2(authenticator, protocolVersion, initExecutor); - default: - throw defunct(protocolVersion.unsupported()); - } - default: - throw new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type)); - } + public ListenableFuture apply(ResultSet rs) throws Exception { + Row row = rs.one(); + String actual = row.getString("cluster_name"); + if (!expected.equals(actual)) + throw new ClusterNameMismatchException(endPoint, actual, expected); + markInitialized(); + return MoreFutures.VOID_SUCCESS; } - }; - } - - // Due to C* gossip bugs, system.peers may report nodes that are gone from the cluster. - // If these nodes have been recommissionned to another cluster and are up, nothing prevents the driver from connecting - // to them. So we check that the cluster the node thinks it belongs to is our cluster (JAVA-397). - private ListenableFuture checkClusterName(ProtocolVersion protocolVersion, final Executor executor) { - final String expected = factory.manager.metadata.clusterName; - - // At initialization, the cluster is not known yet - if (expected == null) { - markInitialized(); - return MoreFutures.VOID_SUCCESS; - } - - DefaultResultSetFuture clusterNameFuture = new DefaultResultSetFuture(null, protocolVersion, new Requests.Query("select cluster_name from system.local")); - try { - write(clusterNameFuture); - return GuavaCompatibility.INSTANCE.transformAsync(clusterNameFuture, - new AsyncFunction() { - @Override - public ListenableFuture apply(ResultSet rs) throws Exception { - Row row = rs.one(); - String actual = row.getString("cluster_name"); - if (!expected.equals(actual)) - throw new ClusterNameMismatchException(address, actual, expected); - markInitialized(); - return MoreFutures.VOID_SUCCESS; - } - }, executor); - } catch (Exception e) { - return Futures.immediateFailedFuture(e); - } + }, + executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); } - - private void markInitialized() { - isInitialized = true; - Host.statesLogger.debug("[{}] {} Transport initialized, connection ready", address, this); - } - - private ListenableFuture authenticateV1(Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { - Requests.Credentials creds = new Requests.Credentials(((ProtocolV1Authenticator) authenticator).getCredentials()); - try { - Future authResponseFuture = write(creds); - return GuavaCompatibility.INSTANCE.transformAsync(authResponseFuture, - new AsyncFunction() { - @Override - public ListenableFuture apply(Message.Response authResponse) throws Exception { - switch (authResponse.type) { - case READY: - return checkClusterName(protocolVersion, executor); - case ERROR: - incrementAuthErrorMetric(); - throw new AuthenticationException(address, ((Responses.Error) authResponse).message); - default: - throw new TransportException(address, String.format("Unexpected %s response message from server to a CREDENTIALS message", authResponse.type)); - } - } - }, executor); - } catch (Exception e) { - return Futures.immediateFailedFuture(e); - } - } - - private ListenableFuture authenticateV2(final Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { - byte[] initialResponse = authenticator.initialResponse(); - if (null == initialResponse) - initialResponse = EMPTY_BYTE_ARRAY; - - try { - Future authResponseFuture = write(new Requests.AuthResponse(initialResponse)); - return 
GuavaCompatibility.INSTANCE.transformAsync(authResponseFuture, onV2AuthResponse(authenticator, protocolVersion, executor), executor); - } catch (Exception e) { - return Futures.immediateFailedFuture(e); - } + } + + private ListenableFuture getProductType(Responses.Supported response) { + if (response.supported.containsKey("PRODUCT_TYPE") + && response.supported.get("PRODUCT_TYPE").size() > 0) { + return Futures.immediateFuture(response.supported.get("PRODUCT_TYPE").get(0)); + } else { + return Futures.immediateFuture(""); } - - private AsyncFunction onV2AuthResponse(final Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { - return new AsyncFunction() { + } + + private void markInitialized() { + isInitialized = true; + Host.statesLogger.debug("[{}] {} Transport initialized, connection ready", endPoint, this); + } + + private ListenableFuture authenticateV1( + Authenticator authenticator, final ProtocolVersion protocolVersion, final Executor executor) { + Requests.Credentials creds = + new Requests.Credentials(((ProtocolV1Authenticator) authenticator).getCredentials()); + try { + Future authResponseFuture = write(creds); + return GuavaCompatibility.INSTANCE.transformAsync( + authResponseFuture, + new AsyncFunction() { @Override public ListenableFuture apply(Message.Response authResponse) throws Exception { - switch (authResponse.type) { - case AUTH_SUCCESS: - logger.trace("{} Authentication complete", this); - authenticator.onAuthenticationSuccess(((Responses.AuthSuccess) authResponse).token); - return checkClusterName(protocolVersion, executor); - case AUTH_CHALLENGE: - byte[] responseToServer = authenticator.evaluateChallenge(((Responses.AuthChallenge) authResponse).token); - if (responseToServer == null) { - // If we generate a null response, then authentication has completed, proceed without - // sending a further response back to the server. 
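The new `optionsQuery()` / `onSupportedResponse()` / `getProductType()` path above asks the server for its options and reads the first `PRODUCT_TYPE` value out of the SUPPORTED response, falling back to an empty string when the option is absent. A minimal, driver-independent sketch of that lookup; the `ProductTypeExtractor` name and the example option values are illustrative, not part of the driver:

```
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Hypothetical helper mirroring the PRODUCT_TYPE lookup shown in the diff above:
// take the first value of the PRODUCT_TYPE option if present, else an empty string.
final class ProductTypeExtractor {
  static String productType(Map<String, List<String>> supportedOptions) {
    List<String> values = supportedOptions.getOrDefault("PRODUCT_TYPE", Collections.emptyList());
    return values.isEmpty() ? "" : values.get(0);
  }

  public static void main(String[] args) {
    Map<String, List<String>> supported =
        Map.of("PRODUCT_TYPE", List.of("SOME_PRODUCT"), "CQL_VERSION", List.of("3.4.5"));
    System.out.println(productType(supported));           // SOME_PRODUCT
    System.out.println(productType(Map.of()).isEmpty());  // true
  }
}
```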
- logger.trace("{} Authentication complete (No response to server)", this); - return checkClusterName(protocolVersion, executor); - } else { - // Otherwise, send the challenge response back to the server - logger.trace("{} Sending Auth response to challenge", this); - Future nextResponseFuture = write(new Requests.AuthResponse(responseToServer)); - return GuavaCompatibility.INSTANCE.transformAsync(nextResponseFuture, onV2AuthResponse(authenticator, protocolVersion, executor), executor); - } - case ERROR: - // This is not very nice, but we're trying to identify if we - // attempted v2 auth against a server which only supports v1 - // The AIOOBE indicates that the server didn't recognise the - // initial AuthResponse message - String message = ((Responses.Error) authResponse).message; - if (message.startsWith("java.lang.ArrayIndexOutOfBoundsException: 15")) - message = String.format("Cannot use authenticator %s with protocol version 1, " - + "only plain text authentication is supported with this protocol version", authenticator); - incrementAuthErrorMetric(); - throw new AuthenticationException(address, message); - default: - throw new TransportException(address, String.format("Unexpected %s response message from server to authentication message", authResponse.type)); - } + switch (authResponse.type) { + case READY: + return checkClusterName(protocolVersion, executor); + case ERROR: + incrementAuthErrorMetric(); + throw new AuthenticationException( + endPoint, ((Responses.Error) authResponse).message); + default: + throw new TransportException( + endPoint, + String.format( + "Unexpected %s response message from server to a CREDENTIALS message", + authResponse.type)); + } } - }; - } - - private void incrementAuthErrorMetric() { - if (factory.manager.configuration.getMetricsOptions().isEnabled()) { - factory.manager.metrics.getErrorMetrics().getAuthenticationErrors().inc(); - } + }, + executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); } - - private boolean isUnsupportedProtocolVersion(Responses.Error error) { - // Testing for a specific string is a tad fragile but well, we don't have much choice - // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451 - return (error.code == ExceptionCode.PROTOCOL_ERROR || error.code == ExceptionCode.SERVER_ERROR) && - error.message.contains("Invalid or unsupported protocol version"); + } + + private ListenableFuture authenticateV2( + final Authenticator authenticator, + final ProtocolVersion protocolVersion, + final Executor executor) { + byte[] initialResponse = authenticator.initialResponse(); + if (null == initialResponse) initialResponse = EMPTY_BYTE_ARRAY; + + try { + Future authResponseFuture = write(new Requests.AuthResponse(initialResponse)); + return GuavaCompatibility.INSTANCE.transformAsync( + authResponseFuture, onV2AuthResponse(authenticator, protocolVersion, executor), executor); + } catch (Exception e) { + return Futures.immediateFailedFuture(e); } + } + + private AsyncFunction onV2AuthResponse( + final Authenticator authenticator, + final ProtocolVersion protocolVersion, + final Executor executor) { + return new AsyncFunction() { + @Override + public ListenableFuture apply(Message.Response authResponse) throws Exception { + switch (authResponse.type) { + case AUTH_SUCCESS: + logger.trace("{} Authentication complete", this); + authenticator.onAuthenticationSuccess(((Responses.AuthSuccess) authResponse).token); + return checkClusterName(protocolVersion, executor); + case AUTH_CHALLENGE: + 
byte[] responseToServer = + authenticator.evaluateChallenge(((Responses.AuthChallenge) authResponse).token); + if (responseToServer == null) { + // If we generate a null response, then authentication has completed, proceed without + // sending a further response back to the server. + logger.trace("{} Authentication complete (No response to server)", this); + return checkClusterName(protocolVersion, executor); + } else { + // Otherwise, send the challenge response back to the server + logger.trace("{} Sending Auth response to challenge", this); + Future nextResponseFuture = write(new Requests.AuthResponse(responseToServer)); + return GuavaCompatibility.INSTANCE.transformAsync( + nextResponseFuture, + onV2AuthResponse(authenticator, protocolVersion, executor), + executor); + } + case ERROR: + // This is not very nice, but we're trying to identify if we + // attempted v2 auth against a server which only supports v1 + // The AIOOBE indicates that the server didn't recognise the + // initial AuthResponse message + String message = ((Responses.Error) authResponse).message; + if (message.startsWith("java.lang.ArrayIndexOutOfBoundsException: 15")) + message = + String.format( + "Cannot use authenticator %s with protocol version 1, " + + "only plain text authentication is supported with this protocol version", + authenticator); + incrementAuthErrorMetric(); + throw new AuthenticationException(endPoint, message); + default: + throw new TransportException( + endPoint, + String.format( + "Unexpected %s response message from server to authentication message", + authResponse.type)); + } + } + }; + } - private UnsupportedProtocolVersionException unsupportedProtocolVersionException(ProtocolVersion triedVersion, ProtocolVersion serverProtocolVersion) { - UnsupportedProtocolVersionException e = new UnsupportedProtocolVersionException(address, triedVersion, serverProtocolVersion); - logger.debug(e.getMessage()); - return e; + private void incrementAuthErrorMetric() { + if (factory.manager.configuration.getMetricsOptions().isEnabled()) { + factory.manager.metrics.getErrorMetrics().getAuthenticationErrors().inc(); } + } + + private boolean isUnsupportedProtocolVersion(Responses.Error error) { + // Testing for a specific string is a tad fragile but well, we don't have much choice + // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451 + return (error.code == ExceptionCode.PROTOCOL_ERROR || error.code == ExceptionCode.SERVER_ERROR) + && (error.message.contains("Invalid or unsupported protocol version") + // JAVA-2924: server is behind driver and considers the proposed version as beta + || error.message.contains("Beta version of the protocol used")); + } + + private UnsupportedProtocolVersionException unsupportedProtocolVersionException( + ProtocolVersion triedVersion, ProtocolVersion serverProtocolVersion) { + UnsupportedProtocolVersionException e = + new UnsupportedProtocolVersionException(endPoint, triedVersion, serverProtocolVersion); + logger.debug(e.getMessage()); + return e; + } + + boolean isDefunct() { + return isDefunct.get(); + } + + int maxAvailableStreams() { + return dispatcher.streamIdHandler.maxAvailableStreams(); + } + + E defunct(E e) { + if (isDefunct.compareAndSet(false, true)) { + + if (Host.statesLogger.isTraceEnabled()) Host.statesLogger.trace("Defuncting " + this, e); + else if (Host.statesLogger.isDebugEnabled()) + Host.statesLogger.debug("Defuncting {} because: {}", this, e.getMessage()); + + Host host = getHost(); + if (host != null) { + // Sometimes close() can 
be called before defunct(); avoid decrementing the connection count + // twice, but + // we still want to signal the error to the conviction policy. + boolean decrement = signaled.compareAndSet(false, true); + + boolean hostDown = host.convictionPolicy.signalConnectionFailure(this, decrement); + if (hostDown) { + factory.manager.signalHostDown(host, host.wasJustAdded()); + } else { + notifyOwnerWhenDefunct(); + } + } - boolean isDefunct() { - return isDefunct.get(); + // Force the connection to close to make sure the future completes. Otherwise force() might + // never get called and + // threads will wait on the future forever. + // (this also errors out pending handlers) + closeAsync().force(); } - - int maxAvailableStreams() { - return dispatcher.streamIdHandler.maxAvailableStreams(); + return e; + } + + private void notifyOwnerWhenDefunct() { + // If an error happens during initialization, the owner will detect it and take appropriate + // action + if (!isInitialized) return; + + Owner owner = this.ownerRef.get(); + if (owner != null) owner.onConnectionDefunct(this); + } + + String keyspace() { + return targetKeyspace.get().keyspace; + } + + void setKeyspace(String keyspace) throws ConnectionException { + if (keyspace == null) return; + + if (MoreObjects.equal(keyspace(), keyspace)) return; + + try { + Uninterruptibles.getUninterruptibly(setKeyspaceAsync(keyspace)); + } catch (ConnectionException e) { + throw defunct(e); + } catch (BusyConnectionException e) { + logger.warn( + "Tried to set the keyspace on busy {}. " + + "This should not happen but is not critical (it will be retried)", + this); + throw new ConnectionException(endPoint, "Tried to set the keyspace on busy connection"); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof OperationTimedOutException) { + // Rethrow so that the caller doesn't try to use the connection, but do not defunct as we + // don't want to mark down + logger.warn( + "Timeout while setting keyspace on {}. " + + "This should not happen but is not critical (it will be retried)", + this); + throw new ConnectionException(endPoint, "Timeout while setting keyspace on connection"); + } else { + throw defunct(new ConnectionException(endPoint, "Error while setting keyspace", cause)); + } } - - E defunct(E e) { - if (isDefunct.compareAndSet(false, true)) { - - if (Host.statesLogger.isTraceEnabled()) - Host.statesLogger.trace("Defuncting " + this, e); - else if (Host.statesLogger.isDebugEnabled()) - Host.statesLogger.debug("Defuncting {} because: {}", this, e.getMessage()); - - Host host = factory.manager.metadata.getHost(address); - if (host != null) { - // Sometimes close() can be called before defunct(); avoid decrementing the connection count twice, but - // we still want to signal the error to the conviction policy. - boolean decrement = signaled.compareAndSet(false, true); - - boolean hostDown = host.convictionPolicy.signalConnectionFailure(this, decrement); - if (hostDown) { - factory.manager.signalHostDown(host, host.wasJustAdded()); + } + + ListenableFuture setKeyspaceAsync(final String keyspace) + throws ConnectionException, BusyConnectionException { + SetKeyspaceAttempt existingAttempt = targetKeyspace.get(); + if (MoreObjects.equal(existingAttempt.keyspace, keyspace)) return existingAttempt.future; + + final SettableFuture ksFuture = SettableFuture.create(); + final SetKeyspaceAttempt attempt = new SetKeyspaceAttempt(keyspace, ksFuture); + + // Check for an existing keyspace attempt. 
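The reworked `defunct()` above is made idempotent with `isDefunct.compareAndSet(false, true)`, and the separate `signaled` flag it shares with `closeAsync()` ensures the conviction policy sees the connection-count decrement only once even when close and defunct race. A simplified sketch of that two-flag pattern with plain JDK atomics; the class and its logging are hypothetical stand-ins:

```
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative only: one CAS-guarded flag makes the defunct transition idempotent,
// the other is shared with close() so the failure is signaled (and counted) exactly once.
final class DefunctFlagsSketch {
  private final AtomicBoolean defunct = new AtomicBoolean(false);
  private final AtomicBoolean signaled = new AtomicBoolean(false);

  void defunct(Throwable cause) {
    if (defunct.compareAndSet(false, true)) {
      boolean firstSignal = signaled.compareAndSet(false, true);
      // firstSignal == false means close() already reported this connection;
      // we still notify, but without decrementing the open-connection count again.
      System.out.println("defunct, decrement=" + firstSignal + ", cause=" + cause);
    }
  }

  void close() {
    if (signaled.compareAndSet(false, true)) {
      System.out.println("closed, signaling once");
    }
  }

  public static void main(String[] args) {
    DefunctFlagsSketch c = new DefunctFlagsSketch();
    c.close();
    c.defunct(new RuntimeException("boom"));  // runs once, does not decrement twice
    c.defunct(new RuntimeException("again")); // no-op
  }
}
```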
+ while (true) { + existingAttempt = targetKeyspace.get(); + // if existing attempts' keyspace matches what we are trying to set, use it. + if (attempt.equals(existingAttempt)) { + return existingAttempt.future; + } else if (!existingAttempt.future.isDone()) { + // If the existing attempt is still in flight, fail this attempt immediately. + ksFuture.setException( + new DriverException( + "Aborting attempt to set keyspace to '" + + keyspace + + "' since there is already an in flight attempt to set keyspace to '" + + existingAttempt.keyspace + + "'. " + + "This can happen if you try to USE different keyspaces from the same session simultaneously.")); + return ksFuture; + } else if (targetKeyspace.compareAndSet(existingAttempt, attempt)) { + // Otherwise, if the existing attempt is done, start a new set keyspace attempt for the new + // keyspace. + logger.debug("{} Setting keyspace {}", this, keyspace); + // Note: we quote the keyspace below, because the name is the one coming from Cassandra, so + // it's in the right case already + Future future = write(new Requests.Query("USE \"" + keyspace + '"')); + GuavaCompatibility.INSTANCE.addCallback( + future, + new FutureCallback() { + + @Override + public void onSuccess(Message.Response response) { + if (response instanceof SetKeyspace) { + logger.debug("{} Keyspace set to {}", Connection.this, keyspace); + ksFuture.set(Connection.this); } else { - notifyOwnerWhenDefunct(); + // Unset this attempt so new attempts may be made for the same keyspace. + targetKeyspace.compareAndSet(attempt, defaultKeyspaceAttempt); + if (response.type == ERROR) { + Responses.Error error = (Responses.Error) response; + ksFuture.setException(defunct(error.asException(endPoint))); + } else { + ksFuture.setException( + defunct( + new DriverInternalError( + "Unexpected response while setting keyspace: " + response))); + } } - } - - // Force the connection to close to make sure the future completes. Otherwise force() might never get called and - // threads will wait on the future forever. - // (this also errors out pending handlers) - closeAsync().force(); - } - return e; + } + + @Override + public void onFailure(Throwable t) { + targetKeyspace.compareAndSet(attempt, defaultKeyspaceAttempt); + ksFuture.setException(t); + } + }, + factory.manager.configuration.getPoolingOptions().getInitializationExecutor()); + + return ksFuture; + } } - - private void notifyOwnerWhenDefunct() { - // If an error happens during initialization, the owner will detect it and take appropriate action - if (!isInitialized) - return; - - Owner owner = this.ownerRef.get(); - if (owner != null) - owner.onConnectionDefunct(this); + } + + /** + * Write a request on this connection. 
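`setKeyspaceAsync()` above allows at most one in-flight `USE` per connection: a request for the keyspace already being set piggybacks on the existing future, a request for a different keyspace while one is in flight fails immediately, and once the previous attempt is done a new attempt is installed with compare-and-set before the query is written. The sketch below mirrors that control flow with JDK futures; `KeyspaceSwitcher`, `Attempt` and the simulated `sendUse` call are invented names, not driver API:

```
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

final class KeyspaceSwitcher {
  private static final class Attempt {
    final String keyspace;
    final CompletableFuture<Void> future;
    Attempt(String keyspace, CompletableFuture<Void> future) {
      this.keyspace = keyspace;
      this.future = future;
    }
  }

  private final AtomicReference<Attempt> target =
      new AtomicReference<>(new Attempt(null, CompletableFuture.completedFuture(null)));

  CompletableFuture<Void> setKeyspaceAsync(String keyspace) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    Attempt attempt = new Attempt(keyspace, result);
    while (true) {
      Attempt existing = target.get();
      if (Objects.equals(existing.keyspace, keyspace)) {
        return existing.future;               // same keyspace: reuse the existing attempt
      } else if (!existing.future.isDone()) {
        result.completeExceptionally(         // different keyspace still in flight: fail fast
            new IllegalStateException("concurrent USE of '" + existing.keyspace + "'"));
        return result;
      } else if (target.compareAndSet(existing, attempt)) {
        sendUse(keyspace, result);            // won the race: issue the USE query
        return result;
      }
      // CAS lost: another thread installed an attempt, loop and re-examine it.
    }
  }

  // Stand-in for writing the USE request on the wire.
  private void sendUse(String keyspace, CompletableFuture<Void> result) {
    result.complete(null);
  }

  public static void main(String[] args) {
    KeyspaceSwitcher c = new KeyspaceSwitcher();
    System.out.println(c.setKeyspaceAsync("ks1").isDone());
    System.out.println(c.setKeyspaceAsync("ks1").isDone());
  }
}
```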
+ * + * @param request the request to send + * @return a future on the server response + * @throws ConnectionException if the connection is closed + * @throws TransportException if an I/O error while sending the request + */ + Future write(Message.Request request) throws ConnectionException, BusyConnectionException { + Future future = new Future(request); + write(future); + return future; + } + + ResponseHandler write(ResponseCallback callback) + throws ConnectionException, BusyConnectionException { + return write(callback, -1, true); + } + + ResponseHandler write( + ResponseCallback callback, long statementReadTimeoutMillis, boolean startTimeout) + throws ConnectionException, BusyConnectionException { + + ResponseHandler handler = new ResponseHandler(this, statementReadTimeoutMillis, callback); + dispatcher.add(handler); + + Message.Request request = callback.request().setStreamId(handler.streamId); + + /* + * We check for close/defunct *after* having set the handler because closing/defuncting + * will set their flag and then error out handler if need. So, by doing the check after + * having set the handler, we guarantee that even if we race with defunct/close, we may + * never leave a handler that won't get an answer or be errored out. + */ + if (isDefunct.get()) { + dispatcher.removeHandler(handler, true); + throw new ConnectionException(endPoint, "Write attempt on defunct connection"); } - String keyspace() { - return targetKeyspace.get().keyspace; + if (isClosed()) { + dispatcher.removeHandler(handler, true); + throw new ConnectionException(endPoint, "Connection has been closed"); } - void setKeyspace(String keyspace) throws ConnectionException { - if (keyspace == null) - return; - - if (MoreObjects.equal(keyspace(), keyspace)) - return; + logger.trace("{}, stream {}, writing request {}", this, request.getStreamId(), request); + writer.incrementAndGet(); - try { - Uninterruptibles.getUninterruptibly(setKeyspaceAsync(keyspace)); - } catch (ConnectionException e) { - throw defunct(e); - } catch (BusyConnectionException e) { - logger.warn("Tried to set the keyspace on busy {}. " - + "This should not happen but is not critical (it will be retried)", this); - throw new ConnectionException(address, "Tried to set the keyspace on busy connection"); - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof OperationTimedOutException) { - // Rethrow so that the caller doesn't try to use the connection, but do not defunct as we don't want to mark down - logger.warn("Timeout while setting keyspace on {}. " - + "This should not happen but is not critical (it will be retried)", this); - throw new ConnectionException(address, "Timeout while setting keyspace on connection"); - } else { - throw defunct(new ConnectionException(address, "Error while setting keyspace", cause)); - } - } + if (DISABLE_COALESCING) { + channel.writeAndFlush(request).addListener(writeHandler(request, handler)); + } else { + flush(new FlushItem(channel, request, writeHandler(request, handler))); } - - ListenableFuture setKeyspaceAsync(final String keyspace) throws ConnectionException, BusyConnectionException { - SetKeyspaceAttempt existingAttempt = targetKeyspace.get(); - if (MoreObjects.equal(existingAttempt.keyspace, keyspace)) - return existingAttempt.future; - - final SettableFuture ksFuture = SettableFuture.create(); - final SetKeyspaceAttempt attempt = new SetKeyspaceAttempt(keyspace, ksFuture); - - // Check for an existing keyspace attempt. 
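The comment in the new `write(ResponseCallback, ...)` above explains the ordering: the response handler is registered with the dispatcher first, and only then are the defunct/closed flags checked, so a racing close either fails the already-registered handler or is observed by the check, and no handler is ever left waiting for an answer. A small, driver-independent sketch of that register-then-check pattern (all names hypothetical):

```
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

final class RegisterThenCheckSketch {
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final AtomicInteger streamIds = new AtomicInteger();
  private final Map<Integer, CompletableFuture<String>> pending = new ConcurrentHashMap<>();

  CompletableFuture<String> write(String request) {
    int streamId = streamIds.incrementAndGet();
    CompletableFuture<String> handler = new CompletableFuture<>();
    pending.put(streamId, handler);          // 1. register first
    if (closed.get()) {                      // 2. then check state
      pending.remove(streamId);
      handler.completeExceptionally(new IllegalStateException("Connection has been closed"));
      return handler;
    }
    // ... the request would be sent here; the response path completes `handler` later ...
    return handler;
  }

  void close() {
    closed.set(true);
    // Fail everything registered so far; anything registered after the flag is set
    // will see `closed` in write() and fail itself, so no handler is orphaned.
    pending.forEach((id, h) -> h.completeExceptionally(new IllegalStateException("closed")));
    pending.clear();
  }
}
```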
- while (true) { - existingAttempt = targetKeyspace.get(); - // if existing attempts' keyspace matches what we are trying to set, use it. - if (attempt.equals(existingAttempt)) { - return existingAttempt.future; - } else if (!existingAttempt.future.isDone()) { - // If the existing attempt is still in flight, fail this attempt immediately. - ksFuture.setException(new DriverException("Aborting attempt to set keyspace to '" + keyspace + "' since there is already an in flight attempt to set keyspace to '" + existingAttempt.keyspace + "'. " - + "This can happen if you try to USE different keyspaces from the same session simultaneously.")); - return ksFuture; - } else if (targetKeyspace.compareAndSet(existingAttempt, attempt)) { - // Otherwise, if the existing attempt is done, start a new set keyspace attempt for the new keyspace. - logger.debug("{} Setting keyspace {}", this, keyspace); - // Note: we quote the keyspace below, because the name is the one coming from Cassandra, so it's in the right case already - Future future = write(new Requests.Query("USE \"" + keyspace + '"')); - Futures.addCallback(future, new FutureCallback() { - - @Override - public void onSuccess(Message.Response response) { - if (response instanceof SetKeyspace) { - logger.debug("{} Keyspace set to {}", Connection.this, keyspace); - ksFuture.set(Connection.this); - } else { - // Unset this attempt so new attempts may be made for the same keyspace. - targetKeyspace.compareAndSet(attempt, defaultKeyspaceAttempt); - if (response.type == ERROR) { - Responses.Error error = (Responses.Error) response; - ksFuture.setException(defunct(error.asException(address))); - } else { - ksFuture.setException(defunct(new DriverInternalError("Unexpected response while setting keyspace: " + response))); - } - } - } - - @Override - public void onFailure(Throwable t) { - targetKeyspace.compareAndSet(attempt, defaultKeyspaceAttempt); - ksFuture.setException(t); - } - }, factory.manager.configuration.getPoolingOptions().getInitializationExecutor()); - - return ksFuture; - } + if (startTimeout) handler.startTimeout(); + + return handler; + } + + private ChannelFutureListener writeHandler( + final Message.Request request, final ResponseHandler handler) { + return new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture writeFuture) { + + writer.decrementAndGet(); + + if (!writeFuture.isSuccess()) { + logger.debug( + "{}, stream {}, Error writing request {}", + Connection.this, + request.getStreamId(), + request); + // Remove this handler from the dispatcher so it don't get notified of the error + // twice (we will fail that method already) + dispatcher.removeHandler(handler, true); + + final ConnectionException ce; + if (writeFuture.cause() instanceof java.nio.channels.ClosedChannelException) { + ce = new TransportException(endPoint, "Error writing: Closed channel"); + } else { + ce = new TransportException(endPoint, "Error writing", writeFuture.cause()); + } + final long latency = System.nanoTime() - handler.startTime; + // This handler is executed while holding the writeLock of the channel. + // defunct might close the pool, which will close all of its connections; closing a + // connection also + // requires its writeLock. + // Therefore if multiple connections in the same pool get a write error, they could + // deadlock; + // we run defunct on a separate thread to avoid that. 
+ ListeningExecutorService executor = factory.manager.executor; + if (!executor.isShutdown()) + executor.execute( + new Runnable() { + @Override + public void run() { + handler.callback.onException( + Connection.this, defunct(ce), latency, handler.retryCount); + } + }); + } else { + logger.trace( + "{}, stream {}, request sent successfully", Connection.this, request.getStreamId()); } + } + }; + } + + boolean hasOwner() { + return this.ownerRef.get() != null; + } + + /** @return whether the connection was already associated with an owner */ + boolean setOwner(Owner owner) { + return ownerRef.compareAndSet(null, owner); + } + + /** + * If the connection is part of a pool, return it to the pool. The connection should generally not + * be reused after that. + */ + void release(boolean busy) { + Owner owner = ownerRef.get(); + if (owner instanceof HostConnectionPool) + ((HostConnectionPool) owner).returnConnection(this, busy); + } + + void release() { + release(false); + } + + boolean isClosed() { + return closeFuture.get() != null; + } + + /** + * Closes the connection: no new writes will be accepted after this method has returned. + * + *
<p>
However, a closed connection might still have ongoing queries awaiting for their result. + * When all these ongoing queries have completed, the underlying channel will be closed; we refer + * to this final state as "terminated". + * + * @return a future that will complete once the connection has terminated. + * @see #tryTerminate(boolean) + */ + CloseFuture closeAsync() { + + ConnectionCloseFuture future = new ConnectionCloseFuture(); + if (!closeFuture.compareAndSet(null, future)) { + // close had already been called, return the existing future + return closeFuture.get(); } - /** - * Write a request on this connection. - * - * @param request the request to send - * @return a future on the server response - * @throws ConnectionException if the connection is closed - * @throws TransportException if an I/O error while sending the request - */ - Future write(Message.Request request) throws ConnectionException, BusyConnectionException { - Future future = new Future(request); - write(future); - return future; - } - - ResponseHandler write(ResponseCallback callback) throws ConnectionException, BusyConnectionException { - return write(callback, -1, true); - } - - ResponseHandler write(ResponseCallback callback, long statementReadTimeoutMillis, boolean startTimeout) throws ConnectionException, BusyConnectionException { - - ResponseHandler handler = new ResponseHandler(this, statementReadTimeoutMillis, callback); - dispatcher.add(handler); - - Message.Request request = callback.request().setStreamId(handler.streamId); - - /* - * We check for close/defunct *after* having set the handler because closing/defuncting - * will set their flag and then error out handler if need. So, by doing the check after - * having set the handler, we guarantee that even if we race with defunct/close, we may - * never leave a handler that won't get an answer or be errored out. 
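`closeAsync()` above publishes its `ConnectionCloseFuture` with `closeFuture.compareAndSet(null, future)`, so the first caller starts the shutdown and every later caller gets the same future back, while actual termination is deferred until pending requests drain or the reaper forces it. A minimal sketch of that idempotent-close pattern using a JDK future; the class name and the immediate completion are illustrative simplifications:

```
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

final class IdempotentCloseSketch {
  private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();

  boolean isClosed() {
    return closeFuture.get() != null;
  }

  CompletableFuture<Void> closeAsync() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    if (!closeFuture.compareAndSet(null, future)) {
      return closeFuture.get();  // close already called: return the existing future
    }
    // First caller: start the real shutdown. Here we complete immediately; the driver
    // instead waits for pending requests, or lets the reaper force termination later.
    future.complete(null);
    return future;
  }

  public static void main(String[] args) {
    IdempotentCloseSketch c = new IdempotentCloseSketch();
    System.out.println(c.closeAsync() == c.closeAsync()); // true: same future on repeated calls
    System.out.println(c.isClosed());                     // true
  }
}
```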
- */ - if (isDefunct.get()) { - dispatcher.removeHandler(handler, true); - throw new ConnectionException(address, "Write attempt on defunct connection"); - } - - if (isClosed()) { - dispatcher.removeHandler(handler, true); - throw new ConnectionException(address, "Connection has been closed"); - } + logger.debug("{} closing connection", this); - logger.trace("{}, stream {}, writing request {}", this, request.getStreamId(), request); - writer.incrementAndGet(); - - if (DISABLE_COALESCING) { - channel.writeAndFlush(request).addListener(writeHandler(request, handler)); - } else { - flush(new FlushItem(channel, request, writeHandler(request, handler))); - } - if (startTimeout) - handler.startTimeout(); - - return handler; + // Only signal if defunct hasn't done it already + if (signaled.compareAndSet(false, true)) { + Host host = getHost(); + if (host != null) { + host.convictionPolicy.signalConnectionClosed(this); + } } - private ChannelFutureListener writeHandler(final Message.Request request, final ResponseHandler handler) { - return new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture writeFuture) { - - writer.decrementAndGet(); - - if (!writeFuture.isSuccess()) { - logger.debug("{}, stream {}, Error writing request {}", Connection.this, request.getStreamId(), request); - // Remove this handler from the dispatcher so it don't get notified of the error - // twice (we will fail that method already) - dispatcher.removeHandler(handler, true); - - final ConnectionException ce; - if (writeFuture.cause() instanceof java.nio.channels.ClosedChannelException) { - ce = new TransportException(address, "Error writing: Closed channel"); - } else { - ce = new TransportException(address, "Error writing", writeFuture.cause()); - } - final long latency = System.nanoTime() - handler.startTime; - // This handler is executed while holding the writeLock of the channel. - // defunct might close the pool, which will close all of its connections; closing a connection also - // requires its writeLock. - // Therefore if multiple connections in the same pool get a write error, they could deadlock; - // we run defunct on a separate thread to avoid that. - ListeningExecutorService executor = factory.manager.executor; - if (!executor.isShutdown()) - executor.execute(new Runnable() { - @Override - public void run() { - handler.callback.onException(Connection.this, defunct(ce), latency, handler.retryCount); - } - }); - } else { - logger.trace("{}, stream {}, request sent successfully", Connection.this, request.getStreamId()); - } - } - }; + boolean terminated = tryTerminate(false); + if (!terminated) { + // The time by which all pending requests should have normally completed (use twice the read + // timeout for a generous + // estimate -- note that this does not cover the eventuality that read timeout is updated + // dynamically, but we can live + // with that). 
+ long terminateTime = System.currentTimeMillis() + 2 * factory.getReadTimeoutMillis(); + factory.reaper.register(this, terminateTime); } - - boolean hasOwner() { - return this.ownerRef.get() != null; + return future; + } + + private Host getHost() { + Metadata metadata = factory.manager.metadata; + Host host = metadata.getHost(endPoint); + // During init the host might not be in metatada.hosts yet, try the contact points + if (host == null) { + host = metadata.getContactPoint(endPoint); } - - /** - * @return whether the connection was already associated with an owner - */ - boolean setOwner(Owner owner) { - return ownerRef.compareAndSet(null, owner); + return host; + } + + /** + * Tries to terminate a closed connection, i.e. release system resources. + * + *
<p>
This is called both by "normal" code and by {@link Cluster.ConnectionReaper}. + * + * @param force whether to proceed if there are still outstanding requests. + * @return whether the connection has actually terminated. + * @see #closeAsync() + */ + boolean tryTerminate(boolean force) { + assert isClosed(); + ConnectionCloseFuture future = closeFuture.get(); + + if (future.isDone()) { + logger.debug("{} has already terminated", this); + return true; + } else { + if (force || dispatcher.pending.isEmpty()) { + if (force) + logger.warn( + "Forcing termination of {}. This should not happen and is likely a bug, please report.", + this); + future.force(); + return true; + } else { + logger.debug("Not terminating {}: there are still pending requests", this); + return false; + } } - - /** - * If the connection is part of a pool, return it to the pool. - * The connection should generally not be reused after that. - */ - void release() { - Owner owner = ownerRef.get(); - if (owner instanceof HostConnectionPool) - ((HostConnectionPool) owner).returnConnection(this); + } + + @Override + public String toString() { + return String.format( + "Connection[%s, inFlight=%d, closed=%b]", name, inFlight.get(), isClosed()); + } + + static class Factory { + + final Timer timer; + + final EventLoopGroup eventLoopGroup; + private final Class channelClass; + + private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE); + + private final ConcurrentMap idGenerators = + new ConcurrentHashMap(); + final DefaultResponseHandler defaultHandler; + final Cluster.Manager manager; + final Cluster.ConnectionReaper reaper; + final Configuration configuration; + + final AuthProvider authProvider; + private volatile boolean isShutdown; + + volatile ProtocolVersion protocolVersion; + private final NettyOptions nettyOptions; + + Factory(Cluster.Manager manager, Configuration configuration) { + this.defaultHandler = manager; + this.manager = manager; + this.reaper = manager.reaper; + this.configuration = configuration; + this.authProvider = configuration.getProtocolOptions().getAuthProvider(); + this.protocolVersion = configuration.getProtocolOptions().initialProtocolVersion; + this.nettyOptions = configuration.getNettyOptions(); + this.eventLoopGroup = + nettyOptions.eventLoopGroup( + manager + .configuration + .getThreadingOptions() + .createThreadFactory(manager.clusterName, "nio-worker")); + this.channelClass = nettyOptions.channelClass(); + this.timer = + nettyOptions.timer( + manager + .configuration + .getThreadingOptions() + .createThreadFactory(manager.clusterName, "timeouter")); } - boolean isClosed() { - return closeFuture.get() != null; + int getPort() { + return configuration.getProtocolOptions().getPort(); } /** - * Closes the connection: no new writes will be accepted after this method has returned. - *

- * However, a closed connection might still have ongoing queries awaiting for their result. - * When all these ongoing queries have completed, the underlying channel will be closed; we - * refer to this final state as "terminated". + * Opens a new connection to the node this factory points to. * - * @return a future that will complete once the connection has terminated. - * @see #tryTerminate(boolean) + * @return the newly created (and initialized) connection. + * @throws ConnectionException if connection attempt fails. */ - CloseFuture closeAsync() { - - ConnectionCloseFuture future = new ConnectionCloseFuture(); - if (!closeFuture.compareAndSet(null, future)) { - // close had already been called, return the existing future - return closeFuture.get(); - } - - logger.debug("{} closing connection", this); - - // Only signal if defunct hasn't done it already - if (signaled.compareAndSet(false, true)) { - Host host = factory.manager.metadata.getHost(address); - if (host != null) { - host.convictionPolicy.signalConnectionClosed(this); - } - } + Connection open(Host host) + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + EndPoint endPoint = host.getEndPoint(); + + if (isShutdown) throw new ConnectionException(endPoint, "Connection factory is shut down"); + + host.convictionPolicy.signalConnectionsOpening(1); + Connection connection = new Connection(buildConnectionName(host), endPoint, this); + // This method opens the connection synchronously, so wait until it's initialized + try { + connection.initAsync().get(); + return connection; + } catch (ExecutionException e) { + throw launderAsyncInitException(e); + } + } - boolean terminated = tryTerminate(false); - if (!terminated) { - // The time by which all pending requests should have normally completed (use twice the read timeout for a generous - // estimate -- note that this does not cover the eventuality that read timeout is updated dynamically, but we can live - // with that). - long terminateTime = System.currentTimeMillis() + 2 * factory.getReadTimeoutMillis(); - factory.reaper.register(this, terminateTime); - } - return future; + /** Same as open, but associate the created connection to the provided connection pool. */ + Connection open(HostConnectionPool pool) + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + pool.host.convictionPolicy.signalConnectionsOpening(1); + Connection connection = + new Connection(buildConnectionName(pool.host), pool.host.getEndPoint(), this, pool); + try { + connection.initAsync().get(); + return connection; + } catch (ExecutionException e) { + throw launderAsyncInitException(e); + } } /** - * Tries to terminate a closed connection, i.e. release system resources. - *

- * This is called both by "normal" code and by {@link Cluster.ConnectionReaper}. - * - * @param force whether to proceed if there are still outstanding requests. - * @return whether the connection has actually terminated. - * @see #closeAsync() + * Creates new connections and associate them to the provided connection pool, but does not + * start them. */ - boolean tryTerminate(boolean force) { - assert isClosed(); - ConnectionCloseFuture future = closeFuture.get(); - - if (future.isDone()) { - logger.debug("{} has already terminated", this); - return true; - } else { - if (force || dispatcher.pending.isEmpty()) { - if (force) - logger.warn("Forcing termination of {}. This should not happen and is likely a bug, please report.", this); - future.force(); - return true; - } else { - logger.debug("Not terminating {}: there are still pending requests", this); - return false; - } - } + List newConnections(HostConnectionPool pool, int count) { + pool.host.convictionPolicy.signalConnectionsOpening(count); + List connections = Lists.newArrayListWithCapacity(count); + for (int i = 0; i < count; i++) + connections.add( + new Connection(buildConnectionName(pool.host), pool.host.getEndPoint(), this, pool)); + return connections; } - @Override - public String toString() { - return String.format("Connection[%s, inFlight=%d, closed=%b]", name, inFlight.get(), isClosed()); + private String buildConnectionName(Host host) { + return host.getEndPoint().toString() + '-' + getIdGenerator(host).getAndIncrement(); } - static class Factory { - - final Timer timer; - - final EventLoopGroup eventLoopGroup; - private final Class channelClass; - - private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE); - - private final ConcurrentMap idGenerators = new ConcurrentHashMap(); - final DefaultResponseHandler defaultHandler; - final Cluster.Manager manager; - final Cluster.ConnectionReaper reaper; - final Configuration configuration; - - final AuthProvider authProvider; - private volatile boolean isShutdown; - - volatile ProtocolVersion protocolVersion; - private final NettyOptions nettyOptions; - - Factory(Cluster.Manager manager, Configuration configuration) { - this.defaultHandler = manager; - this.manager = manager; - this.reaper = manager.reaper; - this.configuration = configuration; - this.authProvider = configuration.getProtocolOptions().getAuthProvider(); - this.protocolVersion = configuration.getProtocolOptions().initialProtocolVersion; - this.nettyOptions = configuration.getNettyOptions(); - this.eventLoopGroup = nettyOptions.eventLoopGroup( - manager.configuration.getThreadingOptions().createThreadFactory(manager.clusterName, "nio-worker")); - this.channelClass = nettyOptions.channelClass(); - this.timer = nettyOptions.timer( - manager.configuration.getThreadingOptions().createThreadFactory(manager.clusterName, "timeouter")); - } - - int getPort() { - return configuration.getProtocolOptions().getPort(); - } - - /** - * Opens a new connection to the node this factory points to. - * - * @return the newly created (and initialized) connection. - * @throws ConnectionException if connection attempt fails. 
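`launderAsyncInitException` above unwraps the `ExecutionException` produced by the blocking `initAsync().get()` in `Factory.open`, rethrowing the cause when it is one of the checked types the caller declares and wrapping anything unexpected in a `RuntimeException`. A compact generic version of the same unwrapping idea, using plain JDK exception types as stand-ins for the driver's:

```
import java.io.IOException;
import java.util.concurrent.ExecutionException;

final class LaunderSketch {
  // Rethrow known causes as themselves, wrap anything unexpected.
  static RuntimeException launder(ExecutionException e) throws IOException, InterruptedException {
    Throwable t = e.getCause();
    if (t instanceof IOException) throw (IOException) t;
    if (t instanceof InterruptedException) throw (InterruptedException) t;
    if (t instanceof RuntimeException) throw (RuntimeException) t;
    if (t instanceof Error) throw (Error) t;
    return new RuntimeException("Unexpected exception during initialization", t);
  }

  public static void main(String[] args) throws Exception {
    try {
      throw launder(new ExecutionException(new IOException("connect failed")));
    } catch (IOException expected) {
      System.out.println("unwrapped: " + expected.getMessage());
    }
  }
}
```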
- */ - Connection open(Host host) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - InetSocketAddress address = host.getSocketAddress(); - - if (isShutdown) - throw new ConnectionException(address, "Connection factory is shut down"); - - host.convictionPolicy.signalConnectionsOpening(1); - Connection connection = new Connection(buildConnectionName(host), address, this); - // This method opens the connection synchronously, so wait until it's initialized - try { - connection.initAsync().get(); - return connection; - } catch (ExecutionException e) { - throw launderAsyncInitException(e); - } - } - - /** - * Same as open, but associate the created connection to the provided connection pool. - */ - Connection open(HostConnectionPool pool) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - pool.host.convictionPolicy.signalConnectionsOpening(1); - Connection connection = new Connection(buildConnectionName(pool.host), pool.host.getSocketAddress(), this, pool); - try { - connection.initAsync().get(); - return connection; - } catch (ExecutionException e) { - throw launderAsyncInitException(e); - } - } - - /** - * Creates new connections and associate them to the provided connection pool, but does not start them. - */ - List newConnections(HostConnectionPool pool, int count) { - pool.host.convictionPolicy.signalConnectionsOpening(count); - List connections = Lists.newArrayListWithCapacity(count); - for (int i = 0; i < count; i++) - connections.add(new Connection(buildConnectionName(pool.host), pool.host.getSocketAddress(), this, pool)); - return connections; - } + static RuntimeException launderAsyncInitException(ExecutionException e) + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + Throwable t = e.getCause(); + if (t instanceof ConnectionException) throw (ConnectionException) t; + if (t instanceof InterruptedException) throw (InterruptedException) t; + if (t instanceof UnsupportedProtocolVersionException) + throw (UnsupportedProtocolVersionException) t; + if (t instanceof ClusterNameMismatchException) throw (ClusterNameMismatchException) t; + if (t instanceof DriverException) throw (DriverException) t; + if (t instanceof Error) throw (Error) t; + + return new RuntimeException("Unexpected exception during connection initialization", t); + } - private String buildConnectionName(Host host) { - return host.getSocketAddress().toString() + '-' + getIdGenerator(host).getAndIncrement(); - } + private AtomicInteger getIdGenerator(Host host) { + AtomicInteger g = idGenerators.get(host); + if (g == null) { + g = new AtomicInteger(1); + AtomicInteger old = idGenerators.putIfAbsent(host, g); + if (old != null) g = old; + } + return g; + } - static RuntimeException launderAsyncInitException(ExecutionException e) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - Throwable t = e.getCause(); - if (t instanceof ConnectionException) - throw (ConnectionException) t; - if (t instanceof InterruptedException) - throw (InterruptedException) t; - if (t instanceof UnsupportedProtocolVersionException) - throw (UnsupportedProtocolVersionException) t; - if (t instanceof ClusterNameMismatchException) - throw (ClusterNameMismatchException) t; - if (t instanceof DriverException) - throw (DriverException) t; - if (t instanceof Error) - throw (Error) t; - 
- return new RuntimeException("Unexpected exception during connection initialization", t); - } + long getReadTimeoutMillis() { + return configuration.getSocketOptions().getReadTimeoutMillis(); + } - private AtomicInteger getIdGenerator(Host host) { - AtomicInteger g = idGenerators.get(host); - if (g == null) { - g = new AtomicInteger(1); - AtomicInteger old = idGenerators.putIfAbsent(host, g); - if (old != null) - g = old; - } - return g; - } + private Bootstrap newBootstrap() { + Bootstrap b = new Bootstrap(); + b.group(eventLoopGroup).channel(channelClass); + + SocketOptions options = configuration.getSocketOptions(); + + b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeoutMillis()); + Boolean keepAlive = options.getKeepAlive(); + if (keepAlive != null) b.option(ChannelOption.SO_KEEPALIVE, keepAlive); + Boolean reuseAddress = options.getReuseAddress(); + if (reuseAddress != null) b.option(ChannelOption.SO_REUSEADDR, reuseAddress); + Integer soLinger = options.getSoLinger(); + if (soLinger != null) b.option(ChannelOption.SO_LINGER, soLinger); + Boolean tcpNoDelay = options.getTcpNoDelay(); + if (tcpNoDelay != null) b.option(ChannelOption.TCP_NODELAY, tcpNoDelay); + Integer receiveBufferSize = options.getReceiveBufferSize(); + if (receiveBufferSize != null) b.option(ChannelOption.SO_RCVBUF, receiveBufferSize); + Integer sendBufferSize = options.getSendBufferSize(); + if (sendBufferSize != null) b.option(ChannelOption.SO_SNDBUF, sendBufferSize); + + nettyOptions.afterBootstrapInitialized(b); + return b; + } - long getReadTimeoutMillis() { - return configuration.getSocketOptions().getReadTimeoutMillis(); - } + void shutdown() { + // Make sure we skip creating connection from now on. + isShutdown = true; - private Bootstrap newBootstrap() { - Bootstrap b = new Bootstrap(); - b.group(eventLoopGroup) - .channel(channelClass); - - SocketOptions options = configuration.getSocketOptions(); - - b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeoutMillis()); - Boolean keepAlive = options.getKeepAlive(); - if (keepAlive != null) - b.option(ChannelOption.SO_KEEPALIVE, keepAlive); - Boolean reuseAddress = options.getReuseAddress(); - if (reuseAddress != null) - b.option(ChannelOption.SO_REUSEADDR, reuseAddress); - Integer soLinger = options.getSoLinger(); - if (soLinger != null) - b.option(ChannelOption.SO_LINGER, soLinger); - Boolean tcpNoDelay = options.getTcpNoDelay(); - if (tcpNoDelay != null) - b.option(ChannelOption.TCP_NODELAY, tcpNoDelay); - Integer receiveBufferSize = options.getReceiveBufferSize(); - if (receiveBufferSize != null) - b.option(ChannelOption.SO_RCVBUF, receiveBufferSize); - Integer sendBufferSize = options.getSendBufferSize(); - if (sendBufferSize != null) - b.option(ChannelOption.SO_SNDBUF, sendBufferSize); - - nettyOptions.afterBootstrapInitialized(b); - return b; - } + // All channels should be closed already, we call this just to be sure. And we know + // we're not on an I/O thread or anything, so just call await. + allChannels.close().awaitUninterruptibly(); - void shutdown() { - // Make sure we skip creating connection from now on. - isShutdown = true; + nettyOptions.onClusterClose(eventLoopGroup); + nettyOptions.onClusterClose(timer); + } + } - // All channels should be closed already, we call this just to be sure. And we know - // we're not on an I/O thread or anything, so just call await. 
- allChannels.close().awaitUninterruptibly(); + private static final class Flusher implements Runnable { + final WeakReference eventLoopRef; + final Queue queued = new ConcurrentLinkedQueue(); + final AtomicBoolean running = new AtomicBoolean(false); + final HashSet channels = new HashSet(); - nettyOptions.onClusterClose(eventLoopGroup); - nettyOptions.onClusterClose(timer); - } + private Flusher(EventLoop eventLoop) { + this.eventLoopRef = new WeakReference(eventLoop); } - private static final class Flusher implements Runnable { - final WeakReference eventLoopRef; - final Queue queued = new ConcurrentLinkedQueue(); - final AtomicBoolean running = new AtomicBoolean(false); - final HashSet channels = new HashSet(); - int runsWithNoWork = 0; + void start() { + if (!running.get() && running.compareAndSet(false, true)) { + EventLoop eventLoop = eventLoopRef.get(); + if (eventLoop != null) eventLoop.execute(this); + } + } - private Flusher(EventLoop eventLoop) { - this.eventLoopRef = new WeakReference(eventLoop); - } + @Override + public void run() { - void start() { - if (!running.get() && running.compareAndSet(false, true)) { - EventLoop eventLoop = eventLoopRef.get(); - if (eventLoop != null) - eventLoop.execute(this); - } + FlushItem flush; + while (null != (flush = queued.poll())) { + Channel channel = flush.channel; + if (channel.isActive()) { + channels.add(channel); + channel.write(flush.request).addListener(flush.listener); } + } - @Override - public void run() { - - boolean doneWork = false; - FlushItem flush; - while (null != (flush = queued.poll())) { - Channel channel = flush.channel; - if (channel.isActive()) { - channels.add(channel); - channel.write(flush.request).addListener(flush.listener); - doneWork = true; - } - } - - // Always flush what we have (don't artificially delay to try to coalesce more messages) - for (Channel channel : channels) - channel.flush(); - channels.clear(); + // Always flush what we have (don't artificially delay to try to coalesce more messages) + for (Channel channel : channels) channel.flush(); + channels.clear(); - if (doneWork) { - runsWithNoWork = 0; - } else { - // either reschedule or cancel - if (++runsWithNoWork > 5) { - running.set(false); - if (queued.isEmpty() || !running.compareAndSet(false, true)) - return; - } - } + // either reschedule or cancel + running.set(false); + if (queued.isEmpty() || !running.compareAndSet(false, true)) return; - EventLoop eventLoop = eventLoopRef.get(); - if (eventLoop != null && !eventLoop.isShuttingDown()) { - eventLoop.schedule(this, 10000, TimeUnit.NANOSECONDS); - } + EventLoop eventLoop = eventLoopRef.get(); + if (eventLoop != null && !eventLoop.isShuttingDown()) { + if (FLUSHER_SCHEDULE_PERIOD_NS > 0) { + eventLoop.schedule(this, FLUSHER_SCHEDULE_PERIOD_NS, TimeUnit.NANOSECONDS); + } else { + eventLoop.execute(this); } + } } + } - private static final ConcurrentMap flusherLookup = new MapMaker() - .concurrencyLevel(16) - .weakKeys() - .makeMap(); + private static final ConcurrentMap flusherLookup = + new MapMaker().concurrencyLevel(16).weakKeys().makeMap(); - private static class FlushItem { - final Channel channel; - final Object request; - final ChannelFutureListener listener; + private static class FlushItem { + final Channel channel; + final Object request; + final ChannelFutureListener listener; - private FlushItem(Channel channel, Object request, ChannelFutureListener listener) { - this.channel = channel; - this.request = request; - this.listener = listener; - } + private FlushItem(Channel channel, 
Object request, ChannelFutureListener listener) { + this.channel = channel; + this.request = request; + this.listener = listener; } - - private void flush(FlushItem item) { - EventLoop loop = item.channel.eventLoop(); - Flusher flusher = flusherLookup.get(loop); - if (flusher == null) { - Flusher alt = flusherLookup.putIfAbsent(loop, flusher = new Flusher(loop)); - if (alt != null) - flusher = alt; - } - - flusher.queued.add(item); - flusher.start(); + } + + private void flush(FlushItem item) { + EventLoop loop = item.channel.eventLoop(); + Flusher flusher = flusherLookup.get(loop); + if (flusher == null) { + Flusher alt = flusherLookup.putIfAbsent(loop, flusher = new Flusher(loop)); + if (alt != null) flusher = alt; } - class Dispatcher extends SimpleChannelInboundHandler { + flusher.queued.add(item); + flusher.start(); + } - final StreamIdGenerator streamIdHandler; - private final ConcurrentMap pending = new ConcurrentHashMap(); + class Dispatcher extends SimpleChannelInboundHandler { - Dispatcher() { - ProtocolVersion protocolVersion = factory.protocolVersion; - if (protocolVersion == null) { - // This happens for the first control connection because the protocol version has not been - // negotiated yet. - protocolVersion = ProtocolVersion.V2; - } - streamIdHandler = StreamIdGenerator.newInstance(protocolVersion); - } + final StreamIdGenerator streamIdHandler; + private final ConcurrentMap pending = + new ConcurrentHashMap(); - void add(ResponseHandler handler) { - ResponseHandler old = pending.put(handler.streamId, handler); - assert old == null; - } - - void removeHandler(ResponseHandler handler, boolean releaseStreamId) { - - // If we don't release the ID, mark first so that we can rely later on the fact that if - // we receive a response for an ID with no handler, it's that this ID has been marked. - if (!releaseStreamId) - streamIdHandler.mark(handler.streamId); - - // If a RequestHandler is cancelled right when the response arrives, this method (called with releaseStreamId=false) will race with messageReceived. - // messageReceived could have already released the streamId, which could have already been reused by another request. We must not remove the handler - // if it's not ours, because that would cause the other request to hang forever. - boolean removed = pending.remove(handler.streamId, handler); - if (!removed) { - // We raced, so if we marked the streamId above, that was wrong. - if (!releaseStreamId) - streamIdHandler.unmark(handler.streamId); - return; - } - handler.cancelTimeout(); + Dispatcher() { + ProtocolVersion protocolVersion = factory.protocolVersion; + if (protocolVersion == null) { + // This happens for the first control connection because the protocol version has not been + // negotiated yet. + protocolVersion = ProtocolVersion.V2; + } + streamIdHandler = StreamIdGenerator.newInstance(protocolVersion); + } - if (releaseStreamId) - streamIdHandler.release(handler.streamId); + void add(ResponseHandler handler) { + ResponseHandler old = pending.put(handler.streamId, handler); + assert old == null; + } - if (isClosed()) - tryTerminate(false); - } + void removeHandler(ResponseHandler handler, boolean releaseStreamId) { - @Override - protected void channelRead0(ChannelHandlerContext ctx, Message.Response response) throws Exception { - int streamId = response.getStreamId(); + // If we don't release the ID, mark first so that we can rely later on the fact that if + // we receive a response for an ID with no handler, it's that this ID has been marked. 
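The Flusher and flush(FlushItem) logic above implement a single-consumer coalescing pattern: any thread may enqueue a write, but at most one drain task runs on the channel's event loop at a time, and each pass ends with one flush() per touched channel. A minimal sketch of that pattern, assuming a single-threaded Executor as a stand-in for Netty's EventLoop; the class and method names are illustrative, not the driver's API:

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;

final class CoalescingDrainer<T> implements Runnable {
  private final Executor executor; // assumed single-threaded, like a Netty EventLoop
  private final Queue<T> queued = new ConcurrentLinkedQueue<T>();
  private final AtomicBoolean running = new AtomicBoolean(false);

  CoalescingDrainer(Executor executor) {
    this.executor = executor;
  }

  /** Called from any thread: enqueue the item and start a drain only if none is running. */
  void enqueue(T item) {
    queued.add(item);
    if (!running.get() && running.compareAndSet(false, true)) {
      executor.execute(this);
    }
  }

  /** Runs on the executor: drain everything currently queued, then give up the flag. */
  @Override
  public void run() {
    T item;
    while ((item = queued.poll()) != null) {
      process(item); // in the driver: channel.write(...), followed by one flush() per channel
    }
    // Release the flag, then re-check: a producer may have enqueued between the last
    // poll() and set(false); if so, try to reclaim the flag and run again.
    running.set(false);
    if (!queued.isEmpty() && running.compareAndSet(false, true)) {
      executor.execute(this);
    }
  }

  private void process(T item) {
    System.out.println("flushing " + item);
  }

  public static void main(String[] args) {
    // For the demo, run tasks inline; in the driver this is the channel's event loop.
    CoalescingDrainer<String> drainer = new CoalescingDrainer<String>(Runnable::run);
    drainer.enqueue("request 1");
    drainer.enqueue("request 2");
  }
}
```

The version in this hunk re-runs either immediately or after FLUSHER_SCHEDULE_PERIOD_NS nanoseconds when more work has arrived, instead of always executing right away.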
+ if (!releaseStreamId) streamIdHandler.mark(handler.streamId); - if (logger.isTraceEnabled()) - logger.trace("{}, stream {}, received: {}", Connection.this, streamId, asDebugString(response)); + // If a RequestHandler is cancelled right when the response arrives, this method (called with + // releaseStreamId=false) will race with messageReceived. + // messageReceived could have already released the streamId, which could have already been + // reused by another request. We must not remove the handler + // if it's not ours, because that would cause the other request to hang forever. + boolean removed = pending.remove(handler.streamId, handler); + if (!removed) { + // We raced, so if we marked the streamId above, that was wrong. + if (!releaseStreamId) streamIdHandler.unmark(handler.streamId); + return; + } + handler.cancelTimeout(); - if (streamId < 0) { - factory.defaultHandler.handle(response); - return; - } + if (releaseStreamId) streamIdHandler.release(handler.streamId); - ResponseHandler handler = pending.remove(streamId); - streamIdHandler.release(streamId); - if (handler == null) { - /* - * During normal operation, we should not receive responses for which we don't have a handler. There is - * two cases however where this can happen: - * 1) The connection has been defuncted due to some internal error and we've raced between removing the - * handler and actually closing the connection; since the original error has been logged, we're fine - * ignoring this completely. - * 2) This request has timed out. In that case, we've already switched to another host (or errored out - * to the user). So log it for debugging purpose, but it's fine ignoring otherwise. - */ - streamIdHandler.unmark(streamId); - if (logger.isDebugEnabled()) - logger.debug("{} Response received on stream {} but no handler set anymore (either the request has " - + "timed out or it was closed due to another error). Received message is {}", Connection.this, streamId, asDebugString(response)); - return; - } - handler.cancelTimeout(); - handler.callback.onSet(Connection.this, response, System.nanoTime() - handler.startTime, handler.retryCount); + if (isClosed()) tryTerminate(false); + } - // If we happen to be closed and we're the last outstanding request, we need to terminate the connection - // (note: this is racy as the signaling can be called more than once, but that's not a problem) - if (isClosed()) - tryTerminate(false); - } + @Override + protected void channelRead0(ChannelHandlerContext ctx, Message.Response response) + throws Exception { + int streamId = response.getStreamId(); + + if (logger.isTraceEnabled()) + logger.trace( + "{}, stream {}, received: {}", Connection.this, streamId, asDebugString(response)); + + if (streamId < 0) { + factory.defaultHandler.handle(response); + return; + } + + ResponseHandler handler = pending.remove(streamId); + streamIdHandler.release(streamId); + if (handler == null) { + /* + * During normal operation, we should not receive responses for which we don't have a handler. There is + * two cases however where this can happen: + * 1) The connection has been defuncted due to some internal error and we've raced between removing the + * handler and actually closing the connection; since the original error has been logged, we're fine + * ignoring this completely. + * 2) This request has timed out. In that case, we've already switched to another host (or errored out + * to the user). So log it for debugging purpose, but it's fine ignoring otherwise. 
+ */ + streamIdHandler.unmark(streamId); + if (logger.isDebugEnabled()) + logger.debug( + "{} Response received on stream {} but no handler set anymore (either the request has " + + "timed out or it was closed due to another error). Received message is {}", + Connection.this, + streamId, + asDebugString(response)); + return; + } + handler.cancelTimeout(); + handler.callback.onSet( + Connection.this, response, System.nanoTime() - handler.startTime, handler.retryCount); + + // If we happen to be closed and we're the last outstanding request, we need to terminate the + // connection + // (note: this is racy as the signaling can be called more than once, but that's not a + // problem) + if (isClosed()) tryTerminate(false); + } - @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { - if (isInitialized && !isClosed() && evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) { - logger.debug("{} was inactive for {} seconds, sending heartbeat", Connection.this, factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds()); - write(HEARTBEAT_CALLBACK); - } - } + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + if (isInitialized + && !isClosed() + && evt instanceof IdleStateEvent + && ((IdleStateEvent) evt).state() == READER_IDLE) { + logger.debug( + "{} was inactive for {} seconds, sending heartbeat", + Connection.this, + factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds()); + write(HEARTBEAT_CALLBACK); + } + } - // Make sure we don't print huge responses in debug/error logs. - private String asDebugString(Object obj) { - if (obj == null) - return "null"; + // Make sure we don't print huge responses in debug/error logs. + private String asDebugString(Object obj) { + if (obj == null) return "null"; - String msg = obj.toString(); - if (msg.length() < 500) - return msg; + String msg = obj.toString(); + if (msg.length() < 500) return msg; - return msg.substring(0, 500) + "... [message of size " + msg.length() + " truncated]"; - } + return msg.substring(0, 500) + "... [message of size " + msg.length() + " truncated]"; + } - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + if (logger.isDebugEnabled()) + logger.debug(String.format("%s connection error", Connection.this), cause); + + // Ignore exception while writing, this will be handled by write() directly + if (writer.get() > 0) return; + + if (cause instanceof DecoderException) { + Throwable error = cause.getCause(); + // Special case, if we encountered a FrameTooLongException, raise exception on handler and + // don't defunct it since + // the connection is in an ok state. 
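The Dispatcher above multiplexes many in-flight requests over a single connection by keying each pending ResponseHandler on a protocol stream id; the response carries the same id back, and ids whose handler has already been removed (timed-out or cancelled requests) are simply ignored. An illustrative sketch of that routing idea, with CompletableFuture standing in for the driver's ResponseHandler and an unbounded counter instead of StreamIdGenerator:

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

final class StreamMultiplexer<R> {
  private final AtomicInteger nextId = new AtomicInteger(); // the driver bounds ids per protocol version
  private final Map<Integer, CompletableFuture<R>> pending = new ConcurrentHashMap<>();

  /** Registers a new in-flight request and returns the stream id to put on the wire. */
  int register(CompletableFuture<R> future) {
    int id = nextId.getAndIncrement();
    pending.put(id, future);
    return id;
  }

  /** Routes a response back to its caller; orphaned ids (timed-out requests) are ignored. */
  void onResponse(int streamId, R response) {
    CompletableFuture<R> future = pending.remove(streamId);
    if (future != null) {
      future.complete(response);
    }
  }

  /** Fails every in-flight request, e.g. when the connection dies. */
  void errorOutAll(Throwable cause) {
    for (CompletableFuture<R> future : pending.values()) {
      future.completeExceptionally(cause);
    }
    pending.clear();
  }
}
```

The real code additionally has to mark/unmark stream ids to survive the cancellation race described in removeHandler, and reports BusyConnectionException when no id is free.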
+ if (error instanceof FrameTooLongException) { + FrameTooLongException ftle = (FrameTooLongException) error; + int streamId = ftle.getStreamId(); + ResponseHandler handler = pending.remove(streamId); + streamIdHandler.release(streamId); + if (handler == null) { + streamIdHandler.unmark(streamId); if (logger.isDebugEnabled()) - logger.debug(String.format("%s connection error", Connection.this), cause); - - // Ignore exception while writing, this will be handled by write() directly - if (writer.get() > 0) - return; - - if (cause instanceof DecoderException) { - Throwable error = cause.getCause(); - // Special case, if we encountered a FrameTooLongException, raise exception on handler and don't defunct it since - // the connection is in an ok state. - if (error != null && error instanceof FrameTooLongException) { - FrameTooLongException ftle = (FrameTooLongException) error; - int streamId = ftle.getStreamId(); - ResponseHandler handler = pending.remove(streamId); - streamIdHandler.release(streamId); - if (handler == null) { - streamIdHandler.unmark(streamId); - if (logger.isDebugEnabled()) - logger.debug("{} FrameTooLongException received on stream {} but no handler set anymore (either the request has " - + "timed out or it was closed due to another error).", Connection.this, streamId); - return; - } - handler.cancelTimeout(); - handler.callback.onException(Connection.this, ftle, System.nanoTime() - handler.startTime, handler.retryCount); - return; - } - } - defunct(new TransportException(address, String.format("Unexpected exception triggered (%s)", cause), cause)); - } + logger.debug( + "{} FrameTooLongException received on stream {} but no handler set anymore (either the request has " + + "timed out or it was closed due to another error).", + Connection.this, + streamId); + return; + } + handler.cancelTimeout(); + handler.callback.onException( + Connection.this, ftle, System.nanoTime() - handler.startTime, handler.retryCount); + return; + } else if (error instanceof CrcMismatchException) { + // Fall back to the defunct call below, but we want a clear warning in the logs + logger.warn("CRC mismatch while decoding a response, dropping the connection", error); + } + } + defunct( + new TransportException( + endPoint, String.format("Unexpected exception triggered (%s)", cause), cause)); + } - void errorOutAllHandler(ConnectionException ce) { - Iterator iter = pending.values().iterator(); - while (iter.hasNext()) { - ResponseHandler handler = iter.next(); - handler.cancelTimeout(); - handler.callback.onException(Connection.this, ce, System.nanoTime() - handler.startTime, handler.retryCount); - iter.remove(); - } - } + void errorOutAllHandler(ConnectionException ce) { + Iterator iter = pending.values().iterator(); + while (iter.hasNext()) { + ResponseHandler handler = iter.next(); + handler.cancelTimeout(); + handler.callback.onException( + Connection.this, ce, System.nanoTime() - handler.startTime, handler.retryCount); + iter.remove(); + } } + } - private class ChannelCloseListener implements ChannelFutureListener { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - // If we've closed the channel client side then we don't really want to defunct the connection, but - // if there is remaining thread waiting on us, we still want to wake them up - if (!isInitialized || isClosed()) { - dispatcher.errorOutAllHandler(new TransportException(address, "Channel has been closed")); - // we still want to force so that the future completes - 
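exceptionCaught above separates per-request decoding problems (a FrameTooLongException fails only the request on that stream) from errors that poison the whole connection (everything else is defuncted, with an explicit warning for CrcMismatchException). A generic sketch of that classification in a Netty inbound handler; the predicate and the single-request failure callback are placeholders, not the driver's types:

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.DecoderException;

class ErrorClassifyingHandler extends ChannelInboundHandlerAdapter {
  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    // Netty wraps decoder failures; the interesting error is usually the cause.
    Throwable error = (cause instanceof DecoderException) ? cause.getCause() : cause;
    if (isPerRequestError(error)) {
      // Recoverable: the connection itself is still healthy, so fail only the offending
      // request (the driver looks up the pending handler for the frame's stream id).
      failSingleRequest(error);
    } else {
      // Fatal: give up on this connection and let the reconnection machinery take over.
      ctx.close();
    }
  }

  private boolean isPerRequestError(Throwable error) {
    // Placeholder predicate; the driver checks for FrameTooLongException here.
    return error != null && error.getClass().getSimpleName().contains("TooLong");
  }

  private void failSingleRequest(Throwable error) {
    System.err.println("request failed: " + error);
  }
}
```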
Connection.this.closeAsync().force(); - } else - defunct(new TransportException(address, "Channel has been closed")); - } + private class ChannelCloseListener implements ChannelFutureListener { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + // If we've closed the channel client side then we don't really want to defunct the + // connection, but + // if there is remaining thread waiting on us, we still want to wake them up + if (!isInitialized || isClosed()) { + dispatcher.errorOutAllHandler(new TransportException(endPoint, "Channel has been closed")); + // we still want to force so that the future completes + Connection.this.closeAsync().force(); + } else defunct(new TransportException(endPoint, "Channel has been closed")); } + } - private static final ResponseCallback HEARTBEAT_CALLBACK = new ResponseCallback() { + private static final ResponseCallback HEARTBEAT_CALLBACK = + new ResponseCallback() { @Override public Message.Request request() { - return new Requests.Options(); + return new Requests.Options(); } @Override public int retryCount() { - return 0; // no retries here + return 0; // no retries here } @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - switch (response.type) { - case SUPPORTED: - logger.debug("{} heartbeat query succeeded", connection); - break; - default: - fail(connection, new ConnectionException(connection.address, "Unexpected heartbeat response: " + response)); - } + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + switch (response.type) { + case SUPPORTED: + logger.debug("{} heartbeat query succeeded", connection); + break; + default: + fail( + connection, + new ConnectionException( + connection.endPoint, "Unexpected heartbeat response: " + response)); + } } @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - // Nothing to do: the connection is already defunct if we arrive here + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + // Nothing to do: the connection is already defunct if we arrive here } @Override public boolean onTimeout(Connection connection, long latency, int retryCount) { - fail(connection, new ConnectionException(connection.address, "Heartbeat query timed out")); - return true; + fail( + connection, + new ConnectionException(connection.endPoint, "Heartbeat query timed out")); + return true; } private void fail(Connection connection, Exception e) { - connection.defunct(e); + connection.defunct(e); } - }; - - private class ConnectionCloseFuture extends CloseFuture { + }; - @Override - public ConnectionCloseFuture force() { - // Note: we must not call releaseExternalResources on the bootstrap, because this shutdown the executors, which are shared + private class ConnectionCloseFuture extends CloseFuture { - // This method can be thrown during initialization, at which point channel is not yet set. This is ok. - if (channel == null) { - set(null); - return this; + @Override + public ConnectionCloseFuture force() { + // Note: we must not call releaseExternalResources on the bootstrap, because this shutdown the + // executors, which are shared + + // This method can be thrown during initialization, at which point channel is not yet set. + // This is ok. + if (channel == null) { + set(null); + return this; + } + + // We're going to close this channel. 
If anyone is waiting on that connection, we should + // defunct it otherwise it'll wait + // forever. In general this won't happen since we get there only when all ongoing query are + // done, but this can happen + // if the shutdown is forced. This is a no-op if there is no handler set anymore. + dispatcher.errorOutAllHandler(new TransportException(endPoint, "Connection has been closed")); + + ChannelFuture future = channel.close(); + future.addListener( + new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) { + factory.allChannels.remove(channel); + if (future.cause() != null) { + logger.warn("Error closing channel", future.cause()); + ConnectionCloseFuture.this.setException(future.cause()); + } else ConnectionCloseFuture.this.set(null); } - - // We're going to close this channel. If anyone is waiting on that connection, we should defunct it otherwise it'll wait - // forever. In general this won't happen since we get there only when all ongoing query are done, but this can happen - // if the shutdown is forced. This is a no-op if there is no handler set anymore. - dispatcher.errorOutAllHandler(new TransportException(address, "Connection has been closed")); - - ChannelFuture future = channel.close(); - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) { - factory.allChannels.remove(channel); - if (future.cause() != null) { - logger.warn("Error closing channel", future.cause()); - ConnectionCloseFuture.this.setException(future.cause()); - } else - ConnectionCloseFuture.this.set(null); - } - }); - return this; - } + }); + return this; } + } - private class SetKeyspaceAttempt { - private final String keyspace; - private final ListenableFuture future; - - SetKeyspaceAttempt(String keyspace, ListenableFuture future) { - this.keyspace = keyspace; - this.future = future; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (!(o instanceof SetKeyspaceAttempt)) - return false; + private class SetKeyspaceAttempt { + private final String keyspace; + private final ListenableFuture future; - SetKeyspaceAttempt that = (SetKeyspaceAttempt) o; + SetKeyspaceAttempt(String keyspace, ListenableFuture future) { + this.keyspace = keyspace; + this.future = future; + } - return keyspace != null ? keyspace.equals(that.keyspace) : that.keyspace == null; + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof SetKeyspaceAttempt)) return false; - } + SetKeyspaceAttempt that = (SetKeyspaceAttempt) o; - @Override - public int hashCode() { - return keyspace != null ? keyspace.hashCode() : 0; - } + return keyspace != null ? keyspace.equals(that.keyspace) : that.keyspace == null; } - static class Future extends AbstractFuture implements RequestHandler.Callback { + @Override + public int hashCode() { + return keyspace != null ? 
keyspace.hashCode() : 0; + } + } - private final Message.Request request; - private volatile InetSocketAddress address; + static class Future extends AbstractFuture implements RequestHandler.Callback { - Future(Message.Request request) { - this.request = request; - } + private final Message.Request request; + private volatile EndPoint endPoint; - @Override - public void register(RequestHandler handler) { - // noop, we don't care about the handler here so far - } + Future(Message.Request request) { + this.request = request; + } - @Override - public Message.Request request() { - return request; - } + @Override + public void register(RequestHandler handler) { + // noop, we don't care about the handler here so far + } - @Override - public int retryCount() { - // This is ignored, as there is no retry logic in this class - return 0; - } + @Override + public Message.Request request() { + return request; + } - @Override - public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) { - onSet(connection, response, latency, 0); - } + @Override + public int retryCount() { + // This is ignored, as there is no retry logic in this class + return 0; + } - @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - this.address = connection.address; - super.set(response); - } + @Override + public void onSet( + Connection connection, + Message.Response response, + ExecutionInfo info, + Statement statement, + long latency) { + onSet(connection, response, latency, 0); + } - @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - // If all nodes are down, we will get a null connection here. This is fine, if we have - // an exception, consumers shouldn't assume the address is not null. - if (connection != null) - this.address = connection.address; - super.setException(exception); - } + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + this.endPoint = connection.endPoint; + super.set(response); + } - @Override - public boolean onTimeout(Connection connection, long latency, int retryCount) { - assert connection != null; // We always timeout on a specific connection, so this shouldn't be null - this.address = connection.address; - return super.setException(new OperationTimedOutException(connection.address)); - } + @Override + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + // If all nodes are down, we will get a null connection here. This is fine, if we have + // an exception, consumers shouldn't assume the address is not null. 
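The Future class in this hunk adapts the callback-style ResponseCallback contract to a future, remembering which endpoint produced the outcome so callers can attribute the response or the error. A simplified sketch of that adapter built on CompletableFuture, with InetSocketAddress standing in for the driver's EndPoint (names are illustrative):

```java
import java.net.InetSocketAddress;
import java.util.concurrent.CompletableFuture;

final class ResponseFuture<R> extends CompletableFuture<R> {
  private volatile InetSocketAddress endPoint;

  /** Success path: remember who answered, then complete. */
  void onSet(InetSocketAddress from, R response) {
    this.endPoint = from;
    complete(response);
  }

  /** Failure path: "from" may be null if no connection could be obtained at all. */
  void onException(InetSocketAddress from, Exception error) {
    if (from != null) {
      this.endPoint = from;
    }
    completeExceptionally(error);
  }

  /** The endpoint that answered or failed; possibly null on early failures. */
  InetSocketAddress getEndPoint() {
    return endPoint;
  }
}
```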
+ if (connection != null) this.endPoint = connection.endPoint; + super.setException(exception); + } - InetSocketAddress getAddress() { - return address; - } + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + assert connection + != null; // We always timeout on a specific connection, so this shouldn't be null + this.endPoint = connection.endPoint; + return super.setException(new OperationTimedOutException(connection.endPoint)); } - interface ResponseCallback { - Message.Request request(); + EndPoint getEndPoint() { + return endPoint; + } + } - int retryCount(); + interface ResponseCallback { + Message.Request request(); - void onSet(Connection connection, Message.Response response, long latency, int retryCount); + int retryCount(); - void onException(Connection connection, Exception exception, long latency, int retryCount); + void onSet(Connection connection, Message.Response response, long latency, int retryCount); - boolean onTimeout(Connection connection, long latency, int retryCount); - } + void onException(Connection connection, Exception exception, long latency, int retryCount); - static class ResponseHandler { + boolean onTimeout(Connection connection, long latency, int retryCount); + } - final Connection connection; - final int streamId; - final ResponseCallback callback; - final int retryCount; - private final long readTimeoutMillis; + static class ResponseHandler { - private final long startTime; - private volatile Timeout timeout; + final Connection connection; + final int streamId; + final ResponseCallback callback; + final int retryCount; + private final long readTimeoutMillis; - private final AtomicBoolean isCancelled = new AtomicBoolean(); + private final long startTime; + private volatile Timeout timeout; - ResponseHandler(Connection connection, long statementReadTimeoutMillis, ResponseCallback callback) throws BusyConnectionException { - this.connection = connection; - this.readTimeoutMillis = (statementReadTimeoutMillis >= 0) ? statementReadTimeoutMillis : connection.factory.getReadTimeoutMillis(); - this.streamId = connection.dispatcher.streamIdHandler.next(); - if (streamId == -1) - throw new BusyConnectionException(connection.address); - this.callback = callback; - this.retryCount = callback.retryCount(); + private final AtomicBoolean isCancelled = new AtomicBoolean(); - this.startTime = System.nanoTime(); - } + ResponseHandler( + Connection connection, long statementReadTimeoutMillis, ResponseCallback callback) + throws BusyConnectionException { + this.connection = connection; + this.readTimeoutMillis = + (statementReadTimeoutMillis >= 0) + ? statementReadTimeoutMillis + : connection.factory.getReadTimeoutMillis(); + this.streamId = connection.dispatcher.streamIdHandler.next(); + if (streamId == -1) throw new BusyConnectionException(connection.endPoint); + this.callback = callback; + this.retryCount = callback.retryCount(); - void startTimeout() { - this.timeout = this.readTimeoutMillis <= 0 ? null : connection.factory.timer.newTimeout(onTimeoutTask(), this.readTimeoutMillis, TimeUnit.MILLISECONDS); - } + this.startTime = System.nanoTime(); + } - void cancelTimeout() { - if (timeout != null) - timeout.cancel(); - } + void startTimeout() { + this.timeout = + this.readTimeoutMillis <= 0 + ? 
null + : connection.factory.timer.newTimeout( + onTimeoutTask(), this.readTimeoutMillis, TimeUnit.MILLISECONDS); + } - boolean cancelHandler() { - if (!isCancelled.compareAndSet(false, true)) - return false; + void cancelTimeout() { + if (timeout != null) timeout.cancel(); + } - // We haven't really received a response: we want to remove the handle because we gave up on that - // request and there is no point in holding the handler, but we don't release the streamId. If we - // were, a new request could reuse that ID but get the answer to the request we just gave up on instead - // of its own answer, and we would have no way to detect that. - connection.dispatcher.removeHandler(this, false); - return true; - } + boolean cancelHandler() { + if (!isCancelled.compareAndSet(false, true)) return false; + + // We haven't really received a response: we want to remove the handle because we gave up on + // that + // request and there is no point in holding the handler, but we don't release the streamId. If + // we + // were, a new request could reuse that ID but get the answer to the request we just gave up + // on instead + // of its own answer, and we would have no way to detect that. + connection.dispatcher.removeHandler(this, false); + return true; + } - private TimerTask onTimeoutTask() { - return new TimerTask() { - @Override - public void run(Timeout timeout) { - if (callback.onTimeout(connection, System.nanoTime() - startTime, retryCount)) - cancelHandler(); - } - }; + private TimerTask onTimeoutTask() { + return new TimerTask() { + @Override + public void run(Timeout timeout) { + if (callback.onTimeout(connection, System.nanoTime() - startTime, retryCount)) + cancelHandler(); } + }; + } + } + + interface DefaultResponseHandler { + void handle(Message.Response response); + } + + private static class Initializer extends ChannelInitializer { + // Stateless handlers + private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); + private static final Message.ProtocolEncoder messageEncoderV1 = + new Message.ProtocolEncoder(ProtocolVersion.V1); + private static final Message.ProtocolEncoder messageEncoderV2 = + new Message.ProtocolEncoder(ProtocolVersion.V2); + private static final Message.ProtocolEncoder messageEncoderV3 = + new Message.ProtocolEncoder(ProtocolVersion.V3); + private static final Message.ProtocolEncoder messageEncoderV4 = + new Message.ProtocolEncoder(ProtocolVersion.V4); + private static final Message.ProtocolEncoder messageEncoderV5 = + new Message.ProtocolEncoder(ProtocolVersion.V5); + private static final Message.ProtocolEncoder messageEncoderV6 = + new Message.ProtocolEncoder(ProtocolVersion.V6); + private static final Frame.Encoder frameEncoder = new Frame.Encoder(); + + private final ProtocolVersion protocolVersion; + private final Connection connection; + private final FrameCompressor compressor; + private final SSLOptions sslOptions; + private final NettyOptions nettyOptions; + private final ChannelHandler idleStateHandler; + private final CodecRegistry codecRegistry; + private final Metrics metrics; + + Initializer( + Connection connection, + ProtocolVersion protocolVersion, + FrameCompressor compressor, + SSLOptions sslOptions, + int heartBeatIntervalSeconds, + NettyOptions nettyOptions, + CodecRegistry codecRegistry, + Metrics metrics) { + this.connection = connection; + this.protocolVersion = protocolVersion; + this.compressor = compressor; + this.sslOptions = sslOptions; + this.nettyOptions = nettyOptions; + this.codecRegistry = 
codecRegistry; + this.idleStateHandler = new IdleStateHandler(heartBeatIntervalSeconds, 0, 0); + this.metrics = metrics; } - interface DefaultResponseHandler { - void handle(Message.Response response); - } - - private static class Initializer extends ChannelInitializer { - // Stateless handlers - private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); - private static final Message.ProtocolEncoder messageEncoderV1 = new Message.ProtocolEncoder(ProtocolVersion.V1); - private static final Message.ProtocolEncoder messageEncoderV2 = new Message.ProtocolEncoder(ProtocolVersion.V2); - private static final Message.ProtocolEncoder messageEncoderV3 = new Message.ProtocolEncoder(ProtocolVersion.V3); - private static final Message.ProtocolEncoder messageEncoderV4 = new Message.ProtocolEncoder(ProtocolVersion.V4); - private static final Message.ProtocolEncoder messageEncoderV5 = new Message.ProtocolEncoder(ProtocolVersion.V5); - private static final Frame.Encoder frameEncoder = new Frame.Encoder(); - - private final ProtocolVersion protocolVersion; - private final Connection connection; - private final FrameCompressor compressor; - private final SSLOptions sslOptions; - private final NettyOptions nettyOptions; - private final ChannelHandler idleStateHandler; - private final CodecRegistry codecRegistry; - - Initializer(Connection connection, ProtocolVersion protocolVersion, FrameCompressor compressor, SSLOptions sslOptions, int heartBeatIntervalSeconds, NettyOptions nettyOptions, CodecRegistry codecRegistry) { - this.connection = connection; - this.protocolVersion = protocolVersion; - this.compressor = compressor; - this.sslOptions = sslOptions; - this.nettyOptions = nettyOptions; - this.codecRegistry = codecRegistry; - this.idleStateHandler = new IdleStateHandler(heartBeatIntervalSeconds, 0, 0); - } + @Override + protected void initChannel(SocketChannel channel) throws Exception { - @Override - protected void initChannel(SocketChannel channel) throws Exception { + // set the codec registry so that it can be accessed by ProtocolDecoder + channel.attr(Message.CODEC_REGISTRY_ATTRIBUTE_KEY).set(codecRegistry); - // set the codec registry so that it can be accessed by ProtocolDecoder - channel.attr(Message.CODEC_REGISTRY_ATTRIBUTE_KEY).set(codecRegistry); + ChannelPipeline pipeline = channel.pipeline(); - ChannelPipeline pipeline = channel.pipeline(); + if (sslOptions != null) { + SslHandler handler; + if (sslOptions instanceof ExtendedRemoteEndpointAwareSslOptions) { + handler = + ((ExtendedRemoteEndpointAwareSslOptions) sslOptions) + .newSSLHandler(channel, connection.endPoint); - if (sslOptions != null) { - if (sslOptions instanceof RemoteEndpointAwareSSLOptions) { - SslHandler handler = ((RemoteEndpointAwareSSLOptions) sslOptions).newSSLHandler(channel, connection.address); - pipeline.addLast("ssl", handler); - } else { - @SuppressWarnings("deprecation") - SslHandler handler = sslOptions.newSSLHandler(channel); - pipeline.addLast("ssl", handler); - } - } + } else if (sslOptions instanceof RemoteEndpointAwareSSLOptions) { + handler = + ((RemoteEndpointAwareSSLOptions) sslOptions) + .newSSLHandler(channel, connection.endPoint.resolve()); + } else { + handler = sslOptions.newSSLHandler(channel); + } + pipeline.addLast("ssl", handler); + } - // pipeline.addLast("debug", new LoggingHandler(LogLevel.INFO)); + // pipeline.addLast("debug", new LoggingHandler(LogLevel.INFO)); - pipeline.addLast("frameDecoder", new Frame.Decoder()); - pipeline.addLast("frameEncoder", 
frameEncoder); + if (metrics != null) { + pipeline.addLast( + "inboundTrafficMeter", new InboundTrafficMeter(metrics.getBytesReceived())); + pipeline.addLast("outboundTrafficMeter", new OutboundTrafficMeter(metrics.getBytesSent())); + } - if (compressor != null) { - pipeline.addLast("frameDecompressor", new Frame.Decompressor(compressor)); - pipeline.addLast("frameCompressor", new Frame.Compressor(compressor)); - } + pipeline.addLast("frameDecoder", new Frame.Decoder()); + pipeline.addLast("frameEncoder", frameEncoder); - pipeline.addLast("messageDecoder", messageDecoder); - pipeline.addLast("messageEncoder", messageEncoderFor(protocolVersion)); + pipeline.addLast("framingFormatHandler", new FramingFormatHandler(connection.factory)); - pipeline.addLast("idleStateHandler", idleStateHandler); + if (compressor != null + // Frame-level compression is only done in legacy protocol versions. In V5 and above, it + // happens at a higher level ("segment" that groups multiple frames), so never install + // those handlers. + && protocolVersion.compareTo(ProtocolVersion.V5) < 0) { + pipeline.addLast("frameDecompressor", new Frame.Decompressor(compressor)); + pipeline.addLast("frameCompressor", new Frame.Compressor(compressor)); + } - pipeline.addLast("dispatcher", connection.dispatcher); + pipeline.addLast("messageDecoder", messageDecoder); + pipeline.addLast("messageEncoder", messageEncoderFor(protocolVersion)); - nettyOptions.afterChannelInitialized(channel); - } + pipeline.addLast("idleStateHandler", idleStateHandler); - private Message.ProtocolEncoder messageEncoderFor(ProtocolVersion version) { - switch (version) { - case V1: - return messageEncoderV1; - case V2: - return messageEncoderV2; - case V3: - return messageEncoderV3; - case V4: - return messageEncoderV4; - case V5: - return messageEncoderV5; - default: - throw new DriverInternalError("Unsupported protocol version " + protocolVersion); - } - } + pipeline.addLast("dispatcher", connection.dispatcher); + + nettyOptions.afterChannelInitialized(channel); } - /** - * A component that "owns" a connection, and should be notified when it dies. - */ - interface Owner { - void onConnectionDefunct(Connection connection); + private Message.ProtocolEncoder messageEncoderFor(ProtocolVersion version) { + switch (version) { + case V1: + return messageEncoderV1; + case V2: + return messageEncoderV2; + case V3: + return messageEncoderV3; + case V4: + return messageEncoderV4; + case V5: + return messageEncoderV5; + case V6: + return messageEncoderV6; + default: + throw new DriverInternalError("Unsupported protocol version " + protocolVersion); + } } + } + + /** A component that "owns" a connection, and should be notified when it dies. */ + interface Owner { + void onConnectionDefunct(Connection connection); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java index 243978048bd..cba174772ed 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,70 +20,70 @@ import com.datastax.driver.core.exceptions.DriverInternalError; public enum ConsistencyLevel { + ANY(0), + ONE(1), + TWO(2), + THREE(3), + QUORUM(4), + ALL(5), + LOCAL_QUORUM(6), + EACH_QUORUM(7), + SERIAL(8), + LOCAL_SERIAL(9), + LOCAL_ONE(10); - ANY(0), - ONE(1), - TWO(2), - THREE(3), - QUORUM(4), - ALL(5), - LOCAL_QUORUM(6), - EACH_QUORUM(7), - SERIAL(8), - LOCAL_SERIAL(9), - LOCAL_ONE(10); - - // Used by the native protocol - final int code; - private static final ConsistencyLevel[] codeIdx; + // Used by the native protocol + final int code; + private static final ConsistencyLevel[] codeIdx; - static { - int maxCode = -1; - for (ConsistencyLevel cl : ConsistencyLevel.values()) - maxCode = Math.max(maxCode, cl.code); - codeIdx = new ConsistencyLevel[maxCode + 1]; - for (ConsistencyLevel cl : ConsistencyLevel.values()) { - if (codeIdx[cl.code] != null) - throw new IllegalStateException("Duplicate code"); - codeIdx[cl.code] = cl; - } + static { + int maxCode = -1; + for (ConsistencyLevel cl : ConsistencyLevel.values()) maxCode = Math.max(maxCode, cl.code); + codeIdx = new ConsistencyLevel[maxCode + 1]; + for (ConsistencyLevel cl : ConsistencyLevel.values()) { + if (codeIdx[cl.code] != null) throw new IllegalStateException("Duplicate code"); + codeIdx[cl.code] = cl; } + } - private ConsistencyLevel(int code) { - this.code = code; - } + private ConsistencyLevel(int code) { + this.code = code; + } - static ConsistencyLevel fromCode(int code) { - if (code < 0 || code >= codeIdx.length) - throw new DriverInternalError(String.format("Unknown code %d for a consistency level", code)); - return codeIdx[code]; - } + static ConsistencyLevel fromCode(int code) { + if (code < 0 || code >= codeIdx.length) + throw new DriverInternalError(String.format("Unknown code %d for a consistency level", code)); + return codeIdx[code]; + } - /** - * Whether or not this consistency level applies to the local data-center only. - * - * @return whether this consistency level is {@code LOCAL_ONE} or {@code LOCAL_QUORUM}. - */ - public boolean isDCLocal() { - return this == LOCAL_ONE || this == LOCAL_QUORUM; - } - - /** - * Whether or not this consistency level is serial, that is, - * applies only to the "paxos" phase of a - * Lightweight transaction. - *

- * Serial consistency levels are only meaningful when executing conditional updates ({@code INSERT}, {@code UPDATE} - * or {@code DELETE} statements with an {@code IF} condition). - *

- * Two consistency levels belong to this category: {@link #SERIAL} and {@link #LOCAL_SERIAL}. - * - * @return whether this consistency level is {@link #SERIAL} or {@link #LOCAL_SERIAL}. - * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) - * @see Lightweight transactions - */ - public boolean isSerial() { - return this == SERIAL || this == LOCAL_SERIAL; - } + /** + * Whether or not this consistency level applies to the local data-center only. + * + * @return whether this consistency level is {@code LOCAL_ONE}, {@code LOCAL_QUORUM}, or {@code + * LOCAL_SERIAL}. + */ + public boolean isDCLocal() { + return this == LOCAL_ONE || this == LOCAL_QUORUM || this == LOCAL_SERIAL; + } + /** + * Whether or not this consistency level is serial, that is, applies only to the "paxos" phase of + * a Lightweight + * transaction. + * + *

<p>Serial consistency levels are only meaningful when executing conditional updates ({@code + * INSERT}, {@code UPDATE} or {@code DELETE} statements with an {@code IF} condition). + * + *

Two consistency levels belong to this category: {@link #SERIAL} and {@link #LOCAL_SERIAL}. + * + * @return whether this consistency level is {@link #SERIAL} or {@link #LOCAL_SERIAL}. + * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) + * @see Lightweight + * transactions + */ + public boolean isSerial() { + return this == SERIAL || this == LOCAL_SERIAL; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index c1c5531d1ca..b5fd7689d67 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,793 +17,1119 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; + +import com.datastax.driver.core.exceptions.BusyConnectionException; +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; +import com.datastax.driver.core.utils.MoreFutures; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.common.util.concurrent.SettableFuture; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; - -import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; class ControlConnection implements Connection.Owner { - private static final Logger logger = 
LoggerFactory.getLogger(ControlConnection.class); + private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); - private static final boolean EXTENDED_PEER_CHECK = SystemProperties.getBoolean("com.datastax.driver.EXTENDED_PEER_CHECK", true); + private static final boolean EXTENDED_PEER_CHECK = + SystemProperties.getBoolean("com.datastax.driver.EXTENDED_PEER_CHECK", true); - private static final InetAddress bindAllAddress; + private static final InetAddress bindAllAddress; - static { - try { - bindAllAddress = InetAddress.getByAddress(new byte[4]); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } + static { + try { + bindAllAddress = InetAddress.getByAddress(new byte[4]); + } catch (UnknownHostException e) { + throw new RuntimeException(e); } + } + + private static final String SELECT_PEERS = "SELECT * FROM system.peers"; + private static final String SELECT_PEERS_V2 = "SELECT * FROM system.peers_v2"; + private static final String SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"; + + private static final String SELECT_SCHEMA_PEERS = + "SELECT peer, rpc_address, schema_version, host_id FROM system.peers"; + private static final String SELECT_SCHEMA_LOCAL = + "SELECT schema_version, host_id FROM system.local WHERE key='local'"; + + private static final VersionNumber _3_11 = VersionNumber.parse("3.11.0"); + + @VisibleForTesting + final AtomicReference connectionRef = new AtomicReference(); + + private final Cluster.Manager cluster; + + private final AtomicReference> reconnectionAttempt = + new AtomicReference>(); + + private volatile boolean isShutdown; + + // set to true initially, if ever fails will be set to false and peers table will be used + // from here on out. + private volatile boolean isPeersV2 = true; + private volatile boolean isCloud = false; + + public ControlConnection(Cluster.Manager manager) { + this.cluster = manager; + } + + // Only for the initial connection. Does not schedule retries if it fails + void connect() throws UnsupportedProtocolVersionException { + if (isShutdown) return; + + List hosts = new ArrayList(cluster.metadata.getContactPoints()); + // shuffle so that multiple clients with the same contact points don't all pick the same control + // host + Collections.shuffle(hosts); + setNewConnection(reconnectInternal(hosts.iterator(), true)); + } + + CloseFuture closeAsync() { + // We don't have to be fancy here. We just set a flag so that we stop trying to reconnect (and + // thus change the + // connection used) and shutdown the current one. + isShutdown = true; + + // Cancel any reconnection attempt in progress + ListenableFuture r = reconnectionAttempt.get(); + if (r != null) r.cancel(false); + + Connection connection = connectionRef.get(); + return connection == null ? CloseFuture.immediateFuture() : connection.closeAsync().force(); + } + + Host connectedHost() { + Connection current = connectionRef.get(); + return (current == null) ? null : cluster.metadata.getHost(current.endPoint); + } + + void triggerReconnect() { + backgroundReconnect(0); + } + + /** @param initialDelayMs if >=0, bypass the schedule and use this for the first call */ + private void backgroundReconnect(long initialDelayMs) { + if (isShutdown) return; + + // Abort if a reconnection is already in progress. This is not thread-safe: two threads might + // race through this and both + // schedule a reconnection; in that case AbstractReconnectionHandler knows how to deal with it + // correctly. 
+ // But this cheap check can help us avoid creating the object unnecessarily. + ListenableFuture reconnection = reconnectionAttempt.get(); + if (reconnection != null && !reconnection.isDone()) return; + + new AbstractReconnectionHandler( + "Control connection", + cluster.reconnectionExecutor, + cluster.reconnectionPolicy().newSchedule(), + reconnectionAttempt, + initialDelayMs) { + @Override + protected Connection tryReconnect() throws ConnectionException { + if (isShutdown) throw new ConnectionException(null, "Control connection was shut down"); - private static final String SELECT_PEERS = "SELECT * FROM system.peers"; - private static final String SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"; + try { + return reconnectInternal(queryPlan(), false); + } catch (NoHostAvailableException e) { + throw new ConnectionException(null, e.getMessage()); + } catch (UnsupportedProtocolVersionException e) { + // reconnectInternal only propagate those if we've not decided on the protocol version + // yet, + // which should only happen on the initial connection and thus in connect() but never + // here. + throw new AssertionError(); + } + } - private static final String SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers"; - private static final String SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"; + @Override + protected void onReconnection(Connection connection) { + if (isShutdown) { + connection.closeAsync().force(); + return; + } - @VisibleForTesting - final AtomicReference connectionRef = new AtomicReference(); + setNewConnection(connection); + } - private final Cluster.Manager cluster; + @Override + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + if (isShutdown) return false; - private final AtomicReference> reconnectionAttempt = new AtomicReference>(); + logger.error( + "[Control connection] Cannot connect to any host, scheduling retry in {} milliseconds", + nextDelayMs); + return true; + } - private volatile boolean isShutdown; + @Override + protected boolean onUnknownException(Exception e, long nextDelayMs) { + if (isShutdown) return false; - public ControlConnection(Cluster.Manager manager) { - this.cluster = manager; + logger.error( + String.format( + "[Control connection] Unknown error during reconnection, scheduling retry in %d milliseconds", + nextDelayMs), + e); + return true; + } + }.start(); + } + + private Iterator queryPlan() { + return cluster.loadBalancingPolicy().newQueryPlan(null, Statement.DEFAULT); + } + + private void signalError() { + Connection connection = connectionRef.get(); + if (connection != null) connection.closeAsync().force(); + + // If the error caused the host to go down, onDown might have already triggered a reconnect. + // But backgroundReconnect knows how to deal with that. 
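backgroundReconnect above deliberately uses a cheap, racy check on the AtomicReference holding the current attempt, so that repeated triggers don't each spawn a new reconnection; true arbitration is delegated to AbstractReconnectionHandler. A minimal sketch of that single-flight idea with standard-library types only (names and the scheduling setup are illustrative assumptions):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

final class ReconnectGuard {
  private final AtomicReference<CompletableFuture<?>> attempt = new AtomicReference<>();
  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

  void maybeScheduleReconnect(long delayMs, Runnable reconnect) {
    CompletableFuture<?> current = attempt.get();
    if (current != null && !current.isDone()) {
      return; // an attempt is already in flight; don't pile up another one
    }
    // Like the check above, this is not fully thread-safe: two callers can race past the
    // test. The driver accepts that and lets AbstractReconnectionHandler sort it out.
    CompletableFuture<Void> next = new CompletableFuture<>();
    attempt.set(next);
    executor.schedule(
        () -> {
          try {
            reconnect.run();
            next.complete(null);
          } catch (Exception e) {
            next.completeExceptionally(e);
          }
        },
        delayMs,
        TimeUnit.MILLISECONDS);
  }
}
```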
+ backgroundReconnect(0); + } + + private void setNewConnection(Connection newConnection) { + Host.statesLogger.debug("[Control connection] established to {}", newConnection.endPoint); + newConnection.setOwner(this); + Connection old = connectionRef.getAndSet(newConnection); + if (old != null && !old.isClosed()) old.closeAsync().force(); + } + + private Connection reconnectInternal(Iterator iter, boolean isInitialConnection) + throws UnsupportedProtocolVersionException { + + Map errors = null; + + Host host = null; + try { + while (iter.hasNext()) { + host = iter.next(); + if (!host.convictionPolicy.canReconnectNow()) continue; + try { + return tryConnect(host, isInitialConnection); + } catch (ConnectionException e) { + errors = logError(host, e, errors, iter); + if (isInitialConnection) { + // Mark the host down right away so that we don't try it again during the initialization + // process. + // We don't call cluster.triggerOnDown because it does a bunch of other things we don't + // want to do here (notify LBP, etc.) + host.setDown(); + } + } catch (ExecutionException e) { + errors = logError(host, e.getCause(), errors, iter); + } catch (UnsupportedProtocolVersionException e) { + // If it's the very first node we've connected to, rethrow the exception and + // Cluster.init() will handle it. Otherwise, just mark this node in error. + if (isInitialConnection) throw e; + logger.debug("Ignoring host {}: {}", host, e.getMessage()); + errors = logError(host, e, errors, iter); + } catch (ClusterNameMismatchException e) { + logger.debug("Ignoring host {}: {}", host, e.getMessage()); + errors = logError(host, e, errors, iter); + } + } + } catch (InterruptedException e) { + // Sets interrupted status + Thread.currentThread().interrupt(); + + // Indicates that all remaining hosts are skipped due to the interruption + errors = logError(host, new DriverException("Connection thread interrupted"), errors, iter); + while (iter.hasNext()) + errors = + logError( + iter.next(), new DriverException("Connection thread interrupted"), errors, iter); } - - // Only for the initial connection. Does not schedule retries if it fails - void connect() throws UnsupportedProtocolVersionException { - if (isShutdown) - return; - - // NB: at this stage, allHosts() only contains the initial contact points - List hosts = new ArrayList(cluster.metadata.allHosts()); - // shuffle so that multiple clients with the same contact points don't all pick the same control host - Collections.shuffle(hosts); - setNewConnection(reconnectInternal(hosts.iterator(), true)); + throw new NoHostAvailableException( + errors == null ? Collections.emptyMap() : errors); + } + + private static Map logError( + Host host, Throwable exception, Map errors, Iterator iter) { + if (errors == null) errors = new HashMap(); + + errors.put(host.getEndPoint(), exception); + + if (logger.isDebugEnabled()) { + if (iter.hasNext()) { + logger.debug( + String.format("[Control connection] error on %s connection, trying next host", host), + exception); + } else { + logger.debug( + String.format("[Control connection] error on %s connection, no more host to try", host), + exception); + } } - - CloseFuture closeAsync() { - // We don't have to be fancy here. We just set a flag so that we stop trying to reconnect (and thus change the - // connection used) and shutdown the current one. 
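reconnectInternal and logError above walk a query plan of candidate hosts, record why each one failed, and only give up with a single aggregate error once the plan is exhausted. A stripped-down sketch of that accumulate-then-rethrow pattern, using String and RuntimeException as stand-ins for Host, EndPoint and NoHostAvailableException:

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

final class HostIterationSketch {
  String connectToAny(Iterator<String> queryPlan) {
    Map<String, Throwable> errors = new HashMap<>();
    while (queryPlan.hasNext()) {
      String host = queryPlan.next();
      try {
        return tryConnect(host);
      } catch (Exception e) {
        errors.put(host, e); // keep the per-host cause, then move on to the next candidate
      }
    }
    throw new RuntimeException("No host available, tried: " + errors);
  }

  private String tryConnect(String host) {
    if (host.isEmpty()) {
      throw new IllegalArgumentException("empty host");
    }
    return host;
  }
}
```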
- isShutdown = true; - - // Cancel any reconnection attempt in progress - ListenableFuture r = reconnectionAttempt.get(); - if (r != null) - r.cancel(false); - - Connection connection = connectionRef.get(); - return connection == null ? CloseFuture.immediateFuture() : connection.closeAsync().force(); + return errors; + } + + private Connection tryConnect(Host host, boolean isInitialConnection) + throws ConnectionException, ExecutionException, InterruptedException, + UnsupportedProtocolVersionException, ClusterNameMismatchException { + Connection connection = cluster.connectionFactory.open(host); + String productType = connection.optionsQuery().get(); + if (productType.equals("DATASTAX_APOLLO")) { + isCloud = true; } - - Host connectedHost() { - Connection current = connectionRef.get(); - return (current == null) - ? null - : cluster.metadata.getHost(current.address); + // If no protocol version was specified, set the default as soon as a connection succeeds (it's + // needed to parse UDTs in refreshSchema) + if (cluster.connectionFactory.protocolVersion == null) + cluster.connectionFactory.protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; + + try { + logger.trace("[Control connection] Registering for events"); + List evs = + Arrays.asList( + ProtocolEvent.Type.TOPOLOGY_CHANGE, + ProtocolEvent.Type.STATUS_CHANGE, + ProtocolEvent.Type.SCHEMA_CHANGE); + connection.write(new Requests.Register(evs)); + + // We need to refresh the node list first so we know about the cassandra version of + // the node we're connecting to. + // This will create the token map for the first time, but it will be incomplete + // due to the lack of keyspace information + refreshNodeListAndTokenMap(connection, cluster, isInitialConnection, true); + + // refresh schema will also update the token map again, + // this time with information about keyspaces + logger.debug("[Control connection] Refreshing schema"); + refreshSchema(connection, null, null, null, null, cluster); + + return connection; + } catch (BusyConnectionException e) { + connection.closeAsync().force(); + throw new DriverInternalError("Newly created connection should not be busy"); + } catch (InterruptedException e) { + connection.closeAsync().force(); + throw e; + } catch (ConnectionException e) { + connection.closeAsync().force(); + throw e; + } catch (ExecutionException e) { + connection.closeAsync().force(); + throw e; + } catch (RuntimeException e) { + connection.closeAsync().force(); + throw e; } - - void triggerReconnect() { - backgroundReconnect(0); + } + + public void refreshSchema( + SchemaElement targetType, String targetKeyspace, String targetName, List signature) + throws InterruptedException { + logger.debug( + "[Control connection] Refreshing schema for {}{}", + targetType == null ? "everything" : targetKeyspace, + (targetType == KEYSPACE) ? "" : "." + targetName + " (" + targetType + ")"); + try { + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null || c.isClosed()) return; + refreshSchema(c, targetType, targetKeyspace, targetName, signature, cluster); + } catch (ConnectionException e) { + logger.debug( + "[Control connection] Connection error while refreshing schema ({})", e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during schema refresh, this can happen. That's fine so don't scare + // the user. 
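tryConnect above opens a connection and then runs several follow-up steps (registering for protocol events, refreshing the node list and token map, refreshing the schema); if any step fails, the half-initialized connection is force-closed before the exception propagates so it cannot leak. A generic sketch of that cleanup discipline, assuming AutoCloseable as a stand-in for the driver's Connection:

```java
final class SetupOrClose {
  interface SetupStep<C> {
    void run(C connection) throws Exception;
  }

  /** Runs the setup steps, force-closing the resource if any of them fails. */
  static <C extends AutoCloseable> C setUpOrClose(C connection, SetupStep<C> setup)
      throws Exception {
    try {
      setup.run(connection); // e.g. register for events, refresh node list, refresh schema
      return connection;
    } catch (Exception e) {
      try {
        connection.close(); // don't leak the half-initialized connection
      } catch (Exception suppressed) {
        e.addSuppressed(suppressed);
      }
      throw e;
    }
  }
}
```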
+ if (!isShutdown) + logger.error("[Control connection] Unexpected error while refreshing schema", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); } - - /** - * @param initialDelayMs if >=0, bypass the schedule and use this for the first call - */ - private void backgroundReconnect(long initialDelayMs) { - if (isShutdown) - return; - - // Abort if a reconnection is already in progress. This is not thread-safe: two threads might race through this and both - // schedule a reconnection; in that case AbstractReconnectionHandler knows how to deal with it correctly. - // But this cheap check can help us avoid creating the object unnecessarily. - ListenableFuture reconnection = reconnectionAttempt.get(); - if (reconnection != null && !reconnection.isDone()) - return; - - new AbstractReconnectionHandler("Control connection", cluster.reconnectionExecutor, cluster.reconnectionPolicy().newSchedule(), reconnectionAttempt, initialDelayMs) { - @Override - protected Connection tryReconnect() throws ConnectionException { - if (isShutdown) - throw new ConnectionException(null, "Control connection was shut down"); - - try { - return reconnectInternal(queryPlan(), false); - } catch (NoHostAvailableException e) { - throw new ConnectionException(null, e.getMessage()); - } catch (UnsupportedProtocolVersionException e) { - // reconnectInternal only propagate those if we've not decided on the protocol version yet, - // which should only happen on the initial connection and thus in connect() but never here. - throw new AssertionError(); - } - } - - @Override - protected void onReconnection(Connection connection) { - if (isShutdown) { - connection.closeAsync().force(); - return; - } - - setNewConnection(connection); - } - - @Override - protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - if (isShutdown) - return false; - - logger.error("[Control connection] Cannot connect to any host, scheduling retry in {} milliseconds", nextDelayMs); - return true; - } - - @Override - protected boolean onUnknownException(Exception e, long nextDelayMs) { - if (isShutdown) - return false; - - logger.error(String.format("[Control connection] Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); - return true; - } - }.start(); + } + + static void refreshSchema( + Connection connection, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Cluster.Manager cluster) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + Host host = cluster.metadata.getHost(connection.endPoint); + // Neither host, nor it's version should be null. But instead of dying if there is a race or + // something, we can kind of try to infer + // a Cassandra version from the protocol version (this is not full proof, we can have the + // protocol 1 against C* 2.0+, but it's worth + // a shot, and since we log in this case, it should be relatively easy to debug when if this + // ever fail). + VersionNumber cassandraVersion; + if (host == null || host.getCassandraVersion() == null) { + cassandraVersion = cluster.protocolVersion().minCassandraVersion(); + logger.warn( + "Cannot find Cassandra version for host {} to parse the schema, using {} based on protocol version in use. 
" + + "If parsing the schema fails, this could be the cause", + connection.endPoint, + cassandraVersion); + } else { + cassandraVersion = host.getCassandraVersion(); } - - private Iterator queryPlan() { - return cluster.loadBalancingPolicy().newQueryPlan(null, Statement.DEFAULT); + SchemaParser schemaParser; + if (host == null) { + schemaParser = SchemaParser.forVersion(cassandraVersion); + } else { + @SuppressWarnings("deprecation") + VersionNumber dseVersion = host.getDseVersion(); + // If using DSE, derive parser from DSE version. + schemaParser = + dseVersion == null + ? SchemaParser.forVersion(cassandraVersion) + : SchemaParser.forDseVersion(dseVersion); + if (dseVersion != null && dseVersion.getMajor() == 6 && dseVersion.getMinor() < 8) { + // DSE 6.0 and 6.7 report C* 4.0, but consider it C* 3.11 for schema parsing purposes + cassandraVersion = _3_11; + } } - private void signalError() { - Connection connection = connectionRef.get(); - if (connection != null) - connection.closeAsync().force(); - - // If the error caused the host to go down, onDown might have already triggered a reconnect. - // But backgroundReconnect knows how to deal with that. - backgroundReconnect(0); + schemaParser.refresh( + cluster.getCluster(), + targetType, + targetKeyspace, + targetName, + targetSignature, + connection, + cassandraVersion); + } + + void refreshNodeListAndTokenMap() { + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null || c.isClosed()) return; + + try { + refreshNodeListAndTokenMap(c, cluster, false, true); + } catch (ConnectionException e) { + logger.debug( + "[Control connection] Connection error while refreshing node list and token map ({})", + e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during refresh, this can happen. That's fine so don't scare the + // user. + if (!isShutdown) + logger.error( + "[Control connection] Unexpected error while refreshing node list and token map", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.debug( + "[Control connection] Interrupted while refreshing node list and token map, skipping it."); } - - private void setNewConnection(Connection newConnection) { - Host.statesLogger.debug("[Control connection] established to {}", newConnection.address); - newConnection.setOwner(this); - Connection old = connectionRef.getAndSet(newConnection); - if (old != null && !old.isClosed()) - old.closeAsync().force(); + } + + private static EndPoint endPointForPeerHost( + Row peersRow, EndPoint connectedEndPoint, Cluster.Manager cluster) { + EndPoint endPoint = cluster.configuration.getPolicies().getEndPointFactory().create(peersRow); + if (connectedEndPoint.equals(endPoint)) { + // Some DSE versions were inserting a line for the local node in peers (with mostly null + // values). This has been fixed, but if we detect that's the case, ignore it as it's not + // really a big deal. + logger.debug( + "System.peers on node {} has a line for itself. " + + "This is not normal but is a known problem of some DSE versions. 
" + + "Ignoring the entry.", + connectedEndPoint); + return null; } - - private Connection reconnectInternal(Iterator iter, boolean isInitialConnection) throws UnsupportedProtocolVersionException { - - Map errors = null; - - Host host = null; - try { - while (iter.hasNext()) { - host = iter.next(); - if (!host.convictionPolicy.canReconnectNow()) - continue; - try { - return tryConnect(host, isInitialConnection); - } catch (ConnectionException e) { - errors = logError(host, e, errors, iter); - if (isInitialConnection) { - // Mark the host down right away so that we don't try it again during the initialization process. - // We don't call cluster.triggerOnDown because it does a bunch of other things we don't want to do here (notify LBP, etc.) - host.setDown(); - } - } catch (ExecutionException e) { - errors = logError(host, e.getCause(), errors, iter); - } catch (UnsupportedProtocolVersionException e) { - // If it's the very first node we've connected to, rethrow the exception and - // Cluster.init() will handle it. Otherwise, just mark this node in error. - if (isInitialConnection) - throw e; - logger.debug("Ignoring host {}: {}", host, e.getMessage()); - errors = logError(host, e, errors, iter); - } catch (ClusterNameMismatchException e) { - logger.debug("Ignoring host {}: {}", host, e.getMessage()); - errors = logError(host, e, errors, iter); - } - } - } catch (InterruptedException e) { - // Sets interrupted status - Thread.currentThread().interrupt(); - - // Indicates that all remaining hosts are skipped due to the interruption - errors = logError(host, new DriverException("Connection thread interrupted"), errors, iter); - while (iter.hasNext()) - errors = logError(iter.next(), new DriverException("Connection thread interrupted"), errors, iter); - } - throw new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors); + return endPoint; + } + + private Row fetchNodeInfo(Host host, Connection c) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + boolean isConnectedHost = c.endPoint.equals(host.getEndPoint()); + if (isConnectedHost || host.getBroadcastSocketAddress() != null) { + String query; + if (isConnectedHost) { + query = SELECT_LOCAL; + } else { + InetSocketAddress broadcastAddress = host.getBroadcastSocketAddress(); + query = + isPeersV2 + ? SELECT_PEERS_V2 + + " WHERE peer='" + + broadcastAddress.getAddress().getHostAddress() + + "' AND peer_port=" + + broadcastAddress.getPort() + : SELECT_PEERS + + " WHERE peer='" + + broadcastAddress.getAddress().getHostAddress() + + "'"; + } + DefaultResultSetFuture future = + new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(query)); + c.write(future); + Row row = future.get().one(); + if (row != null) { + return row; + } else { + InetSocketAddress address = host.getBroadcastSocketAddress(); + // Don't include full address if port is 0. + String addressToUse = + address.getPort() != 0 ? 
address.toString() : address.getAddress().toString(); + logger.debug( + "Could not find peer with broadcast address {}, " + + "falling back to a full system.peers scan to fetch info for {} " + + "(this can happen if the broadcast address changed)", + addressToUse, + host); + } } - private static Map logError(Host host, Throwable exception, Map errors, Iterator iter) { - if (errors == null) - errors = new HashMap(); - - errors.put(host.getSocketAddress(), exception); - - if (logger.isDebugEnabled()) { - if (iter.hasNext()) { - logger.debug(String.format("[Control connection] error on %s connection, trying next host", host), exception); - } else { - logger.debug(String.format("[Control connection] error on %s connection, no more host to try", host), exception); - } - } - return errors; - } - - private Connection tryConnect(Host host, boolean isInitialConnection) throws ConnectionException, ExecutionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - Connection connection = cluster.connectionFactory.open(host); - - // If no protocol version was specified, set the default as soon as a connection succeeds (it's needed to parse UDTs in refreshSchema) - if (cluster.connectionFactory.protocolVersion == null) - cluster.connectionFactory.protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; - - try { - logger.trace("[Control connection] Registering for events"); - List evs = Arrays.asList( - ProtocolEvent.Type.TOPOLOGY_CHANGE, - ProtocolEvent.Type.STATUS_CHANGE, - ProtocolEvent.Type.SCHEMA_CHANGE - ); - connection.write(new Requests.Register(evs)); - - // We need to refresh the node list first so we know about the cassandra version of - // the node we're connecting to. - // This will create the token map for the first time, but it will be incomplete - // due to the lack of keyspace information - refreshNodeListAndTokenMap(connection, cluster, isInitialConnection, true); - - // refresh schema will also update the token map again, - // this time with information about keyspaces - logger.debug("[Control connection] Refreshing schema"); - refreshSchema(connection, null, null, null, null, cluster); - - return connection; - } catch (BusyConnectionException e) { - connection.closeAsync().force(); - throw new DriverInternalError("Newly created connection should not be busy"); - } catch (InterruptedException e) { - connection.closeAsync().force(); - throw e; - } catch (ConnectionException e) { - connection.closeAsync().force(); - throw e; - } catch (ExecutionException e) { - connection.closeAsync().force(); - throw e; - } catch (RuntimeException e) { - connection.closeAsync().force(); - throw e; - } + // We have to fetch the whole peers table and find the host we're looking for + ListenableFuture future = selectPeersFuture(c); + for (Row row : future.get()) { + UUID rowId = row.getUUID("host_id"); + if (host.getHostId().equals(rowId)) { + return row; + } } - - public void refreshSchema(SchemaElement targetType, String targetKeyspace, String targetName, List signature) throws InterruptedException { - logger.debug("[Control connection] Refreshing schema for {}{}", - targetType == null ? "everything" : targetKeyspace, - (targetType == KEYSPACE) ? "" : "." 
+ targetName + " (" + targetType + ")"); - try { - Connection c = connectionRef.get(); - // At startup, when we add the initial nodes, this will be null, which is ok - if (c == null || c.isClosed()) - return; - refreshSchema(c, targetType, targetKeyspace, targetName, signature, cluster); - } catch (ConnectionException e) { - logger.debug("[Control connection] Connection error while refreshing schema ({})", e.getMessage()); - signalError(); - } catch (ExecutionException e) { - // If we're being shutdown during schema refresh, this can happen. That's fine so don't scare the user. - if (!isShutdown) - logger.error("[Control connection] Unexpected error while refreshing schema", e); - signalError(); - } catch (BusyConnectionException e) { - logger.debug("[Control connection] Connection is busy, reconnecting"); - signalError(); - } - } - - static void refreshSchema(Connection connection, SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - Host host = cluster.metadata.getHost(connection.address); - // Neither host, nor it's version should be null. But instead of dying if there is a race or something, we can kind of try to infer - // a Cassandra version from the protocol version (this is not full proof, we can have the protocol 1 against C* 2.0+, but it's worth - // a shot, and since we log in this case, it should be relatively easy to debug when if this ever fail). - VersionNumber cassandraVersion; - if (host == null || host.getCassandraVersion() == null) { - cassandraVersion = cluster.protocolVersion().minCassandraVersion(); - logger.warn("Cannot find Cassandra version for host {} to parse the schema, using {} based on protocol version in use. " - + "If parsing the schema fails, this could be the cause", connection.address, cassandraVersion); + return null; + } + + /** @return whether we have enough information to bring the node back up */ + boolean refreshNodeInfo(Host host) { + + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null || c.isClosed()) return true; + + logger.debug("[Control connection] Refreshing node info on {}", host); + try { + Row row = fetchNodeInfo(host, c); + if (row == null) { + if (c.isDefunct()) { + logger.debug("Control connection is down, could not refresh node info"); + // Keep going with what we currently know about the node, otherwise we will ignore all + // nodes + // until the control connection is back up (which leads to a catch-22 if there is only + // one) + return true; } else { - cassandraVersion = host.getCassandraVersion(); + logger.warn( + "No row found for host {} in {}'s peers system table. {} will be ignored.", + host.getEndPoint(), + c.endPoint, + host.getEndPoint()); + return false; } - - SchemaParser.forVersion(cassandraVersion) - .refresh(cluster.getCluster(), - targetType, targetKeyspace, targetName, targetSignature, - connection, cassandraVersion); + // Ignore hosts with a null rpc_address, as this is most likely a phantom row in + // system.peers (JAVA-428). 
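fetchNodeInfo above first issues a targeted query (system.local for the control host, otherwise a peers lookup keyed by broadcast address) and only falls back to scanning the whole peers table and matching on host_id when that misses. A minimal sketch of that lookup-then-scan pattern, with rows modelled as plain maps rather than driver Rows:

```
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Supplier;

public class PeerLookup {

  /**
   * Tries a targeted lookup first; if it returns nothing, scans the full peer list and
   * matches on the host_id column. Returns null when the host cannot be found at all.
   */
  static Map<String, Object> findPeer(
      UUID hostId,
      Supplier<Map<String, Object>> targetedLookup,
      Supplier<List<Map<String, Object>>> fullScan) {
    Map<String, Object> row = targetedLookup.get();
    if (row != null) {
      return row;
    }
    for (Map<String, Object> candidate : fullScan.get()) {
      if (hostId.equals(candidate.get("host_id"))) {
        return candidate;
      }
    }
    return null; // the caller decides whether to ignore the host
  }
}
```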
+ // Don't test this for the control host since we're already connected to it anyway, and we + // read the info from system.local + // which didn't have an rpc_address column (JAVA-546) until CASSANDRA-9436 + } else if (!c.endPoint.equals(host.getEndPoint()) && !isValidPeer(row, true)) { + return false; + } + + updateInfo(host, row, cluster, false); + return true; + + } catch (ConnectionException e) { + logger.debug( + "[Control connection] Connection error while refreshing node info ({})", e.getMessage()); + signalError(); + } catch (ExecutionException e) { + // If we're being shutdown during refresh, this can happen. That's fine so don't scare the + // user. + if (!isShutdown) + logger.debug("[Control connection] Unexpected error while refreshing node info", e); + signalError(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + signalError(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.debug("[Control connection] Interrupted while refreshing node info, skipping it."); + } catch (Exception e) { + logger.debug("[Control connection] Unexpected error while refreshing node info", e); + signalError(); } - - void refreshNodeListAndTokenMap() { - Connection c = connectionRef.get(); - // At startup, when we add the initial nodes, this will be null, which is ok - if (c == null || c.isClosed()) - return; - - try { - refreshNodeListAndTokenMap(c, cluster, false, true); - } catch (ConnectionException e) { - logger.debug("[Control connection] Connection error while refreshing node list and token map ({})", e.getMessage()); - signalError(); - } catch (ExecutionException e) { - // If we're being shutdown during refresh, this can happen. That's fine so don't scare the user. - if (!isShutdown) - logger.error("[Control connection] Unexpected error while refreshing node list and token map", e); - signalError(); - } catch (BusyConnectionException e) { - logger.debug("[Control connection] Connection is busy, reconnecting"); - signalError(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.debug("[Control connection] Interrupted while refreshing node list and token map, skipping it."); - } + // If we got an exception, always return true. Otherwise a faulty control connection would cause + // reconnected hosts to be ignored permanently. + return true; + } + + // row can come either from the 'local' table or the 'peers' one + private static void updateInfo( + Host host, Row row, Cluster.Manager cluster, boolean isInitialConnection) { + if (!row.isNull("data_center") || !row.isNull("rack")) + updateLocationInfo( + host, row.getString("data_center"), row.getString("rack"), isInitialConnection, cluster); + + String version = row.getString("release_version"); + host.setVersion(version); + + // Before CASSANDRA-9436 local row did not contain any info about the host addresses. 
+ // After CASSANDRA-9436 (2.0.16, 2.1.6, 2.2.0 rc1) local row contains two new columns: + // - broadcast_address + // - rpc_address + // After CASSANDRA-9603 (2.0.17, 2.1.8, 2.2.0 rc2) local row contains one more column: + // - listen_address + // After CASSANDRA-7544 (4.0) local row also contains: + // - broadcast_port + // - listen_port + + InetSocketAddress broadcastRpcAddress = null; + if (row.getColumnDefinitions().contains("native_address")) { + InetAddress nativeAddress = row.getInet("native_address"); + int nativePort = row.getInt("native_port"); + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else if (row.getColumnDefinitions().contains("native_transport_address")) { + // DSE 6.8 introduced native_transport_address and native_transport_port for the + // listen address. Also included is native_transport_port_ssl (in case users + // want to setup a different port for SSL and non-SSL conns). + InetAddress nativeAddress = row.getInet("native_transport_address"); + int nativePort = row.getInt("native_transport_port"); + if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null + && !row.isNull("native_transport_port_ssl")) { + nativePort = row.getInt("native_transport_port_ssl"); + } + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else if (row.getColumnDefinitions().contains("rpc_address")) { + InetAddress rpcAddress = row.getInet("rpc_address"); + int nativePort = cluster.connectionFactory.getPort(); + if (row.getColumnDefinitions().contains("rpc_port")) { + nativePort = row.getInt("rpc_port"); + } + broadcastRpcAddress = new InetSocketAddress(rpcAddress, nativePort); } - - private static InetSocketAddress rpcAddressForPeerHost(Row peersRow, InetSocketAddress connectedHost, Cluster.Manager cluster) { - - // after CASSANDRA-9436, system.peers contains the following inet columns: - // - peer: this is actually broadcast_address - // - rpc_address: the address we are looking for (this corresponds to broadcast_rpc_address in the peer's cassandra yaml file; - // if this setting if unset, it defaults to the value for rpc_address or rpc_interface) - // - preferred_ip: used by Ec2MultiRegionSnitch and GossipingPropertyFileSnitch, possibly others; contents unclear - - InetAddress broadcastAddress = peersRow.getInet("peer"); - InetAddress rpcAddress = peersRow.getInet("rpc_address"); - - if (broadcastAddress == null) { - return null; - } else if (broadcastAddress.equals(connectedHost.getAddress()) || (rpcAddress != null && rpcAddress.equals(connectedHost.getAddress()))) { - // Some DSE versions were inserting a line for the local node in peers (with mostly null values). This has been fixed, but if we - // detect that's the case, ignore it as it's not really a big deal. - logger.debug("System.peers on node {} has a line for itself. This is not normal but is a known problem of some DSE version. Ignoring the entry.", connectedHost); - return null; - } else if (rpcAddress == null) { - return null; - } else if (rpcAddress.equals(bindAllAddress)) { - logger.warn("Found host with 0.0.0.0 as rpc_address, using broadcast_address ({}) to contact it instead. 
If this is incorrect you should avoid the use of 0.0.0.0 server side.", broadcastAddress); - rpcAddress = broadcastAddress; - } - return cluster.translateAddress(rpcAddress); - } - - private Row fetchNodeInfo(Host host, Connection c) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - boolean isConnectedHost = c.address.equals(host.getSocketAddress()); - if (isConnectedHost || host.getBroadcastAddress() != null) { - DefaultResultSetFuture future = isConnectedHost - ? new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)) - : new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS + " WHERE peer='" + host.getBroadcastAddress().getHostAddress() + '\'')); - c.write(future); - Row row = future.get().one(); - if (row != null) { - return row; - } else { - logger.debug("Could not find peer with broadcast address {}, " + - "falling back to a full system.peers scan to fetch info for {} " + - "(this can happen if the broadcast address changed)", host.getBroadcastAddress(), host); - } - } - - // We have to fetch the whole peers table and find the host we're looking for - DefaultResultSetFuture future = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); - c.write(future); - for (Row row : future.get()) { - InetSocketAddress addr = rpcAddressForPeerHost(row, c.address, cluster); - if (addr != null && addr.equals(host.getSocketAddress())) - return row; - } - return null; + // Before CASSANDRA-9436, system.local doesn't have rpc_address, so this might be null. It's not + // a big deal because we only use this for server events, and the control node doesn't receive + // events for itself. + host.setBroadcastRpcAddress(broadcastRpcAddress); + + InetSocketAddress broadcastSocketAddress = null; + if (row.getColumnDefinitions().contains("peer")) { // system.peers + int broadcastPort = + row.getColumnDefinitions().contains("peer_port") ? row.getInt("peer_port") : 0; + broadcastSocketAddress = new InetSocketAddress(row.getInet("peer"), broadcastPort); + } else if (row.getColumnDefinitions().contains("broadcast_address")) { // system.local + int broadcastPort = + row.getColumnDefinitions().contains("broadcast_port") ? row.getInt("broadcast_port") : 0; + broadcastSocketAddress = + new InetSocketAddress(row.getInet("broadcast_address"), broadcastPort); } - - /** - * @return whether we have enough information to bring the node back up - */ - boolean refreshNodeInfo(Host host) { - - Connection c = connectionRef.get(); - // At startup, when we add the initial nodes, this will be null, which is ok - if (c == null || c.isClosed()) - return true; - - logger.debug("[Control connection] Refreshing node info on {}", host); - try { - Row row = fetchNodeInfo(host, c); - if (row == null) { - if (c.isDefunct()) { - logger.debug("Control connection is down, could not refresh node info"); - // Keep going with what we currently know about the node, otherwise we will ignore all nodes - // until the control connection is back up (which leads to a catch-22 if there is only one) - return true; - } else { - logger.warn("No row found for host {} in {}'s peers system table. {} will be ignored.", host.getAddress(), c.address, host.getAddress()); - return false; - } - // Ignore hosts with a null rpc_address, as this is most likely a phantom row in system.peers (JAVA-428). 
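The address handling above probes which columns the row actually exposes: native_address/native_port (peers_v2 in C* 4.0), then native_transport_address/native_transport_port with the SSL port variant (DSE 6.8), then rpc_address with rpc_port or the driver's configured port. A standalone sketch of that probing order, using a plain map in place of a driver Row:

```
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Map;

public class RpcAddressResolver {

  /** Resolves the client-facing address from whichever columns the row carries. */
  static InetSocketAddress resolve(Map<String, Object> row, int defaultPort, boolean sslEnabled) {
    if (row.containsKey("native_address")) {
      return new InetSocketAddress(
          (InetAddress) row.get("native_address"), (Integer) row.get("native_port"));
    }
    if (row.containsKey("native_transport_address")) {
      Integer port = (Integer) row.get("native_transport_port");
      if (sslEnabled && row.get("native_transport_port_ssl") != null) {
        port = (Integer) row.get("native_transport_port_ssl");
      }
      return new InetSocketAddress((InetAddress) row.get("native_transport_address"), port);
    }
    if (row.containsKey("rpc_address")) {
      int port = row.containsKey("rpc_port") ? (Integer) row.get("rpc_port") : defaultPort;
      return new InetSocketAddress((InetAddress) row.get("rpc_address"), port);
    }
    return null; // older system.local rows may not expose any of these columns
  }
}
```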
- // Don't test this for the control host since we're already connected to it anyway, and we read the info from system.local - // which didn't have an rpc_address column (JAVA-546) until CASSANDRA-9436 - } else if (!c.address.equals(host.getSocketAddress()) && !isValidPeer(row, true)) { - return false; - } - - updateInfo(host, row, cluster, false); - return true; - - } catch (ConnectionException e) { - logger.debug("[Control connection] Connection error while refreshing node info ({})", e.getMessage()); - signalError(); - } catch (ExecutionException e) { - // If we're being shutdown during refresh, this can happen. That's fine so don't scare the user. - if (!isShutdown) - logger.debug("[Control connection] Unexpected error while refreshing node info", e); - signalError(); - } catch (BusyConnectionException e) { - logger.debug("[Control connection] Connection is busy, reconnecting"); - signalError(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.debug("[Control connection] Interrupted while refreshing node info, skipping it."); - } catch (Exception e) { - logger.debug("[Control connection] Unexpected error while refreshing node info", e); - signalError(); - } - // If we got an exception, always return true. Otherwise a faulty control connection would cause - // reconnected hosts to be ignored permanently. - return true; + host.setBroadcastSocketAddress(broadcastSocketAddress); + + // in system.local only for C* versions >= 2.0.17, 2.1.8, 2.2.0 rc2, + // not yet in system.peers as of C* 3.2 + InetSocketAddress listenAddress = null; + if (row.getColumnDefinitions().contains("listen_address")) { + int listenPort = + row.getColumnDefinitions().contains("listen_port") ? row.getInt("listen_port") : 0; + listenAddress = new InetSocketAddress(row.getInet("listen_address"), listenPort); } + host.setListenSocketAddress(listenAddress); - // row can come either from the 'local' table or the 'peers' one - private static void updateInfo(Host host, Row row, Cluster.Manager cluster, boolean isInitialConnection) { - if (!row.isNull("data_center") || !row.isNull("rack")) - updateLocationInfo(host, row.getString("data_center"), row.getString("rack"), isInitialConnection, cluster); - - String version = row.getString("release_version"); - host.setVersion(version); - - // Before CASSANDRA-9436 local row did not contain any info about the host addresses. - // After CASSANDRA-9436 (2.0.16, 2.1.6, 2.2.0 rc1) local row contains two new columns: - // - broadcast_address - // - rpc_address - // After CASSANDRA-9603 (2.0.17, 2.1.8, 2.2.0 rc2) local row contains one more column: - // - listen_address - - InetAddress broadcastAddress = null; - if (row.getColumnDefinitions().contains("peer")) { // system.peers - broadcastAddress = row.getInet("peer"); - } else if (row.getColumnDefinitions().contains("broadcast_address")) { // system.local - broadcastAddress = row.getInet("broadcast_address"); - } - host.setBroadcastAddress(broadcastAddress); - - // in system.local only for C* versions >= 2.0.17, 2.1.8, 2.2.0 rc2, - // not yet in system.peers as of C* 3.2 - InetAddress listenAddress = row.getColumnDefinitions().contains("listen_address") - ? 
row.getInet("listen_address") - : null; - host.setListenAddress(listenAddress); - - if (row.getColumnDefinitions().contains("workload")) { - String dseWorkload = row.getString("workload"); - host.setDseWorkload(dseWorkload); - } - if (row.getColumnDefinitions().contains("graph")) { - boolean isDseGraph = row.getBool("graph"); - host.setDseGraphEnabled(isDseGraph); - } - if (row.getColumnDefinitions().contains("dse_version")) { - String dseVersion = row.getString("dse_version"); - host.setDseVersion(dseVersion); - } + if (row.getColumnDefinitions().contains("workload")) { + String dseWorkload = row.getString("workload"); + host.setDseWorkload(dseWorkload); } - - private static void updateLocationInfo(Host host, String datacenter, String rack, boolean isInitialConnection, Cluster.Manager cluster) { - if (MoreObjects.equal(host.getDatacenter(), datacenter) && MoreObjects.equal(host.getRack(), rack)) - return; - - // If the dc/rack information changes for an existing node, we need to update the load balancing policy. - // For that, we remove and re-add the node against the policy. Not the most elegant, and assumes - // that the policy will update correctly, but in practice this should work. - if (!isInitialConnection) - cluster.loadBalancingPolicy().onDown(host); - host.setLocationInfo(datacenter, rack); - if (!isInitialConnection) - cluster.loadBalancingPolicy().onAdd(host); + if (row.getColumnDefinitions().contains("graph")) { + boolean isDseGraph = row.getBool("graph"); + host.setDseGraphEnabled(isDseGraph); } + if (row.getColumnDefinitions().contains("dse_version")) { + String dseVersion = row.getString("dse_version"); + host.setDseVersion(dseVersion); + } + host.setHostId(row.getUUID("host_id")); + host.setSchemaVersion(row.getUUID("schema_version")); + } + + private static void updateLocationInfo( + Host host, + String datacenter, + String rack, + boolean isInitialConnection, + Cluster.Manager cluster) { + if (MoreObjects.equal(host.getDatacenter(), datacenter) + && MoreObjects.equal(host.getRack(), rack)) return; + + // If the dc/rack information changes for an existing node, we need to update the load balancing + // policy. + // For that, we remove and re-add the node against the policy. Not the most elegant, and assumes + // that the policy will update correctly, but in practice this should work. + if (!isInitialConnection) cluster.loadBalancingPolicy().onRemove(host); + host.setLocationInfo(datacenter, rack); + if (!isInitialConnection) cluster.loadBalancingPolicy().onAdd(host); + } + + /** + * Resolves peering information by doing the following: + * + *

    + *
+   *   <li>if isPeersV2 is true, query the system.peers_v2 table, otherwise query system.peers.
+   *   <li>if system.peers_v2 query fails, set isPeersV2 to false and call selectPeersFuture
+   *       again.
+ * + * @param connection connection to send request on. + * @return result of peers query. + */ + private ListenableFuture selectPeersFuture(final Connection connection) { + if (isPeersV2) { + DefaultResultSetFuture peersV2Future = + new DefaultResultSetFuture( + null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS_V2)); + connection.write(peersV2Future); + final SettableFuture peersFuture = SettableFuture.create(); + // if peers v2 query fails, query peers table instead. + GuavaCompatibility.INSTANCE.addCallback( + peersV2Future, + new FutureCallback() { - private static void refreshNodeListAndTokenMap(Connection connection, Cluster.Manager cluster, boolean isInitialConnection, boolean logInvalidPeers) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - logger.debug("[Control connection] Refreshing node list and token map"); - - boolean metadataEnabled = cluster.configuration.getQueryOptions().isMetadataEnabled(); - - // Make sure we're up to date on nodes and tokens - - DefaultResultSetFuture localFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)); - DefaultResultSetFuture peersFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); - connection.write(localFuture); - connection.write(peersFuture); - - String partitioner = null; - Token.Factory factory = null; - Map> tokenMap = new HashMap>(); - - // Update cluster name, DC and rack for the one node we are connected to - Row localRow = localFuture.get().one(); - if (localRow != null) { - String clusterName = localRow.getString("cluster_name"); - if (clusterName != null) - cluster.metadata.clusterName = clusterName; - - partitioner = localRow.getString("partitioner"); - if (partitioner != null) { - cluster.metadata.partitioner = partitioner; - factory = Token.getFactory(partitioner); + @Override + public void onSuccess(ResultSet result) { + peersFuture.set(result); } - Host host = cluster.metadata.getHost(connection.address); - // In theory host can't be null. However there is no point in risking a NPE in case we - // have a race between a node removal and this. - if (host == null) { - logger.debug("Host in local system table ({}) unknown to us (ok if said host just got removed)", connection.address); - } else { - updateInfo(host, localRow, cluster, isInitialConnection); - if (metadataEnabled && factory != null) { - Set tokensStr = localRow.getSet("tokens", String.class); - if (!tokensStr.isEmpty()) { - Set tokens = toTokens(factory, tokensStr); - tokenMap.put(host, tokens); - } - } + @Override + public void onFailure(Throwable t) { + // Downgrade to system.peers if we get an invalid query error as this indicates the + // peers_v2 table does not exist. + // Also downgrade on server error with a specific error message (DSE 6.0.0 to 6.0.2 + // with search enabled. 
+ if (t instanceof InvalidQueryException + || (t instanceof ServerError + && t.getMessage().contains("Unknown keyspace/cf pair (system.peers_v2)"))) { + isPeersV2 = false; + MoreFutures.propagateFuture(peersFuture, selectPeersFuture(connection)); + } else { + peersFuture.setException(t); + } } - } + }); + return peersFuture; + } else { + DefaultResultSetFuture peersFuture = + new DefaultResultSetFuture( + null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); + connection.write(peersFuture); + return peersFuture; + } + } + + private void refreshNodeListAndTokenMap( + final Connection connection, + final Cluster.Manager cluster, + boolean isInitialConnection, + boolean logInvalidPeers) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + logger.debug("[Control connection] Refreshing node list and token map"); + + boolean metadataEnabled = cluster.configuration.getQueryOptions().isMetadataEnabled(); + + // Make sure we're up to date on nodes and tokens + + DefaultResultSetFuture localFuture = + new DefaultResultSetFuture( + null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)); + ListenableFuture peersFuture = selectPeersFuture(connection); + connection.write(localFuture); + + String partitioner = null; + Token.Factory factory = null; + Map> tokenMap = new HashMap>(); + + // Update cluster name, DC and rack for the one node we are connected to + Row localRow = localFuture.get().one(); + if (localRow == null) { + throw new IllegalStateException( + String.format( + "system.local is empty on %s, this should not happen", connection.endPoint)); + } + String clusterName = localRow.getString("cluster_name"); + if (clusterName != null) cluster.metadata.clusterName = clusterName; - List foundHosts = new ArrayList(); - List dcs = new ArrayList(); - List racks = new ArrayList(); - List cassandraVersions = new ArrayList(); - List broadcastAddresses = new ArrayList(); - List listenAddresses = new ArrayList(); - List> allTokens = new ArrayList>(); - List dseVersions = new ArrayList(); - List dseGraphEnabled = new ArrayList(); - List dseWorkloads = new ArrayList(); - - for (Row row : peersFuture.get()) { - if (!isValidPeer(row, logInvalidPeers)) - continue; - - InetSocketAddress rpcAddress = rpcAddressForPeerHost(row, connection.address, cluster); - if (rpcAddress == null) - continue; - foundHosts.add(rpcAddress); - dcs.add(row.getString("data_center")); - racks.add(row.getString("rack")); - cassandraVersions.add(row.getString("release_version")); - broadcastAddresses.add(row.getInet("peer")); - if (metadataEnabled && factory != null) { - Set tokensStr = row.getSet("tokens", String.class); - Set tokens = null; - if (!tokensStr.isEmpty()) { - tokens = toTokens(factory, tokensStr); - } - allTokens.add(tokens); - } - InetAddress listenAddress = row.getColumnDefinitions().contains("listen_address") ? row.getInet("listen_address") : null; - listenAddresses.add(listenAddress); - String dseWorkload = row.getColumnDefinitions().contains("workload") ? row.getString("workload") : null; - dseWorkloads.add(dseWorkload); - Boolean isDseGraph = row.getColumnDefinitions().contains("graph") ? row.getBool("graph") : null; - dseGraphEnabled.add(isDseGraph); - String dseVersion = row.getColumnDefinitions().contains("dse_version") ? 
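selectPeersFuture above queries system.peers_v2 first and, when the server signals that the table does not exist, flips isPeersV2 and retries against system.peers. The real code chains Guava ListenableFutures and only downgrades on InvalidQueryException or that specific ServerError; the sketch below simplifies both points and uses CompletableFuture purely to stay dependency-free:

```
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;

public class PeersQueryWithFallback {

  /** Queries the v2 table first, remembering the downgrade when it is not available. */
  static CompletableFuture<List<String>> selectPeers(
      AtomicBoolean useV2, Function<String, CompletableFuture<List<String>>> execute) {
    if (!useV2.get()) {
      return execute.apply("SELECT * FROM system.peers");
    }
    return execute
        .apply("SELECT * FROM system.peers_v2")
        .handle(
            (rows, error) -> {
              if (error == null) {
                return CompletableFuture.completedFuture(rows);
              }
              // Simplification: the real code inspects the error type before downgrading.
              useV2.set(false);
              return execute.apply("SELECT * FROM system.peers");
            })
        .thenCompose(f -> f);
  }
}
```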
row.getString("dse_version") : null; - dseVersions.add(dseVersion); - } + partitioner = localRow.getString("partitioner"); + if (partitioner != null) { + cluster.metadata.partitioner = partitioner; + factory = Token.getFactory(partitioner); + } - for (int i = 0; i < foundHosts.size(); i++) { - Host host = cluster.metadata.getHost(foundHosts.get(i)); - boolean isNew = false; - if (host == null) { - // We don't know that node, create the Host object but wait until we've set the known - // info before signaling the addition. - Host newHost = cluster.metadata.newHost(foundHosts.get(i)); - Host existing = cluster.metadata.addIfAbsent(newHost); - if (existing == null) { - host = newHost; - isNew = true; - } else { - host = existing; - isNew = false; - } - } - if (dcs.get(i) != null || racks.get(i) != null) - updateLocationInfo(host, dcs.get(i), racks.get(i), isInitialConnection, cluster); - if (cassandraVersions.get(i) != null) - host.setVersion(cassandraVersions.get(i)); - if (broadcastAddresses.get(i) != null) - host.setBroadcastAddress(broadcastAddresses.get(i)); - if (listenAddresses.get(i) != null) - host.setListenAddress(listenAddresses.get(i)); - - if (dseVersions.get(i) != null) - host.setDseVersion(dseVersions.get(i)); - if (dseWorkloads.get(i) != null) - host.setDseWorkload(dseWorkloads.get(i)); - if (dseGraphEnabled.get(i) != null) - host.setDseGraphEnabled(dseGraphEnabled.get(i)); - - if (metadataEnabled && factory != null && allTokens.get(i) != null) - tokenMap.put(host, allTokens.get(i)); - - if (isNew && !isInitialConnection) - cluster.triggerOnAdd(host); + // During init, metadata.allHosts is still empty, the contact points are in + // metadata.contactPoints. We need to copy them over, but we can only do it after having + // called updateInfo, because we need to know the host id. + // This is the same for peer hosts (see further down). + Host controlHost = + isInitialConnection + ? cluster.metadata.getContactPoint(connection.endPoint) + : cluster.metadata.getHost(connection.endPoint); + // In theory host can't be null. However there is no point in risking a NPE in case we + // have a race between a node removal and this. 
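The rest of this refresh (continued below) updates or creates a Host for every row returned by the peers query and finally removes any previously known host that no longer appears, while never sweeping away the control node itself. A compact sketch of that reconciliation step with plain collections (the names are illustrative, not the driver's metadata API):

```
import java.util.HashSet;
import java.util.Set;

public class NodeListReconciler {

  /**
   * Merges freshly fetched peer endpoints into the known set: unknown endpoints are added,
   * and known endpoints that vanished are removed, except the control endpoint we are
   * currently connected to.
   */
  static void reconcile(
      Set<String> knownEndpoints, Set<String> fetchedEndpoints, String controlEndpoint) {
    // Add newly discovered nodes (the driver would also signal onAdd for each of these).
    knownEndpoints.addAll(fetchedEndpoints);
    // Remove nodes that no longer show up, but never the control node itself.
    for (String endpoint : new HashSet<>(knownEndpoints)) {
      if (!endpoint.equals(controlEndpoint) && !fetchedEndpoints.contains(endpoint)) {
        knownEndpoints.remove(endpoint);
      }
    }
  }
}
```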
+ if (controlHost == null) { + logger.debug( + "Host in local system table ({}) unknown to us (ok if said host just got removed)", + connection.endPoint); + } else { + updateInfo(controlHost, localRow, cluster, isInitialConnection); + if (metadataEnabled && factory != null) { + Set tokensStr = localRow.getSet("tokens", String.class); + if (!tokensStr.isEmpty()) { + Set tokens = toTokens(factory, tokensStr); + tokenMap.put(controlHost, tokens); } - - // Removes all those that seems to have been removed (since we lost the control connection) - Set foundHostsSet = new HashSet(foundHosts); - for (Host host : cluster.metadata.allHosts()) - if (!host.getSocketAddress().equals(connection.address) && !foundHostsSet.contains(host.getSocketAddress())) - cluster.removeHost(host, isInitialConnection); - - if (metadataEnabled && factory != null && !tokenMap.isEmpty()) - cluster.metadata.rebuildTokenMap(factory, tokenMap); + } + if (isInitialConnection) { + cluster.metadata.addIfAbsent(controlHost); + } } - private static Set toTokens(Token.Factory factory, Set tokensStr) { - Set tokens = new LinkedHashSet(tokensStr.size()); - for (String tokenStr : tokensStr) { - tokens.add(factory.fromString(tokenStr)); + List foundHosts = new ArrayList(); + List dcs = new ArrayList(); + List racks = new ArrayList(); + List cassandraVersions = new ArrayList(); + List broadcastRpcAddresses = new ArrayList(); + List broadcastAddresses = new ArrayList(); + List listenAddresses = new ArrayList(); + List> allTokens = new ArrayList>(); + List dseVersions = new ArrayList(); + List dseGraphEnabled = new ArrayList(); + List dseWorkloads = new ArrayList(); + List hostIds = new ArrayList(); + List schemaVersions = new ArrayList(); + + for (Row row : peersFuture.get()) { + if (!isValidPeer(row, logInvalidPeers)) continue; + + EndPoint endPoint = endPointForPeerHost(row, connection.endPoint, cluster); + if (endPoint == null) { + continue; + } + foundHosts.add(endPoint); + dcs.add(row.getString("data_center")); + racks.add(row.getString("rack")); + cassandraVersions.add(row.getString("release_version")); + + InetSocketAddress broadcastRpcAddress; + if (row.getColumnDefinitions().contains("native_address")) { + InetAddress nativeAddress = row.getInet("native_address"); + int nativePort = row.getInt("native_port"); + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else if (row.getColumnDefinitions().contains("native_transport_address")) { + InetAddress nativeAddress = row.getInet("native_transport_address"); + int nativePort = row.getInt("native_transport_port"); + if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null + && !row.isNull("native_transport_port_ssl")) { + nativePort = row.getInt("native_transport_port_ssl"); } - return tokens; - } - - private static boolean isValidPeer(Row peerRow, boolean logIfInvalid) { - boolean isValid = peerRow.getColumnDefinitions().contains("rpc_address") - && !peerRow.isNull("rpc_address"); - if (EXTENDED_PEER_CHECK) { - isValid &= peerRow.getColumnDefinitions().contains("host_id") - && !peerRow.isNull("host_id") - && peerRow.getColumnDefinitions().contains("data_center") - && !peerRow.isNull("data_center") - && peerRow.getColumnDefinitions().contains("rack") - && !peerRow.isNull("rack") - && peerRow.getColumnDefinitions().contains("tokens") - && !peerRow.isNull("tokens"); + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else { + InetAddress rpcAddress = row.getInet("rpc_address"); + int nativePort = 
cluster.connectionFactory.getPort(); + if (row.getColumnDefinitions().contains("rpc_port")) { + nativePort = row.getInt("rpc_port"); } - if (!isValid && logIfInvalid) - logger.warn("Found invalid row in system.peers: {}. " + - "This is likely a gossip or snitch issue, this host will be ignored.", formatInvalidPeer(peerRow)); - return isValid; - } - - // Custom formatting to avoid spamming the logs if 'tokens' is present and contains a gazillion tokens - private static String formatInvalidPeer(Row peerRow) { - StringBuilder sb = new StringBuilder("[peer=" + peerRow.getInet("peer")); - formatMissingOrNullColumn(peerRow, "rpc_address", sb); - if (EXTENDED_PEER_CHECK) { - formatMissingOrNullColumn(peerRow, "host_id", sb); - formatMissingOrNullColumn(peerRow, "data_center", sb); - formatMissingOrNullColumn(peerRow, "rack", sb); - formatMissingOrNullColumn(peerRow, "tokens", sb); + broadcastRpcAddress = new InetSocketAddress(rpcAddress, nativePort); + } + broadcastRpcAddresses.add(broadcastRpcAddress); + + int broadcastPort = + row.getColumnDefinitions().contains("peer_port") ? row.getInt("peer_port") : 0; + InetSocketAddress broadcastAddress = + new InetSocketAddress(row.getInet("peer"), broadcastPort); + + broadcastAddresses.add(broadcastAddress); + if (metadataEnabled && factory != null) { + Set tokensStr = row.getSet("tokens", String.class); + Set tokens = null; + if (!tokensStr.isEmpty()) { + tokens = toTokens(factory, tokensStr); } - sb.append("]"); - return sb.toString(); + allTokens.add(tokens); + } + + if (row.getColumnDefinitions().contains("listen_address") && !row.isNull("listen_address")) { + int listenPort = + row.getColumnDefinitions().contains("listen_port") ? row.getInt("listen_port") : 0; + InetSocketAddress listenAddress = + new InetSocketAddress(row.getInet("listen_address"), listenPort); + listenAddresses.add(listenAddress); + } else { + listenAddresses.add(null); + } + String dseWorkload = + row.getColumnDefinitions().contains("workload") ? row.getString("workload") : null; + dseWorkloads.add(dseWorkload); + Boolean isDseGraph = + row.getColumnDefinitions().contains("graph") ? row.getBool("graph") : null; + dseGraphEnabled.add(isDseGraph); + String dseVersion = + row.getColumnDefinitions().contains("dse_version") ? row.getString("dse_version") : null; + dseVersions.add(dseVersion); + hostIds.add(row.getUUID("host_id")); + schemaVersions.add(row.getUUID("schema_version")); } - private static void formatMissingOrNullColumn(Row peerRow, String columnName, StringBuilder sb) { - if (!peerRow.getColumnDefinitions().contains(columnName)) - sb.append(", missing ").append(columnName); - else if (peerRow.isNull(columnName)) - sb.append(", ").append(columnName).append("=null"); - } - - static boolean waitForSchemaAgreement(Connection connection, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - long start = System.nanoTime(); - long elapsed = 0; - int maxSchemaAgreementWaitSeconds = cluster.configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds(); - while (elapsed < maxSchemaAgreementWaitSeconds * 1000) { - - if (checkSchemaAgreement(connection, cluster)) - return true; - - // let's not flood the node too much - Thread.sleep(200); - - elapsed = Cluster.timeSince(start, TimeUnit.MILLISECONDS); + for (int i = 0; i < foundHosts.size(); i++) { + Host peerHost = + isInitialConnection + ? 
cluster.metadata.getContactPoint(foundHosts.get(i)) + : cluster.metadata.getHost(foundHosts.get(i)); + boolean isNew = false; + if (peerHost == null) { + // We don't know that node, create the Host object but wait until we've set the known + // info before signaling the addition. + Host newHost = cluster.metadata.newHost(foundHosts.get(i)); + newHost.setHostId(hostIds.get(i)); // we need an id to add to the metadata + Host previous = cluster.metadata.addIfAbsent(newHost); + if (previous == null) { + peerHost = newHost; + isNew = true; + } else { + peerHost = previous; + isNew = false; } - - return false; + } + if (dcs.get(i) != null || racks.get(i) != null) + updateLocationInfo(peerHost, dcs.get(i), racks.get(i), isInitialConnection, cluster); + if (cassandraVersions.get(i) != null) peerHost.setVersion(cassandraVersions.get(i)); + if (broadcastRpcAddresses.get(i) != null) + peerHost.setBroadcastRpcAddress(broadcastRpcAddresses.get(i)); + if (broadcastAddresses.get(i) != null) + peerHost.setBroadcastSocketAddress(broadcastAddresses.get(i)); + if (listenAddresses.get(i) != null) peerHost.setListenSocketAddress(listenAddresses.get(i)); + + if (dseVersions.get(i) != null) peerHost.setDseVersion(dseVersions.get(i)); + if (dseWorkloads.get(i) != null) peerHost.setDseWorkload(dseWorkloads.get(i)); + if (dseGraphEnabled.get(i) != null) peerHost.setDseGraphEnabled(dseGraphEnabled.get(i)); + peerHost.setHostId(hostIds.get(i)); + if (schemaVersions.get(i) != null) { + peerHost.setSchemaVersion(schemaVersions.get(i)); + } + + if (metadataEnabled && factory != null && allTokens.get(i) != null) + tokenMap.put(peerHost, allTokens.get(i)); + + if (!isNew && isInitialConnection) { + // If we're at init and the node already existed, it means it was a contact point, so we + // need to copy it over to the regular host list + cluster.metadata.addIfAbsent(peerHost); + } + if (isNew && !isInitialConnection) { + cluster.triggerOnAdd(peerHost); + } } - private static boolean checkSchemaAgreement(Connection connection, Cluster.Manager cluster) throws InterruptedException, ExecutionException { - DefaultResultSetFuture peersFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_PEERS)); - DefaultResultSetFuture localFuture = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_LOCAL)); - connection.write(peersFuture); - connection.write(localFuture); - - Set versions = new HashSet(); - - Row localRow = localFuture.get().one(); - if (localRow != null && !localRow.isNull("schema_version")) - versions.add(localRow.getUUID("schema_version")); - - for (Row row : peersFuture.get()) { - - InetSocketAddress addr = rpcAddressForPeerHost(row, connection.address, cluster); - if (addr == null || row.isNull("schema_version")) - continue; - - Host peer = cluster.metadata.getHost(addr); - if (peer != null && peer.isUp()) - versions.add(row.getUUID("schema_version")); - } - logger.debug("Checking for schema agreement: versions are {}", versions); - return versions.size() <= 1; + // Removes all those that seem to have been removed (since we lost the control connection) + Set foundHostsSet = new HashSet(foundHosts); + for (Host host : cluster.metadata.allHosts()) + if (!host.getEndPoint().equals(connection.endPoint) + && !foundHostsSet.contains(host.getEndPoint())) + cluster.removeHost(host, isInitialConnection); + + if (metadataEnabled && factory != null && !tokenMap.isEmpty()) + cluster.metadata.rebuildTokenMap(factory, tokenMap); + } + + private 
static Set toTokens(Token.Factory factory, Set tokensStr) { + Set tokens = new LinkedHashSet(tokensStr.size()); + for (String tokenStr : tokensStr) { + tokens.add(factory.fromString(tokenStr)); } - - boolean checkSchemaAgreement() throws ConnectionException, BusyConnectionException, InterruptedException, ExecutionException { - Connection connection = connectionRef.get(); - return connection != null && - !connection.isClosed() && - checkSchemaAgreement(connection, cluster); + return tokens; + } + + private boolean isValidPeer(Row peerRow, boolean logIfInvalid) { + boolean isValid = + peerRow.getColumnDefinitions().contains("host_id") && !peerRow.isNull("host_id"); + + if (isPeersV2) { + isValid &= + peerRow.getColumnDefinitions().contains("native_address") + && peerRow.getColumnDefinitions().contains("native_port") + && !peerRow.isNull("native_address") + && !peerRow.isNull("native_port"); + } else { + isValid &= + (peerRow.getColumnDefinitions().contains("rpc_address") && !peerRow.isNull("rpc_address")) + || (peerRow.getColumnDefinitions().contains("native_transport_address") + && peerRow.getColumnDefinitions().contains("native_transport_port") + && !peerRow.isNull("native_transport_address") + && !peerRow.isNull("native_transport_port")); } - boolean isOpen() { - Connection c = connectionRef.get(); - return c != null && !c.isClosed(); + if (EXTENDED_PEER_CHECK) { + isValid &= + peerRow.getColumnDefinitions().contains("data_center") + && !peerRow.isNull("data_center") + && peerRow.getColumnDefinitions().contains("rack") + && !peerRow.isNull("rack") + && peerRow.getColumnDefinitions().contains("tokens") + && !peerRow.isNull("tokens"); } - - public void onUp(Host host) { + if (!isValid && logIfInvalid) + logger.warn( + "Found invalid row in system.peers: {}. 
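isValidPeer above requires a host_id plus whichever address columns the peers table in use is expected to carry, and additionally data_center, rack and tokens when the extended check is enabled. A sketch of the same predicate over a map-based row (illustrative only):

```
import java.util.Map;

public class PeerRowValidator {

  /** Returns whether a peers row carries enough information to build a usable Host. */
  static boolean isValidPeer(Map<String, Object> row, boolean peersV2, boolean extendedCheck) {
    boolean valid = row.get("host_id") != null;
    if (peersV2) {
      valid &= row.get("native_address") != null && row.get("native_port") != null;
    } else {
      valid &=
          row.get("rpc_address") != null
              || (row.get("native_transport_address") != null
                  && row.get("native_transport_port") != null);
    }
    if (extendedCheck) {
      valid &=
          row.get("data_center") != null
              && row.get("rack") != null
              && row.get("tokens") != null;
    }
    return valid;
  }
}
```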
" + + "This is likely a gossip or snitch issue, this host will be ignored.", + formatInvalidPeer(peerRow)); + return isValid; + } + + // Custom formatting to avoid spamming the logs if 'tokens' is present and contains a gazillion + // tokens + private String formatInvalidPeer(Row peerRow) { + StringBuilder sb = new StringBuilder("[peer=" + peerRow.getInet("peer")); + if (isPeersV2) { + formatMissingOrNullColumn(peerRow, "native_address", sb); + formatMissingOrNullColumn(peerRow, "native_port", sb); + } else { + formatMissingOrNullColumn(peerRow, "native_transport_address", sb); + formatMissingOrNullColumn(peerRow, "native_transport_port", sb); + formatMissingOrNullColumn(peerRow, "native_transport_port_ssl", sb); + formatMissingOrNullColumn(peerRow, "rpc_address", sb); } - - public void onAdd(Host host) { + if (EXTENDED_PEER_CHECK) { + formatMissingOrNullColumn(peerRow, "host_id", sb); + formatMissingOrNullColumn(peerRow, "data_center", sb); + formatMissingOrNullColumn(peerRow, "rack", sb); + formatMissingOrNullColumn(peerRow, "tokens", sb); } - - public void onDown(Host host) { - onHostGone(host); + sb.append("]"); + return sb.toString(); + } + + private static void formatMissingOrNullColumn(Row peerRow, String columnName, StringBuilder sb) { + if (!peerRow.getColumnDefinitions().contains(columnName)) + sb.append(", missing ").append(columnName); + else if (peerRow.isNull(columnName)) sb.append(", ").append(columnName).append("=null"); + } + + static boolean waitForSchemaAgreement(Connection connection, Cluster.Manager cluster) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + long start = System.nanoTime(); + long elapsed = 0; + int maxSchemaAgreementWaitSeconds = + cluster.configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds(); + while (elapsed < maxSchemaAgreementWaitSeconds * 1000) { + + if (checkSchemaAgreement(connection, cluster)) return true; + + // let's not flood the node too much + Thread.sleep(200); + + elapsed = Cluster.timeSince(start, TimeUnit.MILLISECONDS); } - public void onRemove(Host host) { - onHostGone(host); - } + return false; + } - private void onHostGone(Host host) { - Connection current = connectionRef.get(); + private static boolean checkSchemaAgreement(Connection connection, Cluster.Manager cluster) + throws InterruptedException, ExecutionException { + DefaultResultSetFuture peersFuture = + new DefaultResultSetFuture( + null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_PEERS)); + DefaultResultSetFuture localFuture = + new DefaultResultSetFuture( + null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_LOCAL)); + connection.write(peersFuture); + connection.write(localFuture); - if (current != null && current.address.equals(host.getSocketAddress())) { - logger.debug("[Control connection] {} is down/removed and it was the control host, triggering reconnect", - current.address); - if (!current.isClosed()) - current.closeAsync().force(); - backgroundReconnect(0); - } - } + Set versions = new HashSet(); - @Override - public void onConnectionDefunct(Connection connection) { - if (connection == connectionRef.get()) - backgroundReconnect(0); + Row localRow = localFuture.get().one(); + if (localRow != null && !localRow.isNull("schema_version")) + versions.add(localRow.getUUID("schema_version")); + + for (Row row : peersFuture.get()) { + + UUID hostId = row.getUUID("host_id"); + if (row.isNull("schema_version")) continue; + + Host peer = cluster.metadata.getHost(hostId); + if 
(peer != null && peer.isUp()) versions.add(row.getUUID("schema_version")); + } + logger.debug("Checking for schema agreement: versions are {}", versions); + return versions.size() <= 1; + } + + boolean checkSchemaAgreement() + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + Connection connection = connectionRef.get(); + return connection != null + && !connection.isClosed() + && checkSchemaAgreement(connection, cluster); + } + + boolean isOpen() { + Connection c = connectionRef.get(); + return c != null && !c.isClosed(); + } + + boolean isCloud() { + return isCloud; + } + + public void onUp(Host host) {} + + public void onAdd(Host host) {} + + public void onDown(Host host) { + onHostGone(host); + } + + public void onRemove(Host host) { + onHostGone(host); + } + + private void onHostGone(Host host) { + Connection current = connectionRef.get(); + + if (current != null && current.endPoint.equals(host.getEndPoint())) { + logger.debug( + "[Control connection] {} is down/removed and it was the control host, triggering reconnect", + current.endPoint); + if (!current.isClosed()) current.closeAsync().force(); + backgroundReconnect(0); } + } + @Override + public void onConnectionDefunct(Connection connection) { + if (connection == connectionRef.get()) backgroundReconnect(0); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java index 0fce5d122fe..7a0c43e0b39 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,141 +17,135 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.policies.ReconnectionPolicy; - -import java.util.concurrent.atomic.AtomicInteger; - import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; +import com.datastax.driver.core.policies.ReconnectionPolicy; +import java.util.concurrent.atomic.AtomicInteger; + /** * The policy with which to decide whether a host should be considered down. - *

- * TODO: this class is fully abstract (rather than an interface) because I'm
- * not sure it's worth exposing (and if we do expose it, we need to expose
- * ConnectionException). Maybe just exposing say a threshold of error before
- * convicting a node is enough.
+ *
+ * <p>
TODO: this class is fully abstract (rather than an interface) because I'm not sure it's worth + * exposing (and if we do expose it, we need to expose ConnectionException). Maybe just exposing say + * a threshold of error before convicting a node is enough. */ abstract class ConvictionPolicy { - /** - * Called when new connections to the host are about to be created. - * - * @param count the number of connections - */ - abstract void signalConnectionsOpening(int count); + /** + * Called when new connections to the host are about to be created. + * + * @param count the number of connections + */ + abstract void signalConnectionsOpening(int count); - /** - * Called when a connection closed normally. - */ - abstract void signalConnectionClosed(Connection connection); + /** Called when a connection closed normally. */ + abstract void signalConnectionClosed(Connection connection); + + /** + * Called when a connection error occurs on a connection to the host this policy applies to. + * + * @return whether the host should be considered down. + */ + abstract boolean signalConnectionFailure(Connection connection, boolean decrement); + + abstract boolean canReconnectNow(); + + abstract boolean hasActiveConnections(); + + /** Simple factory interface to allow creating {@link ConvictionPolicy} instances. */ + interface Factory { /** - * Called when a connection error occurs on a connection to the host this policy applies to. + * Creates a new ConvictionPolicy instance for {@code host}. * - * @return whether the host should be considered down. + * @param host the host this policy applies to + * @return the newly created {@link ConvictionPolicy} instance. */ - abstract boolean signalConnectionFailure(Connection connection, boolean decrement); + ConvictionPolicy create(Host host, ReconnectionPolicy reconnectionPolicy); + } - abstract boolean canReconnectNow(); + static class DefaultConvictionPolicy extends ConvictionPolicy { + private final Host host; + private final ReconnectionPolicy reconnectionPolicy; + private final AtomicInteger openConnections = new AtomicInteger(); - abstract boolean hasActiveConnections(); + private volatile long nextReconnectionTime = Long.MIN_VALUE; + private ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - /** - * Simple factory interface to allow creating {@link ConvictionPolicy} instances. - */ - interface Factory { - - /** - * Creates a new ConvictionPolicy instance for {@code host}. - * - * @param host the host this policy applies to - * @return the newly created {@link ConvictionPolicy} instance. 
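`DefaultConvictionPolicy` below keeps a per-host window during which new connections are not attempted, sized by `ReconnectionSchedule.nextDelayMs()` of whatever `ReconnectionPolicy` the cluster was built with. A hedged sketch of configuring that policy; the 1 s base and 10 min cap are arbitrary example values, not defaults taken from this patch:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;

class ReconnectionPolicyExample {
  // Sketch: exponential backoff between reconnection attempts (1 s, doubling, capped at 10 min).
  // The schedule produced by this policy is what DefaultConvictionPolicy consults when it
  // "prevents new connections for the next N ms" after a connection failure.
  static Cluster build(String contactPoint) {
    return Cluster.builder()
        .addContactPoint(contactPoint)
        .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000))
        .build();
  }
}
```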
- */ - ConvictionPolicy create(Host host, ReconnectionPolicy reconnectionPolicy); + private DefaultConvictionPolicy(Host host, ReconnectionPolicy reconnectionPolicy) { + this.host = host; + this.reconnectionPolicy = reconnectionPolicy; + } + + @Override + void signalConnectionsOpening(int count) { + int newTotal = openConnections.addAndGet(count); + Host.statesLogger.debug( + "[{}] preparing to open {} new connections, total = {}", host, count, newTotal); + resetReconnectionTime(); + } + + @Override + void signalConnectionClosed(Connection connection) { + int remaining = openConnections.decrementAndGet(); + assert remaining >= 0; + Host.statesLogger.debug("[{}] {} closed, remaining = {}", host, connection, remaining); + } + + @Override + boolean signalConnectionFailure(Connection connection, boolean decrement) { + int remaining; + if (decrement) { + if (host.state != Host.State.DOWN) updateReconnectionTime(); + + remaining = openConnections.decrementAndGet(); + assert remaining >= 0; + Host.statesLogger.debug("[{}] {} failed, remaining = {}", host, connection, remaining); + } else { + remaining = openConnections.get(); + } + return remaining == 0; + } + + private synchronized void updateReconnectionTime() { + long now = System.nanoTime(); + if (nextReconnectionTime > now) + // Someone else updated the time before us + return; + + if (reconnectionSchedule == null) reconnectionSchedule = reconnectionPolicy.newSchedule(); + + long nextDelayMs = reconnectionSchedule.nextDelayMs(); + Host.statesLogger.debug( + "[{}] preventing new connections for the next {} ms", host, nextDelayMs); + nextReconnectionTime = now + NANOSECONDS.convert(nextDelayMs, MILLISECONDS); } - static class DefaultConvictionPolicy extends ConvictionPolicy { - private final Host host; - private final ReconnectionPolicy reconnectionPolicy; - private final AtomicInteger openConnections = new AtomicInteger(); - - private volatile long nextReconnectionTime = Long.MIN_VALUE; - private ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - - private DefaultConvictionPolicy(Host host, ReconnectionPolicy reconnectionPolicy) { - this.host = host; - this.reconnectionPolicy = reconnectionPolicy; - } - - @Override - void signalConnectionsOpening(int count) { - int newTotal = openConnections.addAndGet(count); - Host.statesLogger.debug("[{}] preparing to open {} new connections, total = {}", host, count, newTotal); - resetReconnectionTime(); - } - - @Override - void signalConnectionClosed(Connection connection) { - int remaining = openConnections.decrementAndGet(); - assert remaining >= 0; - Host.statesLogger.debug("[{}] {} closed, remaining = {}", host, connection, remaining); - } - - @Override - boolean signalConnectionFailure(Connection connection, boolean decrement) { - int remaining; - if (decrement) { - if (host.state != Host.State.DOWN) - updateReconnectionTime(); - - remaining = openConnections.decrementAndGet(); - assert remaining >= 0; - Host.statesLogger.debug("[{}] {} failed, remaining = {}", host, connection, remaining); - } else { - remaining = openConnections.get(); - } - return remaining == 0; - } - - private synchronized void updateReconnectionTime() { - long now = System.nanoTime(); - if (nextReconnectionTime > now) - // Someone else updated the time before us - return; - - if (reconnectionSchedule == null) - reconnectionSchedule = reconnectionPolicy.newSchedule(); - - long nextDelayMs = reconnectionSchedule.nextDelayMs(); - Host.statesLogger.debug("[{}] preventing new connections for the next {} ms", host, 
nextDelayMs); - nextReconnectionTime = now + NANOSECONDS.convert(nextDelayMs, MILLISECONDS); - } - - private synchronized void resetReconnectionTime() { - reconnectionSchedule = null; - nextReconnectionTime = Long.MIN_VALUE; - } - - @Override - boolean canReconnectNow() { - boolean canReconnectNow = nextReconnectionTime == Long.MIN_VALUE || - System.nanoTime() >= nextReconnectionTime; - Host.statesLogger.trace("canReconnectNow={}", canReconnectNow); - return canReconnectNow; - } - - @Override - boolean hasActiveConnections() { - return openConnections.get() > 0; - } - - static class Factory implements ConvictionPolicy.Factory { - - @Override - public ConvictionPolicy create(Host host, ReconnectionPolicy reconnectionPolicy) { - return new DefaultConvictionPolicy(host, reconnectionPolicy); - } - } + private synchronized void resetReconnectionTime() { + reconnectionSchedule = null; + nextReconnectionTime = Long.MIN_VALUE; + } + + @Override + boolean canReconnectNow() { + boolean canReconnectNow = + nextReconnectionTime == Long.MIN_VALUE || System.nanoTime() >= nextReconnectionTime; + Host.statesLogger.trace("canReconnectNow={}", canReconnectNow); + return canReconnectNow; + } + + @Override + boolean hasActiveConnections() { + return openConnections.get() > 0; + } + + static class Factory implements ConvictionPolicy.Factory { + + @Override + public ConvictionPolicy create(Host host, ReconnectionPolicy reconnectionPolicy) { + return new DefaultConvictionPolicy(host, reconnectionPolicy); + } } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Crc.java b/driver-core/src/main/java/com/datastax/driver/core/Crc.java new file mode 100644 index 00000000000..05435d5f452 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Crc.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import io.netty.buffer.ByteBuf; +import io.netty.util.concurrent.FastThreadLocal; +import java.nio.ByteBuffer; +import java.util.zip.CRC32; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Copied and adapted from the server-side version. 
*/ +class Crc { + + private static final Logger logger = LoggerFactory.getLogger(Crc.class); + + private static final FastThreadLocal crc32 = + new FastThreadLocal() { + @Override + protected CRC32 initialValue() { + return new CRC32(); + } + }; + + private static final byte[] initialBytes = + new byte[] {(byte) 0xFA, (byte) 0x2D, (byte) 0x55, (byte) 0xCA}; + + private static final CrcUpdater CRC_UPDATER = selectCrcUpdater(); + + static int computeCrc32(ByteBuf buffer) { + CRC32 crc = newCrc32(); + CRC_UPDATER.update(crc, buffer); + return (int) crc.getValue(); + } + + private static CRC32 newCrc32() { + CRC32 crc = crc32.get(); + crc.reset(); + crc.update(initialBytes); + return crc; + } + + private static final int CRC24_INIT = 0x875060; + /** + * Polynomial chosen from https://users.ece.cmu.edu/~koopman/crc/index.html, by Philip Koopman + * + *

+ * <p>This webpage claims a copyright to Philip Koopman, which he licenses under the Creative
+ * Commons Attribution 4.0 International License (https://creativecommons.org/licenses/by/4.0)
+ *
+ * <p>It is unclear if this copyright can extend to a 'fact' such as this specific number,
+ * particularly as we do not use Koopman's notation to represent the polynomial, but we anyway
+ * attribute his work and link the terms of his license since they are not incompatible with our
+ * usage and we greatly appreciate his work.
+ *
+ * <p>This polynomial provides hamming distance of 8 for messages up to length 105 bits; we only
+ * support 8-64 bits at present, with an expected range of 40-48.
+ */
+ private static final int CRC24_POLY = 0x1974F0B;
+
+ /**
+ * NOTE: the order of bytes must reach the wire in the same order the CRC is computed, with the
+ * CRC immediately following in a trailer. Since we read in least significant byte order, if you
+ * write to a buffer using putInt or putLong, the byte order will be reversed and you will lose
+ * the guarantee of protection from burst corruptions of 24 bits in length.
+ *
+ * <p>Make sure either to write byte-by-byte to the wire, or to use Integer/Long.reverseBytes if
+ * you write to a BIG_ENDIAN buffer.
+ *
+ * <p>See http://users.ece.cmu.edu/~koopman/pubs/ray06_crcalgorithms.pdf
+ *
+ * <p>Complain to the ethernet spec writers, for having inverse bit to byte significance order.
+ *
+ * <p>

Note we use the most naive algorithm here. We support at most 8 bytes, and typically supply + * 5 or fewer, so any efficiency of a table approach is swallowed by the time to hit L3, even for + * a tiny (4bit) table. + * + * @param bytes an up to 8-byte register containing bytes to compute the CRC over the bytes AND + * bits will be read least-significant to most significant. + * @param len the number of bytes, greater than 0 and fewer than 9, to be read from bytes + * @return the least-significant bit AND byte order crc24 using the CRC24_POLY polynomial + */ + static int computeCrc24(long bytes, int len) { + int crc = CRC24_INIT; + while (len-- > 0) { + crc ^= (bytes & 0xff) << 16; + bytes >>= 8; + + for (int i = 0; i < 8; i++) { + crc <<= 1; + if ((crc & 0x1000000) != 0) crc ^= CRC24_POLY; + } + } + return crc; + } + + private static CrcUpdater selectCrcUpdater() { + try { + CRC32.class.getDeclaredMethod("update", ByteBuffer.class); + return new Java8CrcUpdater(); + } catch (Exception e) { + logger.warn( + "It looks like you are running Java 7 or below. " + + "CRC checks (used in protocol {} and above) will require a memory copy, which can " + + "negatively impact performance. Consider using a more modern VM.", + ProtocolVersion.V5, + e); + return new Java6CrcUpdater(); + } + } + + private interface CrcUpdater { + void update(CRC32 crc, ByteBuf buffer); + } + + private static class Java6CrcUpdater implements CrcUpdater { + @Override + public void update(CRC32 crc, ByteBuf buffer) { + if (buffer.hasArray()) { + crc.update(buffer.array(), buffer.arrayOffset(), buffer.readableBytes()); + } else { + byte[] bytes = new byte[buffer.readableBytes()]; + buffer.getBytes(buffer.readerIndex(), bytes); + crc.update(bytes); + } + } + } + + @IgnoreJDK6Requirement + private static class Java8CrcUpdater implements CrcUpdater { + @Override + public void update(CRC32 crc, ByteBuf buffer) { + crc.update(buffer.internalNioBuffer(buffer.readerIndex(), buffer.readableBytes())); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index b7e7653006b..83260516592 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,742 +22,720 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import io.netty.buffer.ByteBuf; - -import java.util.*; - -/** - * Data types supported by cassandra. 
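Because `computeCrc24` above consumes its `bytes` register least-significant byte first, a caller has to pack the header bytes in exactly the on-wire order (and emit the resulting CRC byte-by-byte as a trailer) to preserve the burst-error guarantee described in the javadoc. A small sketch of that packing step; `packLeastSignificantByteFirst` and `header` are illustrative names, not part of this patch:

```java
class Crc24PackingExample {
  // Sketch: header byte 0 goes into the low-order bits of the register, byte 1 into the
  // next 8 bits, and so on, so the CRC reads the bytes in the order they reach the wire.
  static long packLeastSignificantByteFirst(byte[] header) {
    long packed = 0L;
    for (int i = 0; i < header.length; i++) {
      packed |= (header[i] & 0xFFL) << (8 * i);
    }
    return packed;
  }
  // Usage (Crc is package-private, so this would have to live in com.datastax.driver.core):
  // int crc = Crc.computeCrc24(packLeastSignificantByteFirst(header), header.length);
}
```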
- */ +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** Data types supported by cassandra. */ public abstract class DataType { - /** - * The CQL type name. - */ - public enum Name { - - CUSTOM(0), - ASCII(1), - BIGINT(2), - BLOB(3), - BOOLEAN(4), - COUNTER(5), - DECIMAL(6), - DOUBLE(7), - FLOAT(8), - INT(9), - TEXT(10) { - @Override - public boolean isCompatibleWith(Name that) { - return this == that || that == VARCHAR; - } - }, - TIMESTAMP(11), - UUID(12), - VARCHAR(13) { - @Override - public boolean isCompatibleWith(Name that) { - return this == that || that == TEXT; - } - }, - VARINT(14), - TIMEUUID(15), - INET(16), - DATE(17, ProtocolVersion.V4), - TIME(18, ProtocolVersion.V4), - SMALLINT(19, ProtocolVersion.V4), - TINYINT(20, ProtocolVersion.V4), - DURATION(21, ProtocolVersion.V5), - LIST(32), - MAP(33), - SET(34), - UDT(48, ProtocolVersion.V3), - TUPLE(49, ProtocolVersion.V3); - - final int protocolId; - - final ProtocolVersion minProtocolVersion; - - private static final Name[] nameToIds; - - static { - int maxCode = -1; - for (Name name : Name.values()) - maxCode = Math.max(maxCode, name.protocolId); - nameToIds = new Name[maxCode + 1]; - for (Name name : Name.values()) { - if (nameToIds[name.protocolId] != null) - throw new IllegalStateException("Duplicate Id"); - nameToIds[name.protocolId] = name; - } - } + /** The CQL type name. */ + public enum Name { + CUSTOM(0), + ASCII(1), + BIGINT(2), + BLOB(3), + BOOLEAN(4), + COUNTER(5), + DECIMAL(6), + DOUBLE(7), + FLOAT(8), + INT(9), + TEXT(10) { + @Override + public boolean isCompatibleWith(Name that) { + return this == that || that == VARCHAR; + } + }, + TIMESTAMP(11), + UUID(12), + VARCHAR(13) { + @Override + public boolean isCompatibleWith(Name that) { + return this == that || that == TEXT; + } + }, + VARINT(14), + TIMEUUID(15), + INET(16), + DATE(17, ProtocolVersion.V4), + TIME(18, ProtocolVersion.V4), + SMALLINT(19, ProtocolVersion.V4), + TINYINT(20, ProtocolVersion.V4), + DURATION(21, ProtocolVersion.V5), + LIST(32), + MAP(33), + SET(34), + UDT(48, ProtocolVersion.V3), + TUPLE(49, ProtocolVersion.V3); + + final int protocolId; + + final ProtocolVersion minProtocolVersion; + + private static final Name[] nameToIds; - private Name(int protocolId) { - this(protocolId, ProtocolVersion.V1); - } - - private Name(int protocolId, ProtocolVersion minProtocolVersion) { - this.protocolId = protocolId; - this.minProtocolVersion = minProtocolVersion; - } - - static Name fromProtocolId(int id) { - Name name = nameToIds[id]; - if (name == null) - throw new DriverInternalError("Unknown data type protocol id: " + id); - return name; - } - - /** - * Return {@code true} if the provided Name is equal to this one, - * or if they are aliases for each other, and {@code false} otherwise. - * - * @param that the Name to compare with the current one. - * @return {@code true} if the provided Name is equal to this one, - * or if they are aliases for each other, and {@code false} otherwise. 
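`Name.isCompatibleWith` is overridden by `TEXT` and `VARCHAR` so that the two names act as aliases, and `NativeType.equals`/`hashCode` later in this file are written in terms of it, which makes the corresponding singletons interchangeable. A short illustration (not part of the patch):

```java
import com.datastax.driver.core.DataType;

class TextVarcharAliasExample {
  static void demo() {
    boolean aliases = DataType.Name.TEXT.isCompatibleWith(DataType.Name.VARCHAR); // true
    boolean unrelated = DataType.Name.TEXT.isCompatibleWith(DataType.Name.BIGINT); // false
    // NativeType.equals() delegates to isCompatibleWith(), so the two singletons compare equal:
    boolean equal = DataType.text().equals(DataType.varchar()); // true
  }
}
```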
- */ - public boolean isCompatibleWith(Name that) { - return this == that; + static { + int maxCode = -1; + for (Name name : Name.values()) maxCode = Math.max(maxCode, name.protocolId); + nameToIds = new Name[maxCode + 1]; + for (Name name : Name.values()) { + if (nameToIds[name.protocolId] != null) throw new IllegalStateException("Duplicate Id"); + nameToIds[name.protocolId] = name; + } + } + + private Name(int protocolId) { + this(protocolId, ProtocolVersion.V1); + } + + private Name(int protocolId, ProtocolVersion minProtocolVersion) { + this.protocolId = protocolId; + this.minProtocolVersion = minProtocolVersion; + } + + static Name fromProtocolId(int id) { + Name name = nameToIds[id]; + if (name == null) throw new DriverInternalError("Unknown data type protocol id: " + id); + return name; + } + + /** + * Return {@code true} if the provided Name is equal to this one, or if they are aliases for + * each other, and {@code false} otherwise. + * + * @param that the Name to compare with the current one. + * @return {@code true} if the provided Name is equal to this one, or if they are aliases for + * each other, and {@code false} otherwise. + */ + public boolean isCompatibleWith(Name that) { + return this == that; + } + + @Override + public String toString() { + return super.toString().toLowerCase(); + } + } + + private static final Map primitiveTypeMap = + new EnumMap(Name.class); + + static { + primitiveTypeMap.put(Name.ASCII, new DataType.NativeType(Name.ASCII)); + primitiveTypeMap.put(Name.BIGINT, new DataType.NativeType(Name.BIGINT)); + primitiveTypeMap.put(Name.BLOB, new DataType.NativeType(Name.BLOB)); + primitiveTypeMap.put(Name.BOOLEAN, new DataType.NativeType(Name.BOOLEAN)); + primitiveTypeMap.put(Name.COUNTER, new DataType.NativeType(Name.COUNTER)); + primitiveTypeMap.put(Name.DECIMAL, new DataType.NativeType(Name.DECIMAL)); + primitiveTypeMap.put(Name.DOUBLE, new DataType.NativeType(Name.DOUBLE)); + primitiveTypeMap.put(Name.FLOAT, new DataType.NativeType(Name.FLOAT)); + primitiveTypeMap.put(Name.INET, new DataType.NativeType(Name.INET)); + primitiveTypeMap.put(Name.INT, new DataType.NativeType(Name.INT)); + primitiveTypeMap.put(Name.TEXT, new DataType.NativeType(Name.TEXT)); + primitiveTypeMap.put(Name.TIMESTAMP, new DataType.NativeType(Name.TIMESTAMP)); + primitiveTypeMap.put(Name.UUID, new DataType.NativeType(Name.UUID)); + primitiveTypeMap.put(Name.VARCHAR, new DataType.NativeType(Name.VARCHAR)); + primitiveTypeMap.put(Name.VARINT, new DataType.NativeType(Name.VARINT)); + primitiveTypeMap.put(Name.TIMEUUID, new DataType.NativeType(Name.TIMEUUID)); + primitiveTypeMap.put(Name.SMALLINT, new DataType.NativeType(Name.SMALLINT)); + primitiveTypeMap.put(Name.TINYINT, new DataType.NativeType(Name.TINYINT)); + primitiveTypeMap.put(Name.DATE, new DataType.NativeType(Name.DATE)); + primitiveTypeMap.put(Name.TIME, new DataType.NativeType(Name.TIME)); + primitiveTypeMap.put(Name.DURATION, new DataType.NativeType(Name.DURATION)); + } + + private static final Set primitiveTypeSet = + ImmutableSet.copyOf(primitiveTypeMap.values()); + + protected final DataType.Name name; + + protected DataType(DataType.Name name) { + this.name = name; + } + + static DataType decode( + ByteBuf buffer, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + Name name = Name.fromProtocolId(buffer.readUnsignedShort()); + switch (name) { + case CUSTOM: + String className = CBUtil.readString(buffer); + if (DataTypeClassNameParser.isDuration(className)) { + return DataType.duration(); + } else if 
(DataTypeClassNameParser.isUserType(className) + || DataTypeClassNameParser.isTupleType(className)) { + return DataTypeClassNameParser.parseOne(className, protocolVersion, codecRegistry); + } else { + return custom(className); } - - @Override - public String toString() { - return super.toString().toLowerCase(); + case LIST: + return list(decode(buffer, protocolVersion, codecRegistry)); + case SET: + return set(decode(buffer, protocolVersion, codecRegistry)); + case MAP: + DataType keys = decode(buffer, protocolVersion, codecRegistry); + DataType values = decode(buffer, protocolVersion, codecRegistry); + return map(keys, values); + case UDT: + String keyspace = CBUtil.readString(buffer); + String type = CBUtil.readString(buffer); + int nFields = buffer.readShort() & 0xffff; + List fields = new ArrayList(nFields); + for (int i = 0; i < nFields; i++) { + String fieldName = CBUtil.readString(buffer); + DataType fieldType = decode(buffer, protocolVersion, codecRegistry); + fields.add(new UserType.Field(fieldName, fieldType)); } - } - - private static final Map primitiveTypeMap = new EnumMap(Name.class); - - static { - primitiveTypeMap.put(Name.ASCII, new DataType.NativeType(Name.ASCII)); - primitiveTypeMap.put(Name.BIGINT, new DataType.NativeType(Name.BIGINT)); - primitiveTypeMap.put(Name.BLOB, new DataType.NativeType(Name.BLOB)); - primitiveTypeMap.put(Name.BOOLEAN, new DataType.NativeType(Name.BOOLEAN)); - primitiveTypeMap.put(Name.COUNTER, new DataType.NativeType(Name.COUNTER)); - primitiveTypeMap.put(Name.DECIMAL, new DataType.NativeType(Name.DECIMAL)); - primitiveTypeMap.put(Name.DOUBLE, new DataType.NativeType(Name.DOUBLE)); - primitiveTypeMap.put(Name.FLOAT, new DataType.NativeType(Name.FLOAT)); - primitiveTypeMap.put(Name.INET, new DataType.NativeType(Name.INET)); - primitiveTypeMap.put(Name.INT, new DataType.NativeType(Name.INT)); - primitiveTypeMap.put(Name.TEXT, new DataType.NativeType(Name.TEXT)); - primitiveTypeMap.put(Name.TIMESTAMP, new DataType.NativeType(Name.TIMESTAMP)); - primitiveTypeMap.put(Name.UUID, new DataType.NativeType(Name.UUID)); - primitiveTypeMap.put(Name.VARCHAR, new DataType.NativeType(Name.VARCHAR)); - primitiveTypeMap.put(Name.VARINT, new DataType.NativeType(Name.VARINT)); - primitiveTypeMap.put(Name.TIMEUUID, new DataType.NativeType(Name.TIMEUUID)); - primitiveTypeMap.put(Name.SMALLINT, new DataType.NativeType(Name.SMALLINT)); - primitiveTypeMap.put(Name.TINYINT, new DataType.NativeType(Name.TINYINT)); - primitiveTypeMap.put(Name.DATE, new DataType.NativeType(Name.DATE)); - primitiveTypeMap.put(Name.TIME, new DataType.NativeType(Name.TIME)); - primitiveTypeMap.put(Name.DURATION, new DataType.NativeType(Name.DURATION)); - } - - private static final Set primitiveTypeSet = ImmutableSet.copyOf(primitiveTypeMap.values()); - - protected final DataType.Name name; - - protected DataType(DataType.Name name) { - this.name = name; - } - - static DataType decode(ByteBuf buffer, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - Name name = Name.fromProtocolId(buffer.readUnsignedShort()); - switch (name) { - case CUSTOM: - String className = CBUtil.readString(buffer); - if (DataTypeClassNameParser.isDuration(className)) { - return DataType.duration(); - } else if (DataTypeClassNameParser.isUserType(className) || - DataTypeClassNameParser.isTupleType(className)) { - return DataTypeClassNameParser.parseOne(className, protocolVersion, codecRegistry); - } else { - return custom(className); - } - case LIST: - return list(decode(buffer, protocolVersion, 
codecRegistry)); - case SET: - return set(decode(buffer, protocolVersion, codecRegistry)); - case MAP: - DataType keys = decode(buffer, protocolVersion, codecRegistry); - DataType values = decode(buffer, protocolVersion, codecRegistry); - return map(keys, values); - case UDT: - String keyspace = CBUtil.readString(buffer); - String type = CBUtil.readString(buffer); - int nFields = buffer.readShort() & 0xffff; - List fields = new ArrayList(nFields); - for (int i = 0; i < nFields; i++) { - String fieldName = CBUtil.readString(buffer); - DataType fieldType = decode(buffer, protocolVersion, codecRegistry); - fields.add(new UserType.Field(fieldName, fieldType)); - } - return new UserType(keyspace, type, false, fields, protocolVersion, codecRegistry); - case TUPLE: - nFields = buffer.readShort() & 0xffff; - List types = new ArrayList(nFields); - for (int i = 0; i < nFields; i++) { - types.add(decode(buffer, protocolVersion, codecRegistry)); - } - return new TupleType(types, protocolVersion, codecRegistry); - default: - return primitiveTypeMap.get(name); + return new UserType(keyspace, type, false, fields, protocolVersion, codecRegistry); + case TUPLE: + nFields = buffer.readShort() & 0xffff; + List types = new ArrayList(nFields); + for (int i = 0; i < nFields; i++) { + types.add(decode(buffer, protocolVersion, codecRegistry)); } + return new TupleType(types, protocolVersion, codecRegistry); + default: + return primitiveTypeMap.get(name); + } + } + + /** + * Returns the ASCII type. + * + * @return The ASCII type. + */ + public static DataType ascii() { + return primitiveTypeMap.get(Name.ASCII); + } + + /** + * Returns the BIGINT type. + * + * @return The BIGINT type. + */ + public static DataType bigint() { + return primitiveTypeMap.get(Name.BIGINT); + } + + /** + * Returns the BLOB type. + * + * @return The BLOB type. + */ + public static DataType blob() { + return primitiveTypeMap.get(Name.BLOB); + } + + /** + * Returns the BOOLEAN type. + * + * @return The BOOLEAN type. + */ + public static DataType cboolean() { + return primitiveTypeMap.get(Name.BOOLEAN); + } + + /** + * Returns the COUNTER type. + * + * @return The COUNTER type. + */ + public static DataType counter() { + return primitiveTypeMap.get(Name.COUNTER); + } + + /** + * Returns the DECIMAL type. + * + * @return The DECIMAL type. + */ + public static DataType decimal() { + return primitiveTypeMap.get(Name.DECIMAL); + } + + /** + * Returns the DOUBLE type. + * + * @return The DOUBLE type. + */ + public static DataType cdouble() { + return primitiveTypeMap.get(Name.DOUBLE); + } + + /** + * Returns the FLOAT type. + * + * @return The FLOAT type. + */ + public static DataType cfloat() { + return primitiveTypeMap.get(Name.FLOAT); + } + + /** + * Returns the INET type. + * + * @return The INET type. + */ + public static DataType inet() { + return primitiveTypeMap.get(Name.INET); + } + + /** + * Returns the TINYINT type. + * + * @return The TINYINT type. + */ + public static DataType tinyint() { + return primitiveTypeMap.get(Name.TINYINT); + } + + /** + * Returns the SMALLINT type. + * + * @return The SMALLINT type. + */ + public static DataType smallint() { + return primitiveTypeMap.get(Name.SMALLINT); + } + + /** + * Returns the INT type. + * + * @return The INT type. + */ + public static DataType cint() { + return primitiveTypeMap.get(Name.INT); + } + + /** + * Returns the TEXT type. + * + * @return The TEXT type. + */ + public static DataType text() { + return primitiveTypeMap.get(Name.TEXT); + } + + /** + * Returns the TIMESTAMP type. 
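`decode` above reads one unsigned-short type id per protocol `[option]` and recurses into the element types of lists, sets, maps, tuples and UDTs. A hedged sketch of the byte layout it would consume for `map<text, list<int>>`, using the protocol ids declared in the `Name` enum; the buffer-building helper is illustrative and `decode` itself is package-private:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

class DataTypeOptionEncodingExample {
  // Sketch: one unsigned short id per type; complex types are immediately followed by
  // their recursively encoded element types.
  static ByteBuf mapOfTextToListOfInt() {
    return Unpooled.buffer()
        .writeShort(33)  // MAP
        .writeShort(13)  //   key: VARCHAR (text)
        .writeShort(32)  //   value: LIST
        .writeShort(9);  //     element: INT
  }
  // Within com.datastax.driver.core this would decode back to "map<varchar, list<int>>":
  // DataType t = DataType.decode(mapOfTextToListOfInt(), ProtocolVersion.V4,
  //     CodecRegistry.DEFAULT_INSTANCE);
}
```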
+ * + * @return The TIMESTAMP type. + */ + public static DataType timestamp() { + return primitiveTypeMap.get(Name.TIMESTAMP); + } + + /** + * Returns the DATE type. + * + * @return The DATE type. + */ + public static DataType date() { + return primitiveTypeMap.get(Name.DATE); + } + + /** + * Returns the TIME type. + * + * @return The TIME type. + */ + public static DataType time() { + return primitiveTypeMap.get(Name.TIME); + } + + /** + * Returns the UUID type. + * + * @return The UUID type. + */ + public static DataType uuid() { + return primitiveTypeMap.get(Name.UUID); + } + + /** + * Returns the VARCHAR type. + * + * @return The VARCHAR type. + */ + public static DataType varchar() { + return primitiveTypeMap.get(Name.VARCHAR); + } + + /** + * Returns the VARINT type. + * + * @return The VARINT type. + */ + public static DataType varint() { + return primitiveTypeMap.get(Name.VARINT); + } + + /** + * Returns the TIMEUUID type. + * + * @return The TIMEUUID type. + */ + public static DataType timeuuid() { + return primitiveTypeMap.get(Name.TIMEUUID); + } + + /** + * Returns the type of lists of {@code elementType} elements. + * + * @param elementType the type of the list elements. + * @param frozen whether the list is frozen. + * @return the type of lists of {@code elementType} elements. + */ + public static CollectionType list(DataType elementType, boolean frozen) { + return new DataType.CollectionType(Name.LIST, ImmutableList.of(elementType), frozen); + } + + /** + * Returns the type of "not frozen" lists of {@code elementType} elements. + * + *

This is a shorthand for {@code list(elementType, false);}. + * + * @param elementType the type of the list elements. + * @return the type of "not frozen" lists of {@code elementType} elements. + */ + public static CollectionType list(DataType elementType) { + return list(elementType, false); + } + + /** + * Returns the type of frozen lists of {@code elementType} elements. + * + *

This is a shorthand for {@code list(elementType, true);}. + * + * @param elementType the type of the list elements. + * @return the type of frozen lists of {@code elementType} elements. + */ + public static CollectionType frozenList(DataType elementType) { + return list(elementType, true); + } + + /** + * Returns the type of sets of {@code elementType} elements. + * + * @param elementType the type of the set elements. + * @param frozen whether the set is frozen. + * @return the type of sets of {@code elementType} elements. + */ + public static CollectionType set(DataType elementType, boolean frozen) { + return new DataType.CollectionType(Name.SET, ImmutableList.of(elementType), frozen); + } + + /** + * Returns the type of "not frozen" sets of {@code elementType} elements. + * + *

This is a shorthand for {@code set(elementType, false);}. + * + * @param elementType the type of the set elements. + * @return the type of "not frozen" sets of {@code elementType} elements. + */ + public static CollectionType set(DataType elementType) { + return set(elementType, false); + } + + /** + * Returns the type of frozen sets of {@code elementType} elements. + * + *

This is a shorthand for {@code set(elementType, true);}. + * + * @param elementType the type of the set elements. + * @return the type of frozen sets of {@code elementType} elements. + */ + public static CollectionType frozenSet(DataType elementType) { + return set(elementType, true); + } + + /** + * Returns the type of maps of {@code keyType} to {@code valueType} elements. + * + * @param keyType the type of the map keys. + * @param valueType the type of the map values. + * @param frozen whether the map is frozen. + * @return the type of maps of {@code keyType} to {@code valueType} elements. + */ + public static CollectionType map(DataType keyType, DataType valueType, boolean frozen) { + return new DataType.CollectionType(Name.MAP, ImmutableList.of(keyType, valueType), frozen); + } + + /** + * Returns the type of "not frozen" maps of {@code keyType} to {@code valueType} elements. + * + *

This is a shorthand for {@code map(keyType, valueType, false);}. + * + * @param keyType the type of the map keys. + * @param valueType the type of the map values. + * @return the type of "not frozen" maps of {@code keyType} to {@code valueType} elements. + */ + public static CollectionType map(DataType keyType, DataType valueType) { + return map(keyType, valueType, false); + } + + /** + * Returns the type of frozen maps of {@code keyType} to {@code valueType} elements. + * + *

This is a shorthand for {@code map(keyType, valueType, true);}. + * + * @param keyType the type of the map keys. + * @param valueType the type of the map values. + * @return the type of frozen maps of {@code keyType} to {@code valueType} elements. + */ + public static CollectionType frozenMap(DataType keyType, DataType valueType) { + return map(keyType, valueType, true); + } + + /** + * Returns a Custom type. + * + *
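The `list`, `set` and `map` factories above each come in a non-frozen and a frozen flavour; freezing only changes `isFrozen()` and the CQL name rendered by `toString()`, not the element types. A short illustration of the resulting names (not part of the patch):

```java
import com.datastax.driver.core.DataType;

class CollectionTypeExample {
  static void demo() {
    DataType plain = DataType.map(DataType.text(), DataType.cint());
    DataType frozen = DataType.frozenMap(DataType.text(), DataType.cint());
    DataType nested = DataType.list(DataType.frozenSet(DataType.uuid()));
    // Rendered with the templates defined in CollectionType.toString() below:
    //   plain  -> "map<text, int>"
    //   frozen -> "frozen<map<text, int>>"
    //   nested -> "list<frozen<set<uuid>>>"
  }
}
```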

A custom type is defined by the name of the class used on the Cassandra side to implement + * it. Note that the support for custom types by the driver is limited. + * + *

The use of custom types is rarely useful and is thus not encouraged. + * + * @param typeClassName the server-side fully qualified class name for the type. + * @return the custom type for {@code typeClassName}. + */ + public static DataType.CustomType custom(String typeClassName) { + if (typeClassName == null) throw new NullPointerException(); + return new DataType.CustomType(Name.CUSTOM, typeClassName); + } + + /** + * Returns the Duration type, introduced in Cassandra 3.10. + * + *

Note that a Duration type does not have a native representation in CQL, and technically, is + * merely a special {@link DataType#custom(String) custom type} from the driver's point of view. + * + * @return the Duration type. The returned instance is a singleton. + */ + public static DataType duration() { + return primitiveTypeMap.get(Name.DURATION); + } + + /** + * Returns the name of that type. + * + * @return the name of that type. + */ + public Name getName() { + return name; + } + + /** + * Returns whether this data type is frozen. + * + *

This applies to User Defined Types, tuples and nested collections. Frozen types are + * serialized as a single value in Cassandra's storage engine, whereas non-frozen types are stored + * in a form that allows updates to individual subfields. + * + * @return whether this data type is frozen. + */ + public abstract boolean isFrozen(); + + /** + * Returns whether this data type represent a CQL {@link + * com.datastax.driver.core.DataType.CollectionType collection type}, that is, a list, set or map. + * + * @return whether this data type name represent the name of a collection type. + */ + public boolean isCollection() { + return this instanceof CollectionType; + } + + /** + * Returns the type arguments of this type. + * + *

+ * <p>Note that only the collection types (LIST, MAP, SET) have type arguments. For the other
+ * types, this will return an empty list.
+ *
+ * <p>For the collection types:
+ *
+ * <ul>
+ *   <li>For lists and sets, this method returns one argument, the type of the elements.
+ *   <li>For maps, this method returns two arguments, the first one is the type of the map keys,
+ *       the second one is the type of the map values.
+ * </ul>
+ * + * @return an immutable list containing the type arguments of this type. + */ + public List getTypeArguments() { + return Collections.emptyList(); + } + + /** + * Returns a set of all the primitive types, where primitive types are defined as the types that + * don't have type arguments (that is excluding lists, sets, maps, tuples and udts). + * + * @return returns a set of all the primitive types. + */ + public static Set allPrimitiveTypes() { + return primitiveTypeSet; + } + + /** + * Returns a String representation of this data type suitable for inclusion as a parameter type in + * a function or aggregate signature. + * + *

In such places, the String representation might vary from the canonical one as returned by + * {@link #toString()}; e.g. the {@code frozen} keyword is not accepted. + * + * @return a String representation of this data type suitable for inclusion as a parameter type in + * a function or aggregate signature. + */ + public String asFunctionParameterString() { + return toString(); + } + + /** Instances of this class represent CQL native types, also known as CQL primitive types. */ + public static class NativeType extends DataType { + + private NativeType(DataType.Name name) { + super(name); + } + + @Override + public boolean isFrozen() { + return false; + } + + @Override + public final int hashCode() { + return (name == Name.TEXT) ? Name.VARCHAR.hashCode() : name.hashCode(); + } + + @Override + public final boolean equals(Object o) { + if (!(o instanceof DataType.NativeType)) return false; + + NativeType that = (DataType.NativeType) o; + return this.name.isCompatibleWith(that.name); + } + + @Override + public String toString() { + return name.toString(); + } + } + + /** Instances of this class represent collection types, that is, lists, sets or maps. */ + public static class CollectionType extends DataType { + + private final List typeArguments; + private boolean frozen; + + private CollectionType(DataType.Name name, List typeArguments, boolean frozen) { + super(name); + this.typeArguments = typeArguments; + this.frozen = frozen; + } + + @Override + public boolean isFrozen() { + return frozen; + } + + @Override + public List getTypeArguments() { + return typeArguments; } - /** - * Returns the ASCII type. - * - * @return The ASCII type. - */ - public static DataType ascii() { - return primitiveTypeMap.get(Name.ASCII); - } - - /** - * Returns the BIGINT type. - * - * @return The BIGINT type. - */ - public static DataType bigint() { - return primitiveTypeMap.get(Name.BIGINT); - } - - /** - * Returns the BLOB type. - * - * @return The BLOB type. - */ - public static DataType blob() { - return primitiveTypeMap.get(Name.BLOB); - } - - /** - * Returns the BOOLEAN type. - * - * @return The BOOLEAN type. - */ - public static DataType cboolean() { - return primitiveTypeMap.get(Name.BOOLEAN); - } - - /** - * Returns the COUNTER type. - * - * @return The COUNTER type. - */ - public static DataType counter() { - return primitiveTypeMap.get(Name.COUNTER); - } - - /** - * Returns the DECIMAL type. - * - * @return The DECIMAL type. - */ - public static DataType decimal() { - return primitiveTypeMap.get(Name.DECIMAL); - } - - /** - * Returns the DOUBLE type. - * - * @return The DOUBLE type. - */ - public static DataType cdouble() { - return primitiveTypeMap.get(Name.DOUBLE); - } - - /** - * Returns the FLOAT type. - * - * @return The FLOAT type. - */ - public static DataType cfloat() { - return primitiveTypeMap.get(Name.FLOAT); - } - - /** - * Returns the INET type. - * - * @return The INET type. - */ - public static DataType inet() { - return primitiveTypeMap.get(Name.INET); - } - - /** - * Returns the TINYINT type. - * - * @return The TINYINT type. - */ - public static DataType tinyint() { - return primitiveTypeMap.get(Name.TINYINT); - } - - /** - * Returns the SMALLINT type. - * - * @return The SMALLINT type. - */ - public static DataType smallint() { - return primitiveTypeMap.get(Name.SMALLINT); - } - - /** - * Returns the INT type. - * - * @return The INT type. - */ - public static DataType cint() { - return primitiveTypeMap.get(Name.INT); - } - - /** - * Returns the TEXT type. 
- * - * @return The TEXT type. - */ - public static DataType text() { - return primitiveTypeMap.get(Name.TEXT); - } - - /** - * Returns the TIMESTAMP type. - * - * @return The TIMESTAMP type. - */ - public static DataType timestamp() { - return primitiveTypeMap.get(Name.TIMESTAMP); - } - - /** - * Returns the DATE type. - * - * @return The DATE type. - */ - public static DataType date() { - return primitiveTypeMap.get(Name.DATE); - } - - /** - * Returns the TIME type. - * - * @return The TIME type. - */ - public static DataType time() { - return primitiveTypeMap.get(Name.TIME); - } - - /** - * Returns the UUID type. - * - * @return The UUID type. - */ - public static DataType uuid() { - return primitiveTypeMap.get(Name.UUID); - } - - /** - * Returns the VARCHAR type. - * - * @return The VARCHAR type. - */ - public static DataType varchar() { - return primitiveTypeMap.get(Name.VARCHAR); - } - - /** - * Returns the VARINT type. - * - * @return The VARINT type. - */ - public static DataType varint() { - return primitiveTypeMap.get(Name.VARINT); - } - - /** - * Returns the TIMEUUID type. - * - * @return The TIMEUUID type. - */ - public static DataType timeuuid() { - return primitiveTypeMap.get(Name.TIMEUUID); - } - - /** - * Returns the type of lists of {@code elementType} elements. - * - * @param elementType the type of the list elements. - * @param frozen whether the list is frozen. - * @return the type of lists of {@code elementType} elements. - */ - public static CollectionType list(DataType elementType, boolean frozen) { - return new DataType.CollectionType(Name.LIST, ImmutableList.of(elementType), frozen); - } - - /** - * Returns the type of "not frozen" lists of {@code elementType} elements. - *

- * This is a shorthand for {@code list(elementType, false);}. - * - * @param elementType the type of the list elements. - * @return the type of "not frozen" lists of {@code elementType} elements. - */ - public static CollectionType list(DataType elementType) { - return list(elementType, false); - } - - /** - * Returns the type of frozen lists of {@code elementType} elements. - *

- * This is a shorthand for {@code list(elementType, true);}. - * - * @param elementType the type of the list elements. - * @return the type of frozen lists of {@code elementType} elements. - */ - public static CollectionType frozenList(DataType elementType) { - return list(elementType, true); - } - - /** - * Returns the type of sets of {@code elementType} elements. - * - * @param elementType the type of the set elements. - * @param frozen whether the set is frozen. - * @return the type of sets of {@code elementType} elements. - */ - public static CollectionType set(DataType elementType, boolean frozen) { - return new DataType.CollectionType(Name.SET, ImmutableList.of(elementType), frozen); - } - - /** - * Returns the type of "not frozen" sets of {@code elementType} elements. - *

- * This is a shorthand for {@code set(elementType, false);}. - * - * @param elementType the type of the set elements. - * @return the type of "not frozen" sets of {@code elementType} elements. - */ - public static CollectionType set(DataType elementType) { - return set(elementType, false); - } - - /** - * Returns the type of frozen sets of {@code elementType} elements. - *

- * This is a shorthand for {@code set(elementType, true);}. - * - * @param elementType the type of the set elements. - * @return the type of frozen sets of {@code elementType} elements. - */ - public static CollectionType frozenSet(DataType elementType) { - return set(elementType, true); - } - - /** - * Returns the type of maps of {@code keyType} to {@code valueType} elements. - * - * @param keyType the type of the map keys. - * @param valueType the type of the map values. - * @param frozen whether the map is frozen. - * @return the type of maps of {@code keyType} to {@code valueType} elements. - */ - public static CollectionType map(DataType keyType, DataType valueType, boolean frozen) { - return new DataType.CollectionType(Name.MAP, ImmutableList.of(keyType, valueType), frozen); - } - - /** - * Returns the type of "not frozen" maps of {@code keyType} to {@code valueType} elements. - *

- * This is a shorthand for {@code map(keyType, valueType, false);}. - * - * @param keyType the type of the map keys. - * @param valueType the type of the map values. - * @return the type of "not frozen" maps of {@code keyType} to {@code valueType} elements. - */ - public static CollectionType map(DataType keyType, DataType valueType) { - return map(keyType, valueType, false); + @Override + public final int hashCode() { + return MoreObjects.hashCode(name, typeArguments); } - /** - * Returns the type of frozen maps of {@code keyType} to {@code valueType} elements. - *

- * This is a shorthand for {@code map(keyType, valueType, true);}. - * - * @param keyType the type of the map keys. - * @param valueType the type of the map values. - * @return the type of frozen maps of {@code keyType} to {@code valueType} elements. - */ - public static CollectionType frozenMap(DataType keyType, DataType valueType) { - return map(keyType, valueType, true); - } + @Override + public final boolean equals(Object o) { + if (!(o instanceof DataType.CollectionType)) return false; - /** - * Returns a Custom type. - *

- * A custom type is defined by the name of the class used on the Cassandra - * side to implement it. Note that the support for custom types by the - * driver is limited. - *

- * The use of custom types is rarely useful and is thus not encouraged. - * - * @param typeClassName the server-side fully qualified class name for the type. - * @return the custom type for {@code typeClassName}. - */ - public static DataType.CustomType custom(String typeClassName) { - if (typeClassName == null) - throw new NullPointerException(); - return new DataType.CustomType(Name.CUSTOM, typeClassName); + DataType.CollectionType d = (DataType.CollectionType) o; + return name == d.name && typeArguments.equals(d.typeArguments); } - /** - * Returns the Duration type, introduced in Cassandra 3.10. - *

- * Note that a Duration type does not have a native representation in CQL, and - * technically, is merely a special {@link DataType#custom(String) custom type} - * from the driver's point of view. - * - * @return the Duration type. The returned instance is a singleton. - */ - public static DataType duration() { - return primitiveTypeMap.get(Name.DURATION); + @Override + public String toString() { + if (name == Name.MAP) { + String template = frozen ? "frozen<%s<%s, %s>>" : "%s<%s, %s>"; + return String.format(template, name, typeArguments.get(0), typeArguments.get(1)); + } else { + String template = frozen ? "frozen<%s<%s>>" : "%s<%s>"; + return String.format(template, name, typeArguments.get(0)); + } } - /** - * Returns the name of that type. - * - * @return the name of that type. - */ - public Name getName() { - return name; + @Override + public String asFunctionParameterString() { + if (name == Name.MAP) { + String template = "%s<%s, %s>"; + return String.format( + template, + name, + typeArguments.get(0).asFunctionParameterString(), + typeArguments.get(1).asFunctionParameterString()); + } else { + String template = "%s<%s>"; + return String.format(template, name, typeArguments.get(0).asFunctionParameterString()); + } } + } - /** - * Returns whether this data type is frozen. - *

- * This applies to User Defined Types, tuples and nested collections. Frozen types are serialized as a single value in - * Cassandra's storage engine, whereas non-frozen types are stored in a form that allows updates to individual subfields. - * - * @return whether this data type is frozen. - */ - public abstract boolean isFrozen(); + /** + * A "custom" type is a type that cannot be expressed as a CQL type. + * + *

Each custom type is merely identified by the fully qualified {@link + * #getCustomTypeClassName() class name} that represents this type server-side. + * + *

The driver provides a minimal support for such types through instances of this class. + * + *

A codec for custom types can be obtained via {@link TypeCodec#custom(DataType.CustomType)}. + */ + public static class CustomType extends DataType { - /** - * Returns whether this data type represent a CQL {@link com.datastax.driver.core.DataType.CollectionType collection type}, - * that is, a list, set or map. - * - * @return whether this data type name represent the name of a collection type. - */ - public boolean isCollection() { - return this instanceof CollectionType; - } + private final String customClassName; - /** - * Returns the type arguments of this type. - *

- * Note that only the collection types (LIST, MAP, SET) have type - * arguments. For the other types, this will return an empty list. - *

- * For the collection types: - *

- * <ul>
- *   <li>For lists and sets, this method returns one argument, the type of
- *   the elements.</li>
- *   <li>For maps, this method returns two arguments, the first one is the
- *   type of the map keys, the second one is the type of the map
- *   values.</li>
- * </ul>
- * - * @return an immutable list containing the type arguments of this type. - */ - public List getTypeArguments() { - return Collections.emptyList(); + private CustomType(DataType.Name name, String className) { + super(name); + this.customClassName = className; } - /** - * Returns a set of all the primitive types, where primitive types are - * defined as the types that don't have type arguments (that is excluding - * lists, sets, maps, tuples and udts). - * - * @return returns a set of all the primitive types. - */ - public static Set allPrimitiveTypes() { - return primitiveTypeSet; + @Override + public boolean isFrozen() { + return false; } /** - * Returns a String representation of this data type - * suitable for inclusion as a parameter type - * in a function or aggregate signature. - *

- * In such places, the String representation might vary - * from the canonical one as returned by {@link #toString()}; - * e.g. the {@code frozen} keyword is not accepted. + * Returns the fully qualified name of the subtype of {@code + * org.apache.cassandra.db.marshal.AbstractType} that represents this type server-side. * - * @return a String representation of this data type - * suitable for inclusion as a parameter type - * in a function or aggregate signature. + * @return the fully qualified name of the subtype of {@code + * org.apache.cassandra.db.marshal.AbstractType} that represents this type server-side. */ - public String asFunctionParameterString() { - return toString(); + public String getCustomTypeClassName() { + return customClassName; } - /** - * Instances of this class represent CQL native types, - * also known as CQL primitive types. - */ - public static class NativeType extends DataType { - - private NativeType(DataType.Name name) { - super(name); - } - - @Override - public boolean isFrozen() { - return false; - } - - @Override - public final int hashCode() { - return (name == Name.TEXT) - ? Name.VARCHAR.hashCode() - : name.hashCode(); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof DataType.NativeType)) - return false; - - NativeType that = (DataType.NativeType) o; - return this.name.isCompatibleWith(that.name); - } - - @Override - public String toString() { - return name.toString(); - } + @Override + public final int hashCode() { + return MoreObjects.hashCode(name, customClassName); } - /** - * Instances of this class represent collection types, that is, - * lists, sets or maps. - */ - public static class CollectionType extends DataType { - - private final List typeArguments; - private boolean frozen; - - private CollectionType(DataType.Name name, List typeArguments, boolean frozen) { - super(name); - this.typeArguments = typeArguments; - this.frozen = frozen; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public List getTypeArguments() { - return typeArguments; - } - - @Override - public final int hashCode() { - return MoreObjects.hashCode(name, typeArguments); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof DataType.CollectionType)) - return false; + @Override + public final boolean equals(Object o) { + if (!(o instanceof DataType.CustomType)) return false; - DataType.CollectionType d = (DataType.CollectionType) o; - return name == d.name && typeArguments.equals(d.typeArguments); - } - - @Override - public String toString() { - if (name == Name.MAP) { - String template = frozen ? "frozen<%s<%s, %s>>" : "%s<%s, %s>"; - return String.format(template, name, typeArguments.get(0), typeArguments.get(1)); - } else { - String template = frozen ? "frozen<%s<%s>>" : "%s<%s>"; - return String.format(template, name, typeArguments.get(0)); - } - } - - @Override - public String asFunctionParameterString() { - if (name == Name.MAP) { - String template = "%s<%s, %s>"; - return String.format(template, name, typeArguments.get(0).asFunctionParameterString(), typeArguments.get(1).asFunctionParameterString()); - } else { - String template = "%s<%s>"; - return String.format(template, name, typeArguments.get(0).asFunctionParameterString()); - } - } + DataType.CustomType d = (DataType.CustomType) o; + return name == d.name && MoreObjects.equal(customClassName, d.customClassName); } - /** - * A "custom" type is a type that cannot be expressed as a CQL type. - *

- * Each custom type is merely identified by the fully qualified - * {@link #getCustomTypeClassName() class name} - * that represents this type server-side. - *

- * The driver provides a minimal support for such types through - * instances of this class. - *

- * A codec for custom types can be obtained via {@link TypeCodec#custom(DataType.CustomType)}. - */ - public static class CustomType extends DataType { - - private final String customClassName; - - private CustomType(DataType.Name name, String className) { - super(name); - this.customClassName = className; - } - - @Override - public boolean isFrozen() { - return false; - } - - /** - * Returns the fully qualified name - * of the subtype of - * {@code org.apache.cassandra.db.marshal.AbstractType} - * that represents this type server-side. - * - * @return the fully qualified name - * of the subtype of - * {@code org.apache.cassandra.db.marshal.AbstractType} - * that represents this type server-side. - */ - public String getCustomTypeClassName() { - return customClassName; - } - - @Override - public final int hashCode() { - return MoreObjects.hashCode(name, customClassName); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof DataType.CustomType)) - return false; - - DataType.CustomType d = (DataType.CustomType) o; - return name == d.name && MoreObjects.equal(customClassName, d.customClassName); - } - - @Override - public String toString() { - return String.format("'%s'", customClassName); - } + @Override + public String toString() { + return String.format("'%s'", customClassName); } - + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java b/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java index 2e7f4c1bfe7..a352bc72d2d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,11 +20,15 @@ import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.utils.Bytes; import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - /* * Parse data types from schema tables, for Cassandra 3.0 and above. * In these versions, data types appear as class names, like "org.apache.cassandra.db.marshal.AsciiType" @@ -37,366 +43,387 @@ * so there shouldn't be anything wrong with them. 
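As the comment above notes, these schema columns carry fully qualified `AbstractType` class names, with parameterized types nesting their arguments in parentheses. A few hedged examples of strings `parseOne` accepts (the constant names are illustrative):

```java
class MarshalClassNameExamples {
  // Simple types are bare class names:
  static final String TEXT =
      "org.apache.cassandra.db.marshal.UTF8Type"; // -> text
  // Parameterized types nest their arguments in parentheses:
  static final String LIST_OF_INT =
      "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.Int32Type)"; // -> list<int>
  // ReversedType only records a clustering order, so parseOne unwraps and skips it:
  static final String REVERSED_TIMESTAMP =
      "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.TimestampType)"; // -> timestamp
}
```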
*/ class DataTypeClassNameParser { - private static final Logger logger = LoggerFactory.getLogger(DataTypeClassNameParser.class); - - private static final String REVERSED_TYPE = "org.apache.cassandra.db.marshal.ReversedType"; - private static final String FROZEN_TYPE = "org.apache.cassandra.db.marshal.FrozenType"; - private static final String COMPOSITE_TYPE = "org.apache.cassandra.db.marshal.CompositeType"; - private static final String COLLECTION_TYPE = "org.apache.cassandra.db.marshal.ColumnToCollectionType"; - private static final String LIST_TYPE = "org.apache.cassandra.db.marshal.ListType"; - private static final String SET_TYPE = "org.apache.cassandra.db.marshal.SetType"; - private static final String MAP_TYPE = "org.apache.cassandra.db.marshal.MapType"; - private static final String UDT_TYPE = "org.apache.cassandra.db.marshal.UserType"; - private static final String TUPLE_TYPE = "org.apache.cassandra.db.marshal.TupleType"; - private static final String DURATION_TYPE = "org.apache.cassandra.db.marshal.DurationType"; - - private static ImmutableMap cassTypeToDataType = - new ImmutableMap.Builder() - .put("org.apache.cassandra.db.marshal.AsciiType", DataType.ascii()) - .put("org.apache.cassandra.db.marshal.LongType", DataType.bigint()) - .put("org.apache.cassandra.db.marshal.BytesType", DataType.blob()) - .put("org.apache.cassandra.db.marshal.BooleanType", DataType.cboolean()) - .put("org.apache.cassandra.db.marshal.CounterColumnType", DataType.counter()) - .put("org.apache.cassandra.db.marshal.DecimalType", DataType.decimal()) - .put("org.apache.cassandra.db.marshal.DoubleType", DataType.cdouble()) - .put("org.apache.cassandra.db.marshal.FloatType", DataType.cfloat()) - .put("org.apache.cassandra.db.marshal.InetAddressType", DataType.inet()) - .put("org.apache.cassandra.db.marshal.Int32Type", DataType.cint()) - .put("org.apache.cassandra.db.marshal.UTF8Type", DataType.text()) - .put("org.apache.cassandra.db.marshal.TimestampType", DataType.timestamp()) - .put("org.apache.cassandra.db.marshal.SimpleDateType", DataType.date()) - .put("org.apache.cassandra.db.marshal.TimeType", DataType.time()) - .put("org.apache.cassandra.db.marshal.UUIDType", DataType.uuid()) - .put("org.apache.cassandra.db.marshal.IntegerType", DataType.varint()) - .put("org.apache.cassandra.db.marshal.TimeUUIDType", DataType.timeuuid()) - .put("org.apache.cassandra.db.marshal.ByteType", DataType.tinyint()) - .put("org.apache.cassandra.db.marshal.ShortType", DataType.smallint()) - .put(DURATION_TYPE, DataType.duration()) - .build(); - - static DataType parseOne(String className, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - boolean frozen = false; - if (isReversed(className)) { - // Just skip the ReversedType part, we don't care - className = getNestedClassName(className); - } else if (isFrozen(className)) { - frozen = true; - className = getNestedClassName(className); - } - - Parser parser = new Parser(className, 0); - String next = parser.parseNextName(); - - if (next.startsWith(LIST_TYPE)) - return DataType.list(parseOne(parser.getTypeParameters().get(0), protocolVersion, codecRegistry), frozen); + private static final Logger logger = LoggerFactory.getLogger(DataTypeClassNameParser.class); + + private static final String REVERSED_TYPE = "org.apache.cassandra.db.marshal.ReversedType"; + private static final String FROZEN_TYPE = "org.apache.cassandra.db.marshal.FrozenType"; + private static final String COMPOSITE_TYPE = "org.apache.cassandra.db.marshal.CompositeType"; + private static final 
String COLLECTION_TYPE = + "org.apache.cassandra.db.marshal.ColumnToCollectionType"; + private static final String LIST_TYPE = "org.apache.cassandra.db.marshal.ListType"; + private static final String SET_TYPE = "org.apache.cassandra.db.marshal.SetType"; + private static final String MAP_TYPE = "org.apache.cassandra.db.marshal.MapType"; + private static final String UDT_TYPE = "org.apache.cassandra.db.marshal.UserType"; + private static final String TUPLE_TYPE = "org.apache.cassandra.db.marshal.TupleType"; + private static final String DURATION_TYPE = "org.apache.cassandra.db.marshal.DurationType"; + + private static ImmutableMap cassTypeToDataType = + new ImmutableMap.Builder() + .put("org.apache.cassandra.db.marshal.AsciiType", DataType.ascii()) + .put("org.apache.cassandra.db.marshal.LongType", DataType.bigint()) + .put("org.apache.cassandra.db.marshal.BytesType", DataType.blob()) + .put("org.apache.cassandra.db.marshal.BooleanType", DataType.cboolean()) + .put("org.apache.cassandra.db.marshal.CounterColumnType", DataType.counter()) + .put("org.apache.cassandra.db.marshal.DecimalType", DataType.decimal()) + .put("org.apache.cassandra.db.marshal.DoubleType", DataType.cdouble()) + .put("org.apache.cassandra.db.marshal.FloatType", DataType.cfloat()) + .put("org.apache.cassandra.db.marshal.InetAddressType", DataType.inet()) + .put("org.apache.cassandra.db.marshal.Int32Type", DataType.cint()) + .put("org.apache.cassandra.db.marshal.UTF8Type", DataType.text()) + .put("org.apache.cassandra.db.marshal.TimestampType", DataType.timestamp()) + .put("org.apache.cassandra.db.marshal.SimpleDateType", DataType.date()) + .put("org.apache.cassandra.db.marshal.TimeType", DataType.time()) + .put("org.apache.cassandra.db.marshal.UUIDType", DataType.uuid()) + .put("org.apache.cassandra.db.marshal.IntegerType", DataType.varint()) + .put("org.apache.cassandra.db.marshal.TimeUUIDType", DataType.timeuuid()) + .put("org.apache.cassandra.db.marshal.ByteType", DataType.tinyint()) + .put("org.apache.cassandra.db.marshal.ShortType", DataType.smallint()) + .put(DURATION_TYPE, DataType.duration()) + .build(); + + static DataType parseOne( + String className, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + boolean frozen = false; + if (isReversed(className)) { + // Just skip the ReversedType part, we don't care + className = getNestedClassName(className); + } else if (isFrozen(className)) { + frozen = true; + className = getNestedClassName(className); + } - if (next.startsWith(SET_TYPE)) - return DataType.set(parseOne(parser.getTypeParameters().get(0), protocolVersion, codecRegistry), frozen); + Parser parser = new Parser(className, 0); + String next = parser.parseNextName(); - if (next.startsWith(MAP_TYPE)) { - List params = parser.getTypeParameters(); - return DataType.map(parseOne(params.get(0), protocolVersion, codecRegistry), parseOne(params.get(1), protocolVersion, codecRegistry), frozen); - } + if (next.startsWith(LIST_TYPE)) + return DataType.list( + parseOne(parser.getTypeParameters().get(0), protocolVersion, codecRegistry), frozen); - if (frozen) - logger.warn("Got o.a.c.db.marshal.FrozenType for something else than a collection, " - + "this driver version might be too old for your version of Cassandra"); - - if (isUserType(next)) { - ++parser.idx; // skipping '(' - - String keyspace = parser.readOne(); - parser.skipBlankAndComma(); - String typeName = TypeCodec.varchar().deserialize(Bytes.fromHexString("0x" + parser.readOne()), protocolVersion); - parser.skipBlankAndComma(); - Map rawFields 
= parser.getNameAndTypeParameters(); - List fields = new ArrayList(rawFields.size()); - for (Map.Entry entry : rawFields.entrySet()) - fields.add(new UserType.Field(entry.getKey(), parseOne(entry.getValue(), protocolVersion, codecRegistry))); - // create a frozen UserType since C* 2.x UDTs are always frozen. - return new UserType(keyspace, typeName, true, fields, protocolVersion, codecRegistry); - } + if (next.startsWith(SET_TYPE)) + return DataType.set( + parseOne(parser.getTypeParameters().get(0), protocolVersion, codecRegistry), frozen); - if (isTupleType(next)) { - List rawTypes = parser.getTypeParameters(); - List types = new ArrayList(rawTypes.size()); - for (String rawType : rawTypes) { - types.add(parseOne(rawType, protocolVersion, codecRegistry)); - } - return new TupleType(types, protocolVersion, codecRegistry); - } + if (next.startsWith(MAP_TYPE)) { + List params = parser.getTypeParameters(); + return DataType.map( + parseOne(params.get(0), protocolVersion, codecRegistry), + parseOne(params.get(1), protocolVersion, codecRegistry), + frozen); + } - DataType type = cassTypeToDataType.get(next); - return type == null ? DataType.custom(className) : type; + if (frozen) + logger.warn( + "Got o.a.c.db.marshal.FrozenType for something else than a collection, " + + "this driver version might be too old for your version of Cassandra"); + + if (isUserType(next)) { + ++parser.idx; // skipping '(' + + String keyspace = parser.readOne(); + parser.skipBlankAndComma(); + String typeName = + TypeCodec.varchar() + .deserialize(Bytes.fromHexString("0x" + parser.readOne()), protocolVersion); + parser.skipBlankAndComma(); + Map rawFields = parser.getNameAndTypeParameters(); + List fields = new ArrayList(rawFields.size()); + for (Map.Entry entry : rawFields.entrySet()) + fields.add( + new UserType.Field( + entry.getKey(), parseOne(entry.getValue(), protocolVersion, codecRegistry))); + // create a frozen UserType since C* 2.x UDTs are always frozen. + return new UserType(keyspace, typeName, true, fields, protocolVersion, codecRegistry); } - public static boolean isReversed(String className) { - return className.startsWith(REVERSED_TYPE); + if (isTupleType(next)) { + List rawTypes = parser.getTypeParameters(); + List types = new ArrayList(rawTypes.size()); + for (String rawType : rawTypes) { + types.add(parseOne(rawType, protocolVersion, codecRegistry)); + } + return new TupleType(types, protocolVersion, codecRegistry); } - public static boolean isFrozen(String className) { - return className.startsWith(FROZEN_TYPE); + DataType type = cassTypeToDataType.get(next); + return type == null ? 
DataType.custom(className) : type; + } + + public static boolean isReversed(String className) { + return className.startsWith(REVERSED_TYPE); + } + + public static boolean isFrozen(String className) { + return className.startsWith(FROZEN_TYPE); + } + + private static String getNestedClassName(String className) { + Parser p = new Parser(className, 0); + p.parseNextName(); + List l = p.getTypeParameters(); + if (l.size() != 1) throw new IllegalStateException(); + className = l.get(0); + return className; + } + + public static boolean isUserType(String className) { + return className.startsWith(UDT_TYPE); + } + + public static boolean isTupleType(String className) { + return className.startsWith(TUPLE_TYPE); + } + + private static boolean isComposite(String className) { + return className.startsWith(COMPOSITE_TYPE); + } + + private static boolean isCollection(String className) { + return className.startsWith(COLLECTION_TYPE); + } + + public static boolean isDuration(String className) { + return className.equals(DURATION_TYPE); + } + + static ParseResult parseWithComposite( + String className, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + Parser parser = new Parser(className, 0); + + String next = parser.parseNextName(); + if (!isComposite(next)) + return new ParseResult(parseOne(className, protocolVersion, codecRegistry), isReversed(next)); + + List subClassNames = parser.getTypeParameters(); + int count = subClassNames.size(); + String last = subClassNames.get(count - 1); + Map collections = new HashMap(); + if (isCollection(last)) { + count--; + Parser collectionParser = new Parser(last, 0); + collectionParser.parseNextName(); // skips columnToCollectionType + Map params = collectionParser.getCollectionsParameters(); + for (Map.Entry entry : params.entrySet()) + collections.put(entry.getKey(), parseOne(entry.getValue(), protocolVersion, codecRegistry)); } - private static String getNestedClassName(String className) { - Parser p = new Parser(className, 0); - p.parseNextName(); - List l = p.getTypeParameters(); - if (l.size() != 1) - throw new IllegalStateException(); - className = l.get(0); - return className; + List types = new ArrayList(count); + List reversed = new ArrayList(count); + for (int i = 0; i < count; i++) { + types.add(parseOne(subClassNames.get(i), protocolVersion, codecRegistry)); + reversed.add(isReversed(subClassNames.get(i))); } - public static boolean isUserType(String className) { - return className.startsWith(UDT_TYPE); + return new ParseResult(true, types, reversed, collections); + } + + static class ParseResult { + public final boolean isComposite; + public final List types; + public final List reversed; + public final Map collections; + + private ParseResult(DataType type, boolean reversed) { + this( + false, + Collections.singletonList(type), + Collections.singletonList(reversed), + Collections.emptyMap()); } - public static boolean isTupleType(String className) { - return className.startsWith(TUPLE_TYPE); + private ParseResult( + boolean isComposite, + List types, + List reversed, + Map collections) { + this.isComposite = isComposite; + this.types = types; + this.reversed = reversed; + this.collections = collections; } + } - private static boolean isComposite(String className) { - return className.startsWith(COMPOSITE_TYPE); + private static class Parser { + + private final String str; + private int idx; + + private Parser(String str, int idx) { + this.str = str; + this.idx = idx; } - private static boolean isCollection(String className) { - 
return className.startsWith(COLLECTION_TYPE); + public String parseNextName() { + skipBlank(); + return readNextIdentifier(); } - public static boolean isDuration(String className) { - return className.equals(DURATION_TYPE); + public String readOne() { + String name = parseNextName(); + String args = readRawArguments(); + return name + args; } - static ParseResult parseWithComposite(String className, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - Parser parser = new Parser(className, 0); - - String next = parser.parseNextName(); - if (!isComposite(next)) - return new ParseResult(parseOne(className, protocolVersion, codecRegistry), isReversed(next)); - - List subClassNames = parser.getTypeParameters(); - int count = subClassNames.size(); - String last = subClassNames.get(count - 1); - Map collections = new HashMap(); - if (isCollection(last)) { - count--; - Parser collectionParser = new Parser(last, 0); - collectionParser.parseNextName(); // skips columnToCollectionType - Map params = collectionParser.getCollectionsParameters(); - for (Map.Entry entry : params.entrySet()) - collections.put(entry.getKey(), parseOne(entry.getValue(), protocolVersion, codecRegistry)); - } + // Assumes we have just read a class name and read it's potential arguments + // blindly. I.e. it assume that either parsing is done or that we're on a '(' + // and this reads everything up until the corresponding closing ')'. It + // returns everything read, including the enclosing parenthesis. + private String readRawArguments() { + skipBlank(); - List types = new ArrayList(count); - List reversed = new ArrayList(count); - for (int i = 0; i < count; i++) { - types.add(parseOne(subClassNames.get(i), protocolVersion, codecRegistry)); - reversed.add(isReversed(subClassNames.get(i))); - } + if (isEOS() || str.charAt(idx) == ')' || str.charAt(idx) == ',') return ""; - return new ParseResult(true, types, reversed, collections); - } + if (str.charAt(idx) != '(') + throw new IllegalStateException( + String.format( + "Expecting char %d of %s to be '(' but '%c' found", idx, str, str.charAt(idx))); - static class ParseResult { - public final boolean isComposite; - public final List types; - public final List reversed; - public final Map collections; - - private ParseResult(DataType type, boolean reversed) { - this(false, - Collections.singletonList(type), - Collections.singletonList(reversed), - Collections.emptyMap()); - } + int i = idx; + int open = 1; + while (open > 0) { + ++idx; + + if (isEOS()) throw new IllegalStateException("Non closed parenthesis"); - private ParseResult(boolean isComposite, List types, List reversed, Map collections) { - this.isComposite = isComposite; - this.types = types; - this.reversed = reversed; - this.collections = collections; + if (str.charAt(idx) == '(') { + open++; + } else if (str.charAt(idx) == ')') { + open--; } + } + // we've stopped at the last closing ')' so move past that + ++idx; + return str.substring(i, idx); } - private static class Parser { + public List getTypeParameters() { + List list = new ArrayList(); - private final String str; - private int idx; + if (isEOS()) return list; - private Parser(String str, int idx) { - this.str = str; - this.idx = idx; - } + if (str.charAt(idx) != '(') throw new IllegalStateException(); - public String parseNextName() { - skipBlank(); - return readNextIdentifier(); - } + ++idx; // skipping '(' - public String readOne() { - String name = parseNextName(); - String args = readRawArguments(); - return name + args; + while 
(skipBlankAndComma()) { + if (str.charAt(idx) == ')') { + ++idx; + return list; } - // Assumes we have just read a class name and read it's potential arguments - // blindly. I.e. it assume that either parsing is done or that we're on a '(' - // and this reads everything up until the corresponding closing ')'. It - // returns everything read, including the enclosing parenthesis. - private String readRawArguments() { - skipBlank(); - - if (isEOS() || str.charAt(idx) == ')' || str.charAt(idx) == ',') - return ""; - - if (str.charAt(idx) != '(') - throw new IllegalStateException(String.format("Expecting char %d of %s to be '(' but '%c' found", idx, str, str.charAt(idx))); - - int i = idx; - int open = 1; - while (open > 0) { - ++idx; - - if (isEOS()) - throw new IllegalStateException("Non closed parenthesis"); - - if (str.charAt(idx) == '(') { - open++; - } else if (str.charAt(idx) == ')') { - open--; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); + try { + list.add(readOne()); + } catch (DriverInternalError e) { + throw new DriverInternalError( + String.format("Exception while parsing '%s' around char %d", str, idx), e); } + } + throw new DriverInternalError( + String.format( + "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + } - public List getTypeParameters() { - List list = new ArrayList(); + public Map getCollectionsParameters() { + if (isEOS()) return Collections.emptyMap(); - if (isEOS()) - return list; + if (str.charAt(idx) != '(') throw new IllegalStateException(); - if (str.charAt(idx) != '(') - throw new IllegalStateException(); + ++idx; // skipping '(' - ++idx; // skipping '(' + return getNameAndTypeParameters(); + } - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return list; - } + // Must be at the start of the first parameter to read + public Map getNameAndTypeParameters() { + // The order of the hashmap matters for UDT + Map map = new LinkedHashMap(); - try { - list.add(readOne()); - } catch (DriverInternalError e) { - throw new DriverInternalError(String.format("Exception while parsing '%s' around char %d", str, idx), e); - } - } - throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + while (skipBlankAndComma()) { + if (str.charAt(idx) == ')') { + ++idx; + return map; } - public Map getCollectionsParameters() { - if (isEOS()) - return Collections.emptyMap(); - - if (str.charAt(idx) != '(') - throw new IllegalStateException(); - - ++idx; // skipping '(' - - return getNameAndTypeParameters(); + String bbHex = readNextIdentifier(); + String name = null; + try { + name = + TypeCodec.varchar() + .deserialize(Bytes.fromHexString("0x" + bbHex), ProtocolVersion.NEWEST_SUPPORTED); + } catch (NumberFormatException e) { + throwSyntaxError(e.getMessage()); } - // Must be at the start of the first parameter to read - public Map getNameAndTypeParameters() { - // The order of the hashmap matters for UDT - Map map = new LinkedHashMap(); - - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return map; - } - - String bbHex = readNextIdentifier(); - String name = null; - try { - name = TypeCodec.varchar().deserialize(Bytes.fromHexString("0x" + bbHex), ProtocolVersion.NEWEST_SUPPORTED); - } catch (NumberFormatException e) { - throwSyntaxError(e.getMessage()); - } - - skipBlank(); - if (str.charAt(idx) != ':') - throwSyntaxError("expecting ':' token"); - - ++idx; - skipBlank(); - try { 
- map.put(name, readOne()); - } catch (DriverInternalError e) { - throw new DriverInternalError(String.format("Exception while parsing '%s' around char %d", str, idx), e); - } - } - throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } + skipBlank(); + if (str.charAt(idx) != ':') throwSyntaxError("expecting ':' token"); - private void throwSyntaxError(String msg) { - throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: %s", str, idx, msg)); + ++idx; + skipBlank(); + try { + map.put(name, readOne()); + } catch (DriverInternalError e) { + throw new DriverInternalError( + String.format("Exception while parsing '%s' around char %d", str, idx), e); } + } + throw new DriverInternalError( + String.format( + "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + } - private boolean isEOS() { - return isEOS(str, idx); - } + private void throwSyntaxError(String msg) { + throw new DriverInternalError( + String.format("Syntax error parsing '%s' at char %d: %s", str, idx, msg)); + } - private static boolean isEOS(String str, int i) { - return i >= str.length(); - } + private boolean isEOS() { + return isEOS(str, idx); + } - private void skipBlank() { - idx = skipBlank(str, idx); - } + private static boolean isEOS(String str, int i) { + return i >= str.length(); + } - private static int skipBlank(String str, int i) { - while (!isEOS(str, i) && ParseUtils.isBlank(str.charAt(i))) - ++i; + private void skipBlank() { + idx = skipBlank(str, idx); + } - return i; - } + private static int skipBlank(String str, int i) { + while (!isEOS(str, i) && ParseUtils.isBlank(str.charAt(i))) ++i; + + return i; + } - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) - return true; - else - commaFound = true; - } else if (!ParseUtils.isBlank(c)) { - return true; - } - ++idx; - } - return false; + // skip all blank and at best one comma, return true if there not EOS + private boolean skipBlankAndComma() { + boolean commaFound = false; + while (!isEOS()) { + int c = str.charAt(idx); + if (c == ',') { + if (commaFound) return true; + else commaFound = true; + } else if (!ParseUtils.isBlank(c)) { + return true; } + ++idx; + } + return false; + } - // left idx positioned on the character stopping the read - public String readNextIdentifier() { - int i = idx; - while (!isEOS() && ParseUtils.isIdentifierChar(str.charAt(idx))) - ++idx; + // left idx positioned on the character stopping the read + public String readNextIdentifier() { + int i = idx; + while (!isEOS() && ParseUtils.isIdentifierChar(str.charAt(idx))) ++idx; - return str.substring(i, idx); - } + return str.substring(i, idx); + } - @Override - public String toString() { - return str.substring(0, idx) + "[" + (idx == str.length() ? "" : str.charAt(idx)) + "]" + str.substring(idx + 1); - } + @Override + public String toString() { + return str.substring(0, idx) + + "[" + + (idx == str.length() ? 
"" : str.charAt(idx)) + + "]" + + str.substring(idx + 1); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataTypeCqlNameParser.java b/driver-core/src/main/java/com/datastax/driver/core/DataTypeCqlNameParser.java index 4b36b07f638..ee0d6744d76 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataTypeCqlNameParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataTypeCqlNameParser.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +17,42 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.DataType.ascii; +import static com.datastax.driver.core.DataType.bigint; +import static com.datastax.driver.core.DataType.blob; +import static com.datastax.driver.core.DataType.cboolean; +import static com.datastax.driver.core.DataType.cdouble; +import static com.datastax.driver.core.DataType.cfloat; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.counter; +import static com.datastax.driver.core.DataType.custom; +import static com.datastax.driver.core.DataType.date; +import static com.datastax.driver.core.DataType.decimal; +import static com.datastax.driver.core.DataType.duration; +import static com.datastax.driver.core.DataType.inet; +import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.set; +import static com.datastax.driver.core.DataType.smallint; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.DataType.time; +import static com.datastax.driver.core.DataType.timestamp; +import static com.datastax.driver.core.DataType.timeuuid; +import static com.datastax.driver.core.DataType.tinyint; +import static com.datastax.driver.core.DataType.uuid; +import static com.datastax.driver.core.DataType.varchar; +import static com.datastax.driver.core.DataType.varint; +import static com.datastax.driver.core.ParseUtils.isBlank; +import static com.datastax.driver.core.ParseUtils.isIdentifierChar; +import static com.datastax.driver.core.ParseUtils.skipSpaces; + import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.UnresolvedUserTypeException; import com.google.common.collect.ImmutableMap; - import java.util.ArrayList; import java.util.List; import java.util.Map; -import static com.datastax.driver.core.DataType.*; -import static com.datastax.driver.core.ParseUtils.*; - /* * Parse data types from schema tables, for Cassandra 3.0 and above. * In these versions, data types appear as string literals, like "ascii" or "tuple". 
@@ -36,265 +63,326 @@ */ class DataTypeCqlNameParser { - private static final String FROZEN = "frozen"; - private static final String LIST = "list"; - private static final String SET = "set"; - private static final String MAP = "map"; - private static final String TUPLE = "tuple"; - private static final String EMPTY = "empty"; - - private static final ImmutableMap NATIVE_TYPES_MAP = - new ImmutableMap.Builder() - .put("ascii", ascii()) - .put("bigint", bigint()) - .put("blob", blob()) - .put("boolean", cboolean()) - .put("counter", counter()) - .put("decimal", decimal()) - .put("double", cdouble()) - .put("float", cfloat()) - .put("inet", inet()) - .put("int", cint()) - .put("text", text()) - .put("varchar", varchar()) - .put("timestamp", timestamp()) - .put("date", date()) - .put("time", time()) - .put("uuid", uuid()) - .put("varint", varint()) - .put("timeuuid", timeuuid()) - .put("tinyint", tinyint()) - .put("smallint", smallint()) - // duration is not really a native CQL type, but appears as so in system tables - .put("duration", duration()) - .build(); - - /** - * @param currentUserTypes if this method gets called as part of a refresh that spans multiple user types, this contains the ones - * that have already been refreshed. If the type we are parsing references a user type, we want to pick its - * definition from this map in priority. - * @param oldUserTypes this contains all the keyspace's user types as they were before the refresh started. If we can't find a - * definition in {@code currentUserTypes}, we'll check this map as a fallback. - */ - static DataType parse(String toParse, Cluster cluster, String currentKeyspaceName, Map currentUserTypes, Map oldUserTypes, boolean frozen, boolean shallowUserTypes) { - - if (toParse.startsWith("'")) - return custom(toParse.substring(1, toParse.length() - 1)); - - Parser parser = new Parser(toParse, 0); - String type = parser.parseTypeName(); - - DataType nativeType = NATIVE_TYPES_MAP.get(type.toLowerCase()); - if (nativeType != null) - return nativeType; - - if (type.equalsIgnoreCase(LIST)) { - List parameters = parser.parseTypeParameters(); - if (parameters.size() != 1) - throw new DriverInternalError(String.format("Excepting single parameter for list, got %s", parameters)); - DataType elementType = parse(parameters.get(0), cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, false, shallowUserTypes); - return list(elementType, frozen); - } + private static final String FROZEN = "frozen"; + private static final String LIST = "list"; + private static final String SET = "set"; + private static final String MAP = "map"; + private static final String TUPLE = "tuple"; + private static final String EMPTY = "empty"; + + private static final ImmutableMap NATIVE_TYPES_MAP = + new ImmutableMap.Builder() + .put("ascii", ascii()) + .put("bigint", bigint()) + .put("blob", blob()) + .put("boolean", cboolean()) + .put("counter", counter()) + .put("decimal", decimal()) + .put("double", cdouble()) + .put("float", cfloat()) + .put("inet", inet()) + .put("int", cint()) + .put("text", text()) + .put("varchar", varchar()) + .put("timestamp", timestamp()) + .put("date", date()) + .put("time", time()) + .put("uuid", uuid()) + .put("varint", varint()) + .put("timeuuid", timeuuid()) + .put("tinyint", tinyint()) + .put("smallint", smallint()) + // duration is not really a native CQL type, but appears as so in system tables + .put("duration", duration()) + .build(); + + /** + * @param currentUserTypes if this method gets called as part of a refresh that 
spans multiple + * user types, this contains the ones that have already been refreshed. If the type we are + * parsing references a user type, we want to pick its definition from this map in priority. + * @param oldUserTypes this contains all the keyspace's user types as they were before the refresh + * started. If we can't find a definition in {@code currentUserTypes}, we'll check this map as + * a fallback. + */ + static DataType parse( + String toParse, + Cluster cluster, + String currentKeyspaceName, + Map currentUserTypes, + Map oldUserTypes, + boolean frozen, + boolean shallowUserTypes) { + + if (toParse.startsWith("'")) return custom(toParse.substring(1, toParse.length() - 1)); + + Parser parser = new Parser(toParse, 0); + String type = parser.parseTypeName(); + + DataType nativeType = NATIVE_TYPES_MAP.get(type.toLowerCase()); + if (nativeType != null) return nativeType; + + if (parser.isEOS()) { + // return a custom type for the special empty type + // so that it gets detected later on, see TableMetadata + if (type.equalsIgnoreCase(EMPTY)) return custom(type); + + // We need to remove escaped double quotes within the type name as it is stored unescaped. + // Otherwise it's a UDT. If we only want a shallow definition build it, otherwise search known + // definitions. + if (shallowUserTypes) + return new UserType.Shallow(currentKeyspaceName, Metadata.handleId(type), frozen); + + UserType userType = null; + if (currentUserTypes != null) userType = currentUserTypes.get(Metadata.handleId(type)); + if (userType == null && oldUserTypes != null) + userType = oldUserTypes.get(Metadata.handleId(type)); + + if (userType == null) throw new UnresolvedUserTypeException(currentKeyspaceName, type); + else return userType.copy(frozen); + } - if (type.equalsIgnoreCase(SET)) { - List parameters = parser.parseTypeParameters(); - if (parameters.size() != 1) - throw new DriverInternalError(String.format("Excepting single parameter for set, got %s", parameters)); - DataType elementType = parse(parameters.get(0), cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, false, shallowUserTypes); - return set(elementType, frozen); - } + List parameters = parser.parseTypeParameters(); + if (type.equalsIgnoreCase(LIST)) { + if (parameters.size() != 1) + throw new DriverInternalError( + String.format("Excepting single parameter for list, got %s", parameters)); + DataType elementType = + parse( + parameters.get(0), + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + false, + shallowUserTypes); + return list(elementType, frozen); + } - if (type.equalsIgnoreCase(MAP)) { - List parameters = parser.parseTypeParameters(); - if (parameters.size() != 2) - throw new DriverInternalError(String.format("Excepting two parameters for map, got %s", parameters)); - DataType keyType = parse(parameters.get(0), cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, false, shallowUserTypes); - DataType valueType = parse(parameters.get(1), cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, false, shallowUserTypes); - return map(keyType, valueType, frozen); - } + if (type.equalsIgnoreCase(SET)) { + if (parameters.size() != 1) + throw new DriverInternalError( + String.format("Excepting single parameter for set, got %s", parameters)); + DataType elementType = + parse( + parameters.get(0), + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + false, + shallowUserTypes); + return set(elementType, frozen); + } - if (type.equalsIgnoreCase(FROZEN)) { - List parameters = 
parser.parseTypeParameters(); - if (parameters.size() != 1) - throw new DriverInternalError(String.format("Excepting single parameter for frozen keyword, got %s", parameters)); - return parse(parameters.get(0), cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, true, shallowUserTypes); - } + if (type.equalsIgnoreCase(MAP)) { + if (parameters.size() != 2) + throw new DriverInternalError( + String.format("Excepting two parameters for map, got %s", parameters)); + DataType keyType = + parse( + parameters.get(0), + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + false, + shallowUserTypes); + DataType valueType = + parse( + parameters.get(1), + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + false, + shallowUserTypes); + return map(keyType, valueType, frozen); + } - if (type.equalsIgnoreCase(TUPLE)) { - List rawTypes = parser.parseTypeParameters(); - List types = new ArrayList(rawTypes.size()); - for (String rawType : rawTypes) { - types.add(parse(rawType, cluster, currentKeyspaceName, currentUserTypes, oldUserTypes, false, shallowUserTypes)); - } - return cluster.getMetadata().newTupleType(types); - } + if (type.equalsIgnoreCase(FROZEN)) { + if (parameters.size() != 1) + throw new DriverInternalError( + String.format("Excepting single parameter for frozen keyword, got %s", parameters)); + return parse( + parameters.get(0), + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + true, + shallowUserTypes); + } - // return a custom type for the special empty type - // so that it gets detected later on, see TableMetadata - if (type.equalsIgnoreCase(EMPTY)) - return custom(type); - - // We need to remove escaped double quotes within the type name as it is stored unescaped. - // Otherwise it's a UDT. If we only want a shallow definition build it, otherwise search known definitions. 
- if (shallowUserTypes) - return new UserType.Shallow(currentKeyspaceName, Metadata.handleId(type), frozen); - - UserType userType = null; - if (currentUserTypes != null) - userType = currentUserTypes.get(Metadata.handleId(type)); - if (userType == null && oldUserTypes != null) - userType = oldUserTypes.get(Metadata.handleId(type)); - - if (userType == null) - throw new UnresolvedUserTypeException(currentKeyspaceName, type); - else - return userType.copy(frozen); + if (type.equalsIgnoreCase(TUPLE)) { + if (parameters.isEmpty()) { + throw new IllegalArgumentException("Expecting at list one parameter for tuple, got none"); + } + List types = new ArrayList(parameters.size()); + for (String rawType : parameters) { + types.add( + parse( + rawType, + cluster, + currentKeyspaceName, + currentUserTypes, + oldUserTypes, + false, + shallowUserTypes)); + } + return cluster.getMetadata().newTupleType(types); } - private static class Parser { + throw new IllegalArgumentException("Could not parse type name " + toParse); + } - private final String str; + private static class Parser { - private int idx; + private final String str; - Parser(String str, int idx) { - this.str = str; - this.idx = idx; - } + private int idx; - String parseTypeName() { - idx = skipSpaces(str, idx); - return readNextIdentifier(); - } + Parser(String str, int idx) { + this.str = str; + this.idx = idx; + } - List parseTypeParameters() { - List list = new ArrayList(); + String parseTypeName() { + idx = skipSpaces(str, idx); + return readNextIdentifier(); + } - if (isEOS()) - return list; + List parseTypeParameters() { + List list = new ArrayList(); - skipBlankAndComma(); + if (isEOS()) return list; - if (str.charAt(idx) != '<') - throw new IllegalStateException(); + skipBlankAndComma(); - ++idx; // skipping '<' + if (str.charAt(idx) != '<') throw new IllegalStateException(); - while (skipBlankAndComma()) { - if (str.charAt(idx) == '>') { - ++idx; - return list; - } + ++idx; // skipping '<' - try { - String name = parseTypeName(); - String args = readRawTypeParameters(); - list.add(name + args); - } catch (DriverInternalError e) { - DriverInternalError ex = new DriverInternalError(String.format("Exception while parsing '%s' around char %d", str, idx)); - ex.initCause(e); - throw ex; - } - } - throw new DriverInternalError(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + while (skipBlankAndComma()) { + if (str.charAt(idx) == '>') { + ++idx; + return list; } - // left idx positioned on the character stopping the read - private String readNextIdentifier() { - int startIdx = idx; - if (str.charAt(startIdx) == '"') { // case-sensitive name included in double quotes - ++idx; - // read until closing quote. - while (!isEOS()) { - boolean atQuote = str.charAt(idx) == '"'; - ++idx; - if (atQuote) { - // if the next character is also a quote, this is an escaped - // quote, continue reading, otherwise stop. - if (!isEOS() && str.charAt(idx) == '"') - ++idx; - else - break; - } - } - } else if (str.charAt(startIdx) == '\'') { // custom type name included in single quotes - ++idx; - // read until closing quote. 
- while (!isEOS() && str.charAt(idx++) != '\'') { /* loop */ } - } else { - while (!isEOS() && (isIdentifierChar(str.charAt(idx)) || str.charAt(idx) == '"')) - ++idx; - } - return str.substring(startIdx, idx); + try { + String name = parseTypeName(); + String args = readRawTypeParameters(); + list.add(name + args); + } catch (DriverInternalError e) { + DriverInternalError ex = + new DriverInternalError( + String.format("Exception while parsing '%s' around char %d", str, idx)); + ex.initCause(e); + throw ex; } + } + throw new DriverInternalError( + String.format( + "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); + } - // Assumes we have just read a type name and read it's potential arguments - // blindly. I.e. it assume that either parsing is done or that we're on a '<' - // and this reads everything up until the corresponding closing '>'. It - // returns everything read, including the enclosing brackets. - private String readRawTypeParameters() { - idx = skipSpaces(str, idx); - - if (isEOS() || str.charAt(idx) == '>' || str.charAt(idx) == ',') - return ""; - - if (str.charAt(idx) != '<') - throw new IllegalStateException(String.format("Expecting char %d of %s to be '<' but '%c' found", idx, str, str.charAt(idx))); - - int i = idx; - int open = 1; - boolean inQuotes = false; - while (open > 0) { - ++idx; - - if (isEOS()) - throw new IllegalStateException("Non closed angle brackets"); - - // Only parse for '<' and '>' characters if not within a quoted identifier. - // Note we don't need to handle escaped quotes ("") in type names here, because they just cause inQuotes to flip - // to false and immediately back to true - if (!inQuotes) { - if (str.charAt(idx) == '"') { - inQuotes = true; - } else if (str.charAt(idx) == '<') { - open++; - } else if (str.charAt(idx) == '>') { - open--; - } - } else if (str.charAt(idx) == '"') { - inQuotes = false; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); + // left idx positioned on the character stopping the read + private String readNextIdentifier() { + int startIdx = idx; + if (str.charAt(startIdx) == '"') { // case-sensitive name included in double quotes + ++idx; + // read until closing quote. + while (!isEOS()) { + boolean atQuote = str.charAt(idx) == '"'; + ++idx; + if (atQuote) { + // if the next character is also a quote, this is an escaped + // quote, continue reading, otherwise stop. + if (!isEOS() && str.charAt(idx) == '"') ++idx; + else break; + } } - - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) - return true; - else - commaFound = true; - } else if (!isBlank(c)) { - return true; - } - ++idx; - } - return false; + } else if (str.charAt(startIdx) == '\'') { // custom type name included in single quotes + ++idx; + // read until closing quote. + while (!isEOS() && str.charAt(idx++) != '\'') { + /* loop */ } + } else { + while (!isEOS() && (isIdentifierChar(str.charAt(idx)) || str.charAt(idx) == '"')) ++idx; + } + return str.substring(startIdx, idx); + } - private boolean isEOS() { - return idx >= str.length(); + // Assumes we have just read a type name and read it's potential arguments + // blindly. I.e. it assume that either parsing is done or that we're on a '<' + // and this reads everything up until the corresponding closing '>'. 
It + // returns everything read, including the enclosing brackets. + private String readRawTypeParameters() { + idx = skipSpaces(str, idx); + + if (isEOS() || str.charAt(idx) == '>' || str.charAt(idx) == ',') return ""; + + if (str.charAt(idx) != '<') + throw new IllegalStateException( + String.format( + "Expecting char %d of %s to be '<' but '%c' found", idx, str, str.charAt(idx))); + + int i = idx; + int open = 1; + boolean inQuotes = false; + while (open > 0) { + ++idx; + + if (isEOS()) throw new IllegalStateException("Non closed angle brackets"); + + // Only parse for '<' and '>' characters if not within a quoted identifier. + // Note we don't need to handle escaped quotes ("") in type names here, because they just + // cause inQuotes to flip + // to false and immediately back to true + if (!inQuotes) { + if (str.charAt(idx) == '"') { + inQuotes = true; + } else if (str.charAt(idx) == '<') { + open++; + } else if (str.charAt(idx) == '>') { + open--; + } + } else if (str.charAt(idx) == '"') { + inQuotes = false; } + } + // we've stopped at the last closing ')' so move past that + ++idx; + return str.substring(i, idx); + } - @Override - public String toString() { - return str.substring(0, idx) + "[" + (idx == str.length() ? "" : str.charAt(idx)) + "]" + str.substring(idx + 1); + // skip all blank and at best one comma, return true if there not EOS + private boolean skipBlankAndComma() { + boolean commaFound = false; + while (!isEOS()) { + int c = str.charAt(idx); + if (c == ',') { + if (commaFound) return true; + else commaFound = true; + } else if (!isBlank(c)) { + return true; } + ++idx; + } + return false; + } + + private boolean isEOS() { + return idx >= str.length(); + } + + @Override + public String toString() { + return str.substring(0, idx) + + "[" + + (idx == str.length() ? "" : str.charAt(idx)) + + "]" + + str.substring(idx + 1); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java new file mode 100644 index 00000000000..367532edd5d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DefaultEndPointFactory implements EndPointFactory { + + private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); + private static final InetAddress BIND_ALL_ADDRESS; + + static { + try { + BIND_ALL_ADDRESS = InetAddress.getByAddress(new byte[4]); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + private volatile Cluster cluster; + + @Override + public void init(Cluster cluster) { + this.cluster = cluster; + } + + @Override + public EndPoint create(Row peersRow) { + if (peersRow.getColumnDefinitions().contains("native_address")) { + InetAddress nativeAddress = peersRow.getInet("native_address"); + int nativePort = peersRow.getInt("native_port"); + InetSocketAddress translateAddress = + cluster.manager.translateAddress(new InetSocketAddress(nativeAddress, nativePort)); + return new TranslatedAddressEndPoint(translateAddress); + } else if (peersRow.getColumnDefinitions().contains("native_transport_address")) { + InetAddress nativeAddress = peersRow.getInet("native_transport_address"); + int nativePort = peersRow.getInt("native_transport_port"); + if (cluster.getConfiguration().getProtocolOptions().getSSLOptions() != null + && !peersRow.isNull("native_transport_port_ssl")) { + nativePort = peersRow.getInt("native_transport_port_ssl"); + } + InetSocketAddress translateAddress = + cluster.manager.translateAddress(new InetSocketAddress(nativeAddress, nativePort)); + return new TranslatedAddressEndPoint(translateAddress); + } else { + InetAddress broadcastAddress = peersRow.getInet("peer"); + InetAddress rpcAddress = peersRow.getInet("rpc_address"); + if (broadcastAddress == null || rpcAddress == null) { + return null; + } else if (rpcAddress.equals(BIND_ALL_ADDRESS)) { + logger.warn( + "Found host with 0.0.0.0 as rpc_address, " + + "using broadcast_address ({}) to contact it instead. " + + "If this is incorrect you should avoid the use of 0.0.0.0 server side.", + broadcastAddress); + rpcAddress = broadcastAddress; + } + InetSocketAddress translateAddress = cluster.manager.translateAddress(rpcAddress); + return new TranslatedAddressEndPoint(translateAddress); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java index 9680664944b..7a75e9f57e2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultPreparedStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,241 +17,244 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ProtocolVersion.V4; + import com.datastax.driver.core.policies.RetryPolicy; import com.google.common.collect.ImmutableMap; - import java.nio.ByteBuffer; import java.util.List; import java.util.Map; -import static com.datastax.driver.core.ProtocolVersion.V4; - public class DefaultPreparedStatement implements PreparedStatement { - final PreparedId preparedId; - - final String query; - final String queryKeyspace; - final Map incomingPayload; - final Cluster cluster; - - volatile ByteBuffer routingKey; - - volatile ConsistencyLevel consistency; - volatile ConsistencyLevel serialConsistency; - volatile boolean traceQuery; - volatile RetryPolicy retryPolicy; - volatile ImmutableMap outgoingPayload; - volatile Boolean idempotent; - - private DefaultPreparedStatement(PreparedId id, String query, String queryKeyspace, Map incomingPayload, Cluster cluster) { - this.preparedId = id; - this.query = query; - this.queryKeyspace = queryKeyspace; - this.incomingPayload = incomingPayload; - this.cluster = cluster; - } - - static DefaultPreparedStatement fromMessage(Responses.Result.Prepared msg, Cluster cluster, String query, String queryKeyspace) { - assert msg.metadata.columns != null; - - ColumnDefinitions defs = msg.metadata.columns; - - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - - if (defs.size() == 0) { - return new DefaultPreparedStatement(new PreparedId(msg.statementId, defs, msg.resultMetadata.columns, null, protocolVersion), query, queryKeyspace, msg.getCustomPayload(), cluster); - } - - int[] pkIndices = (protocolVersion.compareTo(V4) >= 0) - ? msg.metadata.pkIndices - : computePkIndices(cluster.getMetadata(), defs); - - PreparedId prepId = new PreparedId(msg.statementId, defs, msg.resultMetadata.columns, pkIndices, protocolVersion); - - return new DefaultPreparedStatement(prepId, query, queryKeyspace, msg.getCustomPayload(), cluster); - } - - private static int[] computePkIndices(Metadata clusterMetadata, ColumnDefinitions boundColumns) { - List partitionKeyColumns = null; - int[] pkIndexes = null; - KeyspaceMetadata km = clusterMetadata.getKeyspace(Metadata.quote(boundColumns.getKeyspace(0))); - if (km != null) { - TableMetadata tm = km.getTable(Metadata.quote(boundColumns.getTable(0))); - if (tm != null) { - partitionKeyColumns = tm.getPartitionKey(); - pkIndexes = new int[partitionKeyColumns.size()]; - for (int i = 0; i < pkIndexes.length; ++i) - pkIndexes[i] = -1; - } - } - - // Note: we rely on the fact CQL queries cannot span multiple tables. If that change, we'll have to get smarter. - for (int i = 0; i < boundColumns.size(); i++) - maybeGetIndex(boundColumns.getName(i), i, partitionKeyColumns, pkIndexes); - - return allSet(pkIndexes) ? 
pkIndexes : null; + final PreparedId preparedId; + + final String query; + final String queryKeyspace; + final Map incomingPayload; + final Cluster cluster; + + volatile ByteBuffer routingKey; + + volatile ConsistencyLevel consistency; + volatile ConsistencyLevel serialConsistency; + volatile boolean traceQuery; + volatile RetryPolicy retryPolicy; + volatile ImmutableMap outgoingPayload; + volatile Boolean idempotent; + + private DefaultPreparedStatement( + PreparedId id, + String query, + String queryKeyspace, + Map incomingPayload, + Cluster cluster) { + this.preparedId = id; + this.query = query; + this.queryKeyspace = queryKeyspace; + this.incomingPayload = incomingPayload; + this.cluster = cluster; + } + + static DefaultPreparedStatement fromMessage( + Responses.Result.Prepared msg, Cluster cluster, String query, String queryKeyspace) { + assert msg.metadata.columns != null; + + ColumnDefinitions defs = msg.metadata.columns; + + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + PreparedId.PreparedMetadata boundValuesMetadata = + new PreparedId.PreparedMetadata(msg.statementId, defs); + PreparedId.PreparedMetadata resultSetMetadata = + new PreparedId.PreparedMetadata(msg.resultMetadataId, msg.resultMetadata.columns); + + int[] pkIndices = null; + if (defs.size() > 0) { + pkIndices = + (protocolVersion.compareTo(V4) >= 0) + ? msg.metadata.pkIndices + : computePkIndices(cluster.getMetadata(), defs); } - private static void maybeGetIndex(String name, int j, List pkColumns, int[] pkIndexes) { - if (pkColumns == null) - return; - - for (int i = 0; i < pkColumns.size(); ++i) { - if (name.equals(pkColumns.get(i).getName())) { - // We may have the same column prepared multiple times, but only pick the first value - pkIndexes[i] = j; - return; - } - } + PreparedId preparedId = + new PreparedId(boundValuesMetadata, resultSetMetadata, pkIndices, protocolVersion); + return new DefaultPreparedStatement( + preparedId, query, queryKeyspace, msg.getCustomPayload(), cluster); + } + + private static int[] computePkIndices(Metadata clusterMetadata, ColumnDefinitions boundColumns) { + List partitionKeyColumns = null; + int[] pkIndexes = null; + KeyspaceMetadata km = clusterMetadata.getKeyspace(Metadata.quote(boundColumns.getKeyspace(0))); + if (km != null) { + TableMetadata tm = km.getTable(Metadata.quote(boundColumns.getTable(0))); + if (tm != null) { + partitionKeyColumns = tm.getPartitionKey(); + pkIndexes = new int[partitionKeyColumns.size()]; + for (int i = 0; i < pkIndexes.length; ++i) pkIndexes[i] = -1; + } } - private static boolean allSet(int[] pkColumns) { - if (pkColumns == null) - return false; + // Note: we rely on the fact CQL queries cannot span multiple tables. If that change, we'll have + // to get smarter. + for (int i = 0; i < boundColumns.size(); i++) + maybeGetIndex(boundColumns.getName(i), i, partitionKeyColumns, pkIndexes); - for (int i = 0; i < pkColumns.length; ++i) - if (pkColumns[i] < 0) - return false; - - return true; - } - - @Override - public ColumnDefinitions getVariables() { - return preparedId.metadata; - } - - @Override - public BoundStatement bind(Object... 
values) { - BoundStatement bs = new BoundStatement(this); - return bs.bind(values); - } - - @Override - public BoundStatement bind() { - return new BoundStatement(this); - } - - @Override - public PreparedStatement setRoutingKey(ByteBuffer routingKey) { - this.routingKey = routingKey; - return this; - } - - @Override - public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents) { - this.routingKey = SimpleStatement.compose(routingKeyComponents); - return this; - } - - @Override - public ByteBuffer getRoutingKey() { - return routingKey; - } + return allSet(pkIndexes) ? pkIndexes : null; + } - @Override - public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency) { - this.consistency = consistency; - return this; - } - - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } - - @Override - public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { - if (!serialConsistency.isSerial()) - throw new IllegalArgumentException(); - this.serialConsistency = serialConsistency; - return this; - } - - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistency; - } - - @Override - public String getQueryString() { - return query; - } - - @Override - public String getQueryKeyspace() { - return queryKeyspace; - } - - @Override - public PreparedStatement enableTracing() { - this.traceQuery = true; - return this; - } - - @Override - public PreparedStatement disableTracing() { - this.traceQuery = false; - return this; - } - - @Override - public boolean isTracing() { - return traceQuery; - } - - @Override - public PreparedStatement setRetryPolicy(RetryPolicy policy) { - this.retryPolicy = policy; - return this; - } - - @Override - public RetryPolicy getRetryPolicy() { - return retryPolicy; - } - - @Override - public PreparedId getPreparedId() { - return preparedId; - } - - @Override - public Map getIncomingPayload() { - return incomingPayload; - } - - @Override - public Map getOutgoingPayload() { - return outgoingPayload; - } - - @Override - public PreparedStatement setOutgoingPayload(Map payload) { - this.outgoingPayload = payload == null ? null : ImmutableMap.copyOf(payload); - return this; - } - - @Override - public CodecRegistry getCodecRegistry() { - return cluster.getConfiguration().getCodecRegistry(); - } - - /** - * {@inheritDoc} - */ - @Override - public PreparedStatement setIdempotent(Boolean idempotent) { - this.idempotent = idempotent; - return this; - } + private static void maybeGetIndex( + String name, int j, List pkColumns, int[] pkIndexes) { + if (pkColumns == null) return; - /** - * {@inheritDoc} - */ - @Override - public Boolean isIdempotent() { - return this.idempotent; + for (int i = 0; i < pkColumns.size(); ++i) { + if (name.equals(pkColumns.get(i).getName())) { + // We may have the same column prepared multiple times, but only pick the first value + pkIndexes[i] = j; + return; + } } + } + + private static boolean allSet(int[] pkColumns) { + if (pkColumns == null) return false; + + for (int i = 0; i < pkColumns.length; ++i) if (pkColumns[i] < 0) return false; + + return true; + } + + @Override + public ColumnDefinitions getVariables() { + return preparedId.boundValuesMetadata.variables; + } + + @Override + public BoundStatement bind(Object... 
values) { + BoundStatement bs = new BoundStatement(this); + return bs.bind(values); + } + + @Override + public BoundStatement bind() { + return new BoundStatement(this); + } + + @Override + public PreparedStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + @Override + public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = SimpleStatement.compose(routingKeyComponents); + return this; + } + + @Override + public ByteBuffer getRoutingKey() { + return routingKey; + } + + @Override + public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + @Override + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + @Override + public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + if (!serialConsistency.isSerial()) throw new IllegalArgumentException(); + this.serialConsistency = serialConsistency; + return this; + } + + @Override + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + @Override + public String getQueryString() { + return query; + } + + @Override + public String getQueryKeyspace() { + return queryKeyspace; + } + + @Override + public PreparedStatement enableTracing() { + this.traceQuery = true; + return this; + } + + @Override + public PreparedStatement disableTracing() { + this.traceQuery = false; + return this; + } + + @Override + public boolean isTracing() { + return traceQuery; + } + + @Override + public PreparedStatement setRetryPolicy(RetryPolicy policy) { + this.retryPolicy = policy; + return this; + } + + @Override + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + @Override + public PreparedId getPreparedId() { + return preparedId; + } + + @Override + public Map getIncomingPayload() { + return incomingPayload; + } + + @Override + public Map getOutgoingPayload() { + return outgoingPayload; + } + + @Override + public PreparedStatement setOutgoingPayload(Map payload) { + this.outgoingPayload = payload == null ? null : ImmutableMap.copyOf(payload); + return this; + } + + @Override + public CodecRegistry getCodecRegistry() { + return cluster.getConfiguration().getCodecRegistry(); + } + + /** {@inheritDoc} */ + @Override + public PreparedStatement setIdempotent(Boolean idempotent) { + this.idempotent = idempotent; + return this; + } + + /** {@inheritDoc} */ + @Override + public Boolean isIdempotent() { + return this.idempotent; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java index ba2239337c8..b26c69ffaae 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultResultSetFuture.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,317 +17,363 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; + +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.QueryExecutionException; +import com.datastax.driver.core.exceptions.QueryValidationException; import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.Uninterruptibles; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.datastax.driver.core.SchemaElement.KEYSPACE; - -/** - * Internal implementation of ResultSetFuture. - */ -class DefaultResultSetFuture extends AbstractFuture implements ResultSetFuture, RequestHandler.Callback { +/** Internal implementation of ResultSetFuture. */ +class DefaultResultSetFuture extends AbstractFuture + implements ResultSetFuture, RequestHandler.Callback { - private static final Logger logger = LoggerFactory.getLogger(ResultSetFuture.class); + private static final Logger logger = LoggerFactory.getLogger(ResultSetFuture.class); - private final SessionManager session; - private final ProtocolVersion protocolVersion; - private final Message.Request request; - private volatile RequestHandler handler; + private final SessionManager session; + private final ProtocolVersion protocolVersion; + private final Message.Request request; + private volatile RequestHandler handler; - DefaultResultSetFuture(SessionManager session, ProtocolVersion protocolVersion, Message.Request request) { - this.session = session; - this.protocolVersion = protocolVersion; - this.request = request; - } + DefaultResultSetFuture( + SessionManager session, ProtocolVersion protocolVersion, Message.Request request) { + this.session = session; + this.protocolVersion = protocolVersion; + this.request = request; + } - @Override - public void register(RequestHandler handler) { - this.handler = handler; - } + @Override + public void register(RequestHandler handler) { + this.handler = handler; + } - @Override - public Message.Request request() { - return request; - } + @Override + public Message.Request request() { + return request; + } - @Override - public void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency) { - try { - switch (response.type) { - case RESULT: - Responses.Result rm = (Responses.Result) response; - switch (rm.kind) { - case SET_KEYSPACE: - // propagate the keyspace change to other connections - session.poolsState.setKeyspace(((Responses.Result.SetKeyspace) rm).keyspace); - set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, 
info, statement)); - break; - case SCHEMA_CHANGE: - ResultSet rs = ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement); - final Cluster.Manager cluster = session.cluster.manager; - if (!cluster.configuration.getQueryOptions().isMetadataEnabled()) { - cluster.waitForSchemaAgreementAndSignal(connection, this, rs); + @Override + public void onSet( + Connection connection, + Message.Response response, + ExecutionInfo info, + Statement statement, + long latency) { + try { + switch (response.type) { + case RESULT: + Responses.Result rm = (Responses.Result) response; + switch (rm.kind) { + case SET_KEYSPACE: + // propagate the keyspace change to other connections + session.poolsState.setKeyspace(((Responses.Result.SetKeyspace) rm).keyspace); + set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement)); + break; + case SCHEMA_CHANGE: + ResultSet rs = + ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement); + final Cluster.Manager cluster = session.cluster.manager; + if (!cluster.configuration.getQueryOptions().isMetadataEnabled()) { + cluster.waitForSchemaAgreementAndSignal(connection, this, rs); + } else { + Responses.Result.SchemaChange scc = (Responses.Result.SchemaChange) rm; + switch (scc.change) { + case CREATED: + case UPDATED: + cluster.refreshSchemaAndSignal( + connection, + this, + rs, + scc.targetType, + scc.targetKeyspace, + scc.targetName, + scc.targetSignature); + break; + case DROPPED: + if (scc.targetType == KEYSPACE) { + // If that the one keyspace we are logged in, reset to null (it shouldn't + // really happen but ...) + // Note: Actually, Cassandra doesn't do that so we don't either as this could + // confuse prepared statements. + // We'll add it back if CASSANDRA-5358 changes that behavior + // if (scc.keyspace.equals(session.poolsState.keyspace)) + // session.poolsState.setKeyspace(null); + final KeyspaceMetadata removedKeyspace = + cluster.metadata.removeKeyspace(scc.targetKeyspace); + if (removedKeyspace != null) { + cluster.executor.submit( + new Runnable() { + @Override + public void run() { + cluster.metadata.triggerOnKeyspaceRemoved(removedKeyspace); + } + }); + } + } else { + KeyspaceMetadata keyspace = + session.cluster.manager.metadata.keyspaces.get(scc.targetKeyspace); + if (keyspace == null) { + logger.warn( + "Received a DROPPED notification for {} {}.{}, but this keyspace is unknown in our metadata", + scc.targetType, + scc.targetKeyspace, + scc.targetName); + } else { + switch (scc.targetType) { + case TABLE: + // we can't tell whether it's a table or a view, + // but since two objects cannot have the same name, + // try removing both + final TableMetadata removedTable = keyspace.removeTable(scc.targetName); + if (removedTable != null) { + cluster.executor.submit( + new Runnable() { + @Override + public void run() { + cluster.metadata.triggerOnTableRemoved(removedTable); + } + }); } else { - Responses.Result.SchemaChange scc = (Responses.Result.SchemaChange) rm; - switch (scc.change) { - case CREATED: - case UPDATED: - cluster.refreshSchemaAndSignal(connection, this, rs, scc.targetType, scc.targetKeyspace, scc.targetName, scc.targetSignature); - break; - case DROPPED: - if (scc.targetType == KEYSPACE) { - // If that the one keyspace we are logged in, reset to null (it shouldn't really happen but ...) - // Note: Actually, Cassandra doesn't do that so we don't either as this could confuse prepared statements. 
- // We'll add it back if CASSANDRA-5358 changes that behavior - //if (scc.keyspace.equals(session.poolsState.keyspace)) - // session.poolsState.setKeyspace(null); - final KeyspaceMetadata removedKeyspace = cluster.metadata.removeKeyspace(scc.targetKeyspace); - if (removedKeyspace != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnKeyspaceRemoved(removedKeyspace); - } - }); - } - } else { - KeyspaceMetadata keyspace = session.cluster.manager.metadata.keyspaces.get(scc.targetKeyspace); - if (keyspace == null) { - logger.warn("Received a DROPPED notification for {} {}.{}, but this keyspace is unknown in our metadata", - scc.targetType, scc.targetKeyspace, scc.targetName); - } else { - switch (scc.targetType) { - case TABLE: - // we can't tell whether it's a table or a view, - // but since two objects cannot have the same name, - // try removing both - final TableMetadata removedTable = keyspace.removeTable(scc.targetName); - if (removedTable != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnTableRemoved(removedTable); - } - }); - } else { - final MaterializedViewMetadata removedView = keyspace.removeMaterializedView(scc.targetName); - if (removedView != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnMaterializedViewRemoved(removedView); - } - }); - } - } - break; - case TYPE: - final UserType removedType = keyspace.removeUserType(scc.targetName); - if (removedType != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnUserTypeRemoved(removedType); - } - }); - } - break; - case FUNCTION: - final FunctionMetadata removedFunction = keyspace.removeFunction(Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); - if (removedFunction != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnFunctionRemoved(removedFunction); - } - }); - } - break; - case AGGREGATE: - final AggregateMetadata removedAggregate = keyspace.removeAggregate(Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); - if (removedAggregate != null) { - cluster.executor.submit(new Runnable() { - @Override - public void run() { - cluster.metadata.triggerOnAggregateRemoved(removedAggregate); - } - }); - } - break; - } - } - } - session.cluster.manager.waitForSchemaAgreementAndSignal(connection, this, rs); - break; - default: - logger.info("Ignoring unknown schema change result"); - break; - } + final MaterializedViewMetadata removedView = + keyspace.removeMaterializedView(scc.targetName); + if (removedView != null) { + cluster.executor.submit( + new Runnable() { + @Override + public void run() { + cluster.metadata.triggerOnMaterializedViewRemoved( + removedView); + } + }); + } + } + break; + case TYPE: + final UserType removedType = keyspace.removeUserType(scc.targetName); + if (removedType != null) { + cluster.executor.submit( + new Runnable() { + @Override + public void run() { + cluster.metadata.triggerOnUserTypeRemoved(removedType); + } + }); } break; - default: - set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement)); + case FUNCTION: + final FunctionMetadata removedFunction = + keyspace.removeFunction( + Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); + if (removedFunction != null) { + cluster.executor.submit( + new Runnable() { + @Override + public 
void run() { + cluster.metadata.triggerOnFunctionRemoved(removedFunction); + } + }); + } + break; + case AGGREGATE: + final AggregateMetadata removedAggregate = + keyspace.removeAggregate( + Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); + if (removedAggregate != null) { + cluster.executor.submit( + new Runnable() { + @Override + public void run() { + cluster.metadata.triggerOnAggregateRemoved(removedAggregate); + } + }); + } break; + } + } } + session.cluster.manager.waitForSchemaAgreementAndSignal(connection, this, rs); break; - case ERROR: - setException(((Responses.Error) response).asException(connection.address)); + default: + logger.info("Ignoring unknown schema change result"); break; - default: - // This mean we have probably have a bad node, so defunct the connection - connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); - setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); - break; - } - } catch (Throwable e) { - // If we get a bug here, the client will not get it, so better forwarding the error - setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); - } + } + } + break; + default: + set(ArrayBackedResultSet.fromMessage(rm, session, protocolVersion, info, statement)); + break; + } + break; + case ERROR: + setException(((Responses.Error) response).asException(connection.endPoint)); + break; + default: + // This mean we have probably have a bad node, so defunct the connection + connection.defunct( + new ConnectionException( + connection.endPoint, String.format("Got unexpected %s response", response.type))); + setException( + new DriverInternalError( + String.format( + "Got unexpected %s response from %s", response.type, connection.endPoint))); + break; + } + } catch (Throwable e) { + // If we get a bug here, the client will not get it, so better forwarding the error + setException( + new DriverInternalError( + "Unexpected error while processing response from " + connection.endPoint, e)); } + } - @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - // This is only called for internal calls (i.e, when the callback is not wrapped in ResponseHandler), - // so don't bother with ExecutionInfo. - onSet(connection, response, null, null, latency); - } + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + // This is only called for internal calls (i.e, when the callback is not wrapped in + // ResponseHandler), + // so don't bother with ExecutionInfo. + onSet(connection, response, null, null, latency); + } - @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - setException(exception); - } + @Override + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + setException(exception); + } - @Override - public boolean onTimeout(Connection connection, long latency, int retryCount) { - // This is only called for internal calls (i.e, when the future is not wrapped in RequestHandler). - // So just set an exception for the final result, which should be handled correctly by said internal call. 
- setException(new OperationTimedOutException(connection.address)); - return true; - } + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + // This is only called for internal calls (i.e, when the future is not wrapped in + // RequestHandler). + // So just set an exception for the final result, which should be handled correctly by said + // internal call. + setException(new OperationTimedOutException(connection.endPoint)); + return true; + } - // We sometimes need (in the driver) to set the future from outside this class, - // but AbstractFuture#set is protected so this method. We don't want it public - // however, no particular reason to give users rope to hang themselves. - void setResult(ResultSet rs) { - set(rs); - } + // We sometimes need (in the driver) to set the future from outside this class, + // but AbstractFuture#set is protected so this method. We don't want it public + // however, no particular reason to give users rope to hang themselves. + void setResult(ResultSet rs) { + set(rs); + } - /** - * Waits for the query to return and return its result. - *
<p/>
-     * This method is usually more convenient than {@link #get} because it:
-     * <ul>
-     * <li>Waits for the result uninterruptibly, and so doesn't throw
-     * {@link InterruptedException}.</li>
-     * <li>Returns meaningful exceptions, instead of having to deal
-     * with ExecutionException.</li>
-     * </ul>
- * As such, it is the preferred way to get the future result. - * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, that is an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query is invalid (syntax error, - * unauthorized or any other validation problem). - */ - @Override - public ResultSet getUninterruptibly() { - try { - return Uninterruptibles.getUninterruptibly(this); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + /** + * Waits for the query to return and return its result. + * + *
<p>This method is usually more convenient than {@link #get} because it:
+   *
+   * <ul>
+   *   <li>Waits for the result uninterruptibly, and so doesn't throw {@link InterruptedException}.
+   *   <li>Returns meaningful exceptions, instead of having to deal with ExecutionException.
+   * </ul>
+ * + * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, that is an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + */ + @Override + public ResultSet getUninterruptibly() { + try { + return Uninterruptibles.getUninterruptibly(this); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } + } - /** - * Waits for the provided time for the query to return and return its - * result if available. - *
<p/>
-     * This method is usually more convenient than {@link #get} because it:
-     * <ul>
-     * <li>Waits for the result uninterruptibly, and so doesn't throw
-     * {@link InterruptedException}.</li>
-     * <li>Returns meaningful exceptions, instead of having to deal
-     * with ExecutionException.</li>
-     * </ul>
- * As such, it is the preferred way to get the future result. - * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, that is an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws TimeoutException if the wait timed out (Note that this is - * different from a Cassandra timeout, which is a {@code - * QueryExecutionException}). - */ - @Override - public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException { - try { - return Uninterruptibles.getUninterruptibly(this, timeout, unit); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } + /** + * Waits for the provided time for the query to return and return its result if available. + * + *
<p>This method is usually more convenient than {@link #get} because it:
+   *
+   * <ul>
+   *   <li>Waits for the result uninterruptibly, and so doesn't throw {@link InterruptedException}.
+   *   <li>Returns meaningful exceptions, instead of having to deal with ExecutionException.
+   * </ul>
+ * + * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, that is an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any + * other validation problem). + * @throws TimeoutException if the wait timed out (Note that this is different from a Cassandra + * timeout, which is a {@code QueryExecutionException}). + */ + @Override + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException { + try { + return Uninterruptibles.getUninterruptibly(this, timeout, unit); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); } + } - /** - * Attempts to cancel the execution of the request corresponding to this - * future. This attempt will fail if the request has already returned. - *
<p/>
- * Please note that this only cancels the request driver side, but nothing - * is done to interrupt the execution of the request Cassandra side (and that even - * if {@code mayInterruptIfRunning} is true) since Cassandra does not - * support such interruption. - *
<p/>
-     * This method can be used to ensure no more work is performed driver side
-     * (which, while it doesn't include stopping a request already submitted
-     * to a Cassandra node, may include not retrying another Cassandra host on
-     * failure/timeout) if the ResultSet is not going to be retried. Typically,
-     * the code to wait for a request result for a maximum of 1 second could
-     * look like:
-     * <p/>
-     * <pre>
-     *   ResultSetFuture future = session.executeAsync(...some query...);
-     *   try {
-     *       ResultSet result = future.get(1, TimeUnit.SECONDS);
-     *       ... process result ...
-     *   } catch (TimeoutException e) {
-     *       future.cancel(true); // Ensure any resource used by this query driver
-     *                            // side is released immediately
-     *       ... handle timeout ...
-     *   }
-     * 
-     * </pre>
-     * @param mayInterruptIfRunning the value of this parameter is currently
-     *                              ignored.
-     * @return {@code false} if the future could not be cancelled (it has already
-     * completed normally); {@code true} otherwise.
-     */
-    @Override
-    public boolean cancel(boolean mayInterruptIfRunning) {
-        if (!super.cancel(mayInterruptIfRunning))
-            return false;
+  /**
+   * Attempts to cancel the execution of the request corresponding to this
+   * future. This attempt will fail if the request has already returned.
+   * <p/>
+ * Please note that this only cancels the request driver side, but nothing + * is done to interrupt the execution of the request Cassandra side (and that even + * if {@code mayInterruptIfRunning} is true) since Cassandra does not + * support such interruption. + *
<p/>
+   * This method can be used to ensure no more work is performed driver side
+   * (which, while it doesn't include stopping a request already submitted
+   * to a Cassandra node, may include not retrying another Cassandra host on
+   * failure/timeout) if the ResultSet is not going to be retried. Typically,
+   * the code to wait for a request result for a maximum of 1 second could
+   * look like:
+   * <p/>
+   * <pre>
+   *   ResultSetFuture future = session.executeAsync(...some query...);
+   *   try {
+   *       ResultSet result = future.get(1, TimeUnit.SECONDS);
+   *       ... process result ...
+   *   } catch (TimeoutException e) {
+   *       future.cancel(true); // Ensure any resource used by this query driver
+   *                            // side is released immediately
+   *       ... handle timeout ...
+   *   }
+   * </pre>
+   *
+   * @param mayInterruptIfRunning the value of this parameter is currently
+   *                              ignored.
+   * @return {@code false} if the future could not be cancelled (it has already
+   * completed normally); {@code true} otherwise.
+   */
+  @Override
+  public boolean cancel(boolean mayInterruptIfRunning) {
+    if (!super.cancel(mayInterruptIfRunning)) return false;
 
-        if (handler != null) {
-            handler.cancel();
-        }
-        return true;
+    if (handler != null) {
+      handler.cancel();
     }
+    return true;
+  }
 
-    @Override
-    public int retryCount() {
-        // This is only called for internal calls (i.e, when the future is not wrapped in RequestHandler).
-        // There is no retry logic in that case, so the value does not really matter.
-        return 0;
-    }
+  @Override
+  public int retryCount() {
+    // This is only called for internal calls (i.e, when the future is not wrapped in
+    // RequestHandler).
+    // There is no retry logic in that case, so the value does not really matter.
+    return 0;
+  }
 }
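
For reference, the `getUninterruptibly`/`cancel` contract documented in the Javadoc above, as a small runnable sketch. It assumes the 3.x driver-core artifact on the classpath and a node reachable at 127.0.0.1; the query and the 1-second timeout are placeholders, not part of this change.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class AsyncReadWithTimeout {
  public static void main(String[] args) {
    // Placeholder contact point; adjust for a real cluster.
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Session session = cluster.connect();
      ResultSetFuture future = session.executeAsync("SELECT release_version FROM system.local");
      try {
        // Bounded, uninterruptible wait, as in the Javadoc example above.
        ResultSet rs = future.getUninterruptibly(1, TimeUnit.SECONDS);
        System.out.println(rs.one().getString("release_version"));
      } catch (TimeoutException e) {
        // Stops driver-side work (e.g. retries on other hosts); the request
        // already sent to Cassandra is not interrupted.
        future.cancel(true);
      }
    } finally {
      cluster.close();
    }
  }
}
```
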
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java b/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java
index bf8c06f6f4f..30ee4274071 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/DelegatingCluster.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,133 +18,138 @@
 package com.datastax.driver.core;
 
 import com.google.common.util.concurrent.ListenableFuture;
-
-import java.net.InetSocketAddress;
 import java.util.Collections;
 
 /**
- * Base class for custom {@link Cluster} implementations that wrap another instance (delegate / decorator pattern).
+ * Base class for custom {@link Cluster} implementations that wrap another instance (delegate /
+ * decorator pattern).
  */
 public abstract class DelegatingCluster extends Cluster {
-    /**
-     * Builds a new instance.
-     */
-    protected DelegatingCluster() {
-        // Implementation notes:
-        // If Cluster was an interface, delegates would be trivial to write. But, for historical reasons, it's a class,
-        // and changing that would break backward compatibility. That makes delegates rather convoluted and error-prone
-        // to write, so we provide DelegatingCluster to abstract the details.
-        // This class ensures that:
-        // - init() is never called on the parent class, because that would initialize the Cluster.Manager instance and
-        //   create a lot of internal state (thread pools, etc.) that we don't need, since another Cluster instance is
-        //   already handling the calls.
-        // - all public methods are properly forwarded to the delegate (otherwise they would call the parent class and
-        //   return inconsistent results).
-        // These two goals are closely related, since a lot of public methods call init(), so accidentally calling a
-        // parent method could initialize the parent state.
-
-        // Construct parent class with dummy parameters that will never get used (since super.init() is never called).
-        super("delegating_cluster", Collections.emptyList(), null);
-
-        // Immediately close the parent class's internal Manager, to make sure that it will fail fast if it's ever
-        // accidentally invoked.
-        super.closeAsync();
-    }
-
-    /**
-     * Returns the delegate instance where all calls will be forwarded.
-     *
-     * @return the delegate.
-     */
-    protected abstract Cluster delegate();
-
-    @Override
-    public Cluster init() {
-        return delegate().init();
-    }
-
-    @Override
-    public Session newSession() {
-        return delegate().newSession();
-    }
-
-    @Override
-    public Session connect() {
-        return delegate().connect();
-    }
-
-    @Override
-    public Session connect(String keyspace) {
-        return delegate().connect(keyspace);
-    }
-
-    @Override
-    public ListenableFuture connectAsync() {
-        return delegate().connectAsync();
-    }
-
-    @Override
-    public ListenableFuture connectAsync(String keyspace) {
-        return delegate().connectAsync(keyspace);
-    }
-
-    @Override
-    public Metadata getMetadata() {
-        return delegate().getMetadata();
-    }
-
-    @Override
-    public Configuration getConfiguration() {
-        return delegate().getConfiguration();
-    }
-
-    @Override
-    public Metrics getMetrics() {
-        return delegate().getMetrics();
-    }
-
-    @Override
-    public Cluster register(Host.StateListener listener) {
-        return delegate().register(listener);
-    }
-
-    @Override
-    public Cluster unregister(Host.StateListener listener) {
-        return delegate().unregister(listener);
-    }
-
-    @Override
-    public Cluster register(LatencyTracker tracker) {
-        return delegate().register(tracker);
-    }
-
-    @Override
-    public Cluster unregister(LatencyTracker tracker) {
-        return delegate().unregister(tracker);
-    }
-
-    @Override
-    public Cluster register(SchemaChangeListener listener) {
-        return delegate().register(listener);
-    }
-
-    @Override
-    public Cluster unregister(SchemaChangeListener listener) {
-        return delegate().unregister(listener);
-    }
-
-    @Override
-    public CloseFuture closeAsync() {
-        return delegate().closeAsync();
-    }
-
-    @Override
-    public void close() {
-        delegate().close();
-    }
-
-    @Override
-    public boolean isClosed() {
-        return delegate().isClosed();
-    }
+  /** Builds a new instance. */
+  protected DelegatingCluster() {
+    // Implementation notes:
+    // If Cluster was an interface, delegates would be trivial to write. But, for historical
+    // reasons, it's a class,
+    // and changing that would break backward compatibility. That makes delegates rather convoluted
+    // and error-prone
+    // to write, so we provide DelegatingCluster to abstract the details.
+    // This class ensures that:
+    // - init() is never called on the parent class, because that would initialize the
+    // Cluster.Manager instance and
+    //   create a lot of internal state (thread pools, etc.) that we don't need, since another
+    // Cluster instance is
+    //   already handling the calls.
+    // - all public methods are properly forwarded to the delegate (otherwise they would call the
+    // parent class and
+    //   return inconsistent results).
+    // These two goals are closely related, since a lot of public methods call init(), so
+    // accidentally calling a
+    // parent method could initialize the parent state.
+
+    // Construct parent class with dummy parameters that will never get used (since super.init() is
+    // never called).
+    super("delegating_cluster", Collections.emptyList(), null);
+
+    // Immediately close the parent class's internal Manager, to make sure that it will fail fast if
+    // it's ever
+    // accidentally invoked.
+    super.closeAsync();
+  }
+
+  /**
+   * Returns the delegate instance where all calls will be forwarded.
+   *
+   * @return the delegate.
+   */
+  protected abstract Cluster delegate();
+
+  @Override
+  public Cluster init() {
+    return delegate().init();
+  }
+
+  @Override
+  public Session newSession() {
+    return delegate().newSession();
+  }
+
+  @Override
+  public Session connect() {
+    return delegate().connect();
+  }
+
+  @Override
+  public Session connect(String keyspace) {
+    return delegate().connect(keyspace);
+  }
+
+  @Override
+  public ListenableFuture connectAsync() {
+    return delegate().connectAsync();
+  }
+
+  @Override
+  public ListenableFuture connectAsync(String keyspace) {
+    return delegate().connectAsync(keyspace);
+  }
+
+  @Override
+  public Metadata getMetadata() {
+    return delegate().getMetadata();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return delegate().getConfiguration();
+  }
+
+  @Override
+  public Metrics getMetrics() {
+    return delegate().getMetrics();
+  }
+
+  @Override
+  public Cluster register(Host.StateListener listener) {
+    return delegate().register(listener);
+  }
+
+  @Override
+  public Cluster unregister(Host.StateListener listener) {
+    return delegate().unregister(listener);
+  }
+
+  @Override
+  public Cluster register(LatencyTracker tracker) {
+    return delegate().register(tracker);
+  }
+
+  @Override
+  public Cluster unregister(LatencyTracker tracker) {
+    return delegate().unregister(tracker);
+  }
+
+  @Override
+  public Cluster register(SchemaChangeListener listener) {
+    return delegate().register(listener);
+  }
+
+  @Override
+  public Cluster unregister(SchemaChangeListener listener) {
+    return delegate().unregister(listener);
+  }
+
+  @Override
+  public CloseFuture closeAsync() {
+    return delegate().closeAsync();
+  }
+
+  @Override
+  public void close() {
+    delegate().close();
+  }
+
+  @Override
+  public boolean isClosed() {
+    return delegate().isClosed();
+  }
 }
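
A minimal sketch of the delegate/decorator pattern that `DelegatingCluster` exists for; the `LoggingCluster` name and the logging behavior are illustrative only.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.DelegatingCluster;
import com.datastax.driver.core.Session;

/** Wraps an existing Cluster and logs every connect() call; everything else forwards as-is. */
public class LoggingCluster extends DelegatingCluster {

  private final Cluster delegate;

  public LoggingCluster(Cluster delegate) {
    this.delegate = delegate;
  }

  @Override
  protected Cluster delegate() {
    return delegate;
  }

  @Override
  public Session connect() {
    System.out.println("Opening a new session");
    return super.connect(); // DelegatingCluster forwards this to delegate().connect()
  }
}
```

Because the parent class's dummy internal state is closed in the protected constructor, a method that accidentally bypassed the delegate would fail fast instead of silently initializing a second cluster.
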
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DirectedGraph.java b/driver-core/src/main/java/com/datastax/driver/core/DirectedGraph.java
index 5987c511d71..4a28063d27a 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/DirectedGraph.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/DirectedGraph.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,75 +23,85 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
 
-import java.util.*;
-
-/**
- * A basic directed graph implementation to perform topological sorts.
- */
+/** A basic directed graph implementation to perform topological sorts. */
 class DirectedGraph {
 
-    // We need to keep track of the predecessor count. For simplicity, use a map to store it alongside the vertices.
-    final Map vertices;
-    final Multimap adjacencyList;
-    boolean wasSorted;
+  // We need to keep track of the predecessor count. For simplicity, use a map to store it alongside
+  // the vertices.
+  final Map vertices;
+  final Multimap adjacencyList;
+  boolean wasSorted;
+  final Comparator comparator;
 
-    DirectedGraph(List vertices) {
-        this.vertices = Maps.newHashMapWithExpectedSize(vertices.size());
-        this.adjacencyList = HashMultimap.create();
+  DirectedGraph(Comparator comparator, List vertices) {
+    this.comparator = comparator;
+    this.vertices = Maps.newHashMapWithExpectedSize(vertices.size());
+    this.adjacencyList = HashMultimap.create();
 
-        for (V vertex : vertices) {
-            this.vertices.put(vertex, 0);
-        }
+    for (V vertex : vertices) {
+      this.vertices.put(vertex, 0);
     }
+  }
 
-    DirectedGraph(V... vertices) {
-        this(Arrays.asList(vertices));
-    }
+  DirectedGraph(Comparator comparator, V... vertices) {
+    this(comparator, Arrays.asList(vertices));
+  }
 
-    /**
-     * this assumes that {@code from} and {@code to} were part of the vertices passed to the constructor
-     */
-    void addEdge(V from, V to) {
-        Preconditions.checkArgument(vertices.containsKey(from) && vertices.containsKey(to));
-        adjacencyList.put(from, to);
-        vertices.put(to, vertices.get(to) + 1);
-    }
+  /**
+   * this assumes that {@code from} and {@code to} were part of the vertices passed to the
+   * constructor
+   */
+  void addEdge(V from, V to) {
+    Preconditions.checkArgument(vertices.containsKey(from) && vertices.containsKey(to));
+    adjacencyList.put(from, to);
+    vertices.put(to, vertices.get(to) + 1);
+  }
 
-    /**
-     * one-time use only, calling this multiple times on the same graph won't work
-     */
-    List topologicalSort() {
-        Preconditions.checkState(!wasSorted);
-        wasSorted = true;
+  /** one-time use only, calling this multiple times on the same graph won't work */
+  List topologicalSort() {
+    Preconditions.checkState(!wasSorted);
+    wasSorted = true;
 
-        Queue queue = new LinkedList();
+    Queue queue = new LinkedList();
 
-        for (Map.Entry entry : vertices.entrySet()) {
-            if (entry.getValue() == 0)
-                queue.add(entry.getKey());
-        }
+    // Sort vertices so order of evaluation is always the same (instead of depending on undefined
+    // map order behavior)
+    List orderedVertices = new ArrayList(vertices.keySet());
+    Collections.sort(orderedVertices, comparator);
+    for (V v : orderedVertices) {
+      if (vertices.get(v) == 0) queue.add(v);
+    }
 
-        List result = Lists.newArrayList();
-        while (!queue.isEmpty()) {
-            V vertex = queue.remove();
-            result.add(vertex);
-            for (V successor : adjacencyList.get(vertex)) {
-                if (decrementAndGetCount(successor) == 0)
-                    queue.add(successor);
-            }
-        }
+    List result = Lists.newArrayList();
+    while (!queue.isEmpty()) {
+      V vertex = queue.remove();
+      result.add(vertex);
+      List adjacentVertices = new ArrayList(adjacencyList.get(vertex));
+      Collections.sort(adjacentVertices, comparator);
+      for (V successor : adjacentVertices) {
+        if (decrementAndGetCount(successor) == 0) queue.add(successor);
+      }
+    }
 
-        if (result.size() != vertices.size())
-            throw new DriverInternalError("failed to perform topological sort, graph has a cycle");
+    if (result.size() != vertices.size())
+      throw new DriverInternalError("failed to perform topological sort, graph has a cycle");
 
-        return result;
-    }
+    return result;
+  }
 
-    private int decrementAndGetCount(V vertex) {
-        Integer count = vertices.get(vertex);
-        count = count - 1;
-        vertices.put(vertex, count);
-        return count;
-    }
+  private int decrementAndGetCount(V vertex) {
+    Integer count = vertices.get(vertex);
+    count = count - 1;
+    vertices.put(vertex, count);
+    return count;
+  }
 }
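
The change above makes the sort deterministic by ordering both the initial queue and each adjacency list with a comparator instead of relying on undefined `HashMap` iteration order. Below is a self-contained sketch of the same algorithm (Kahn's topological sort with comparator-based tie-breaking); it mirrors the package-private `DirectedGraph` rather than calling it, and the `udt_*` vertex names are made up.

```
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

public class TopoSortSketch {

  /** Kahn's algorithm; ties are broken with the comparator so results are deterministic. */
  static <V> List<V> topologicalSort(Map<V, Set<V>> edges, Comparator<V> comparator) {
    // Count incoming edges for every vertex.
    Map<V, Integer> inDegree = new HashMap<V, Integer>();
    for (V v : edges.keySet()) inDegree.put(v, 0);
    for (Set<V> successors : edges.values())
      for (V to : successors) inDegree.put(to, inDegree.getOrDefault(to, 0) + 1);

    // Seed the queue with the roots, in comparator order (not map order).
    List<V> roots = new ArrayList<V>();
    for (Map.Entry<V, Integer> e : inDegree.entrySet()) if (e.getValue() == 0) roots.add(e.getKey());
    Collections.sort(roots, comparator);
    Queue<V> queue = new ArrayDeque<V>(roots);

    List<V> result = new ArrayList<V>();
    while (!queue.isEmpty()) {
      V vertex = queue.remove();
      result.add(vertex);
      // Visit successors in comparator order as well.
      List<V> successors = new ArrayList<V>(edges.getOrDefault(vertex, Collections.<V>emptySet()));
      Collections.sort(successors, comparator);
      for (V successor : successors) {
        int remaining = inDegree.get(successor) - 1;
        inDegree.put(successor, remaining);
        if (remaining == 0) queue.add(successor);
      }
    }

    if (result.size() != inDegree.size())
      throw new IllegalStateException("graph has a cycle");
    return result;
  }

  public static void main(String[] args) {
    // udt_a must be created before udt_b and udt_c, and udt_b before udt_c.
    Map<String, Set<String>> edges = new HashMap<String, Set<String>>();
    edges.put("udt_a", new HashSet<String>(Arrays.asList("udt_b", "udt_c")));
    edges.put("udt_b", new HashSet<String>(Collections.singletonList("udt_c")));
    edges.put("udt_c", Collections.<String>emptySet());
    // Prints [udt_a, udt_b, udt_c]
    System.out.println(topologicalSort(edges, Comparator.<String>naturalOrder()));
  }
}
```
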
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DriverThrowables.java b/driver-core/src/main/java/com/datastax/driver/core/DriverThrowables.java
index 9e692a2c506..582aa7107ab 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/DriverThrowables.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/DriverThrowables.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,25 +19,22 @@
 
 import com.datastax.driver.core.exceptions.DriverException;
 import com.datastax.driver.core.exceptions.DriverInternalError;
-
 import java.util.concurrent.ExecutionException;
 
 class DriverThrowables {
 
-    static RuntimeException propagateCause(ExecutionException e) {
-        Throwable cause = e.getCause();
+  static RuntimeException propagateCause(ExecutionException e) {
+    Throwable cause = e.getCause();
 
-        if (cause instanceof Error)
-            throw ((Error) cause);
+    if (cause instanceof Error) throw ((Error) cause);
 
-        // We could just rethrow e.getCause(). However, the cause of the ExecutionException has likely been
-        // created on the I/O thread receiving the response. Which means that the stacktrace associated
-        // with said cause will make no mention of the current thread. This is painful for say, finding
-        // out which execute() statement actually raised the exception. So instead, we re-create the
-        // exception.
-        if (cause instanceof DriverException)
-            throw ((DriverException) cause).copy();
-        else
-            throw new DriverInternalError("Unexpected exception thrown", cause);
-    }
+    // We could just rethrow e.getCause(). However, the cause of the ExecutionException has likely
+    // been
+    // created on the I/O thread receiving the response. Which means that the stacktrace associated
+    // with said cause will make no mention of the current thread. This is painful for say, finding
+    // out which execute() statement actually raised the exception. So instead, we re-create the
+    // exception.
+    if (cause instanceof DriverException) throw ((DriverException) cause).copy();
+    else throw new DriverInternalError("Unexpected exception thrown", cause);
+  }
 }
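
The comment above explains why the cause is copied rather than rethrown: the original exception was created on the I/O thread that received the response, so its stack trace says nothing about the caller's `execute()`. Below is a self-contained sketch of that idea; `FakeDriverException` and its `copy()` are stand-ins for illustration, not the driver's real `DriverException`.

```
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PropagateCauseSketch {

  /** Stand-in for DriverException: copy() re-creates the exception on the calling thread. */
  static class FakeDriverException extends RuntimeException {
    FakeDriverException(String message, Throwable cause) {
      super(message, cause);
    }

    FakeDriverException copy() {
      // The new instance's stack trace starts here, i.e. on the thread calling copy().
      return new FakeDriverException(getMessage(), this);
    }
  }

  static RuntimeException propagateCause(ExecutionException e) {
    Throwable cause = e.getCause();
    if (cause instanceof Error) throw (Error) cause;
    if (cause instanceof FakeDriverException) throw ((FakeDriverException) cause).copy();
    throw new IllegalStateException("Unexpected exception thrown", cause);
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService ioThread = Executors.newSingleThreadExecutor();
    // Simulates an error raised on the I/O thread that received the response.
    Runnable task =
        () -> {
          throw new FakeDriverException("server error", null);
        };
    Future<?> future = ioThread.submit(task);
    try {
      future.get();
    } catch (ExecutionException e) {
      try {
        throw propagateCause(e);
      } catch (FakeDriverException rethrown) {
        // The first frame now points at the calling thread; the original is kept as the cause.
        System.out.println(rethrown.getStackTrace()[0]);
        System.out.println("caused by: " + rethrown.getCause());
      }
    } finally {
      ioThread.shutdown();
    }
  }
}
```
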
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Duration.java b/driver-core/src/main/java/com/datastax/driver/core/Duration.java
index 729b0c99543..f097f8e38fa 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Duration.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Duration.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,575 +17,556 @@
  */
 package com.datastax.driver.core;
 
-import com.google.common.base.Objects;
+import static com.google.common.base.Preconditions.checkArgument;
 
+import com.google.common.base.Objects;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import static com.google.common.base.Preconditions.checkArgument;
-
-
 /**
- * Represents a duration. A duration stores separately months, days, and seconds due to the fact that
- * the number of days in a month varies, and a day can have 23 or 25 hours if a daylight saving is involved.
+ * Represents a duration. A duration stores separately months, days, and seconds due to the fact
+ * that the number of days in a month varies, and a day can have 23 or 25 hours if a daylight saving
+ * is involved.
  */
 public final class Duration {
 
-    static final long NANOS_PER_MICRO = 1000L;
-    static final long NANOS_PER_MILLI = 1000 * NANOS_PER_MICRO;
-    static final long NANOS_PER_SECOND = 1000 * NANOS_PER_MILLI;
-    static final long NANOS_PER_MINUTE = 60 * NANOS_PER_SECOND;
-    static final long NANOS_PER_HOUR = 60 * NANOS_PER_MINUTE;
-    static final int DAYS_PER_WEEK = 7;
-    static final int MONTHS_PER_YEAR = 12;
+  static final long NANOS_PER_MICRO = 1000L;
+  static final long NANOS_PER_MILLI = 1000 * NANOS_PER_MICRO;
+  static final long NANOS_PER_SECOND = 1000 * NANOS_PER_MILLI;
+  static final long NANOS_PER_MINUTE = 60 * NANOS_PER_SECOND;
+  static final long NANOS_PER_HOUR = 60 * NANOS_PER_MINUTE;
+  static final int DAYS_PER_WEEK = 7;
+  static final int MONTHS_PER_YEAR = 12;
+
+  /** The Regexp used to parse the duration provided as String. */
+  private static final Pattern STANDARD_PATTERN =
+      Pattern.compile(
+          "\\G(\\d+)(y|Y|mo|MO|mO|Mo|w|W|d|D|h|H|s|S|ms|MS|mS|Ms|us|US|uS|Us|µs|µS|ns|NS|nS|Ns|m|M)");
+
+  /**
+   * The Regexp used to parse the duration when provided in the ISO 8601 format with designators.
+   */
+  private static final Pattern ISO8601_PATTERN =
+      Pattern.compile("P((\\d+)Y)?((\\d+)M)?((\\d+)D)?(T((\\d+)H)?((\\d+)M)?((\\d+)S)?)?");
+
+  /**
+   * The Regexp used to parse the duration when provided in the ISO 8601 format with designators.
+   */
+  private static final Pattern ISO8601_WEEK_PATTERN = Pattern.compile("P(\\d+)W");
+
+  /** The Regexp used to parse the duration when provided in the ISO 8601 alternative format. */
+  private static final Pattern ISO8601_ALTERNATIVE_PATTERN =
+      Pattern.compile("P(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2})");
+
+  /** The number of months. */
+  private final int months;
+
+  /** The number of days. */
+  private final int days;
+
+  /** The number of nanoseconds. */
+  private final long nanoseconds;
+
+  private Duration(int months, int days, long nanoseconds) {
+    // Makes sure that all the values are negative if one of them is
+    if ((months < 0 || days < 0 || nanoseconds < 0)
+        && ((months > 0 || days > 0 || nanoseconds > 0))) {
+      throw new IllegalArgumentException(
+          String.format(
+              "All values must be either negative or positive, got %d months, %d days, %d nanoseconds",
+              months, days, nanoseconds));
+    }
+    this.months = months;
+    this.days = days;
+    this.nanoseconds = nanoseconds;
+  }
+
+  /**
+   * Creates a duration with the given number of months, days and nanoseconds.
+   *
+   * <p>
A duration can be negative. In this case, all the non zero values must be negative. + * + * @param months the number of months + * @param days the number of days + * @param nanoseconds the number of nanoseconds + * @throws IllegalArgumentException if the values are not all negative or all positive + */ + public static Duration newInstance(int months, int days, long nanoseconds) { + return new Duration(months, days, nanoseconds); + } + + /** + * Converts a String into a duration. + * + *
<p>The accepted formats are:
+   *
+   * <ul>
+   *   <li>multiple digits followed by a time unit like: 12h30m where the time unit can be:
+   *       <ul>
+   *         <li>{@code y}: years
+   *         <li>{@code m}: months
+   *         <li>{@code w}: weeks
+   *         <li>{@code d}: days
+   *         <li>{@code h}: hours
+   *         <li>{@code m}: minutes
+   *         <li>{@code s}: seconds
+   *         <li>{@code ms}: milliseconds
+   *         <li>{@code us} or {@code µs}: microseconds
+   *         <li>{@code ns}: nanoseconds
+   *       </ul>
+   *   <li>ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
+   *   <li>ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
+   * </ul>
+ * + * @param input the String to convert + * @return a {@link Duration} + */ + public static Duration from(String input) { + boolean isNegative = input.startsWith("-"); + String source = isNegative ? input.substring(1) : input; + + if (source.startsWith("P")) { + if (source.endsWith("W")) return parseIso8601WeekFormat(isNegative, source); + + if (source.contains("-")) return parseIso8601AlternativeFormat(isNegative, source); + + return parseIso8601Format(isNegative, source); + } + return parseStandardFormat(isNegative, source); + } - /** - * The Regexp used to parse the duration provided as String. - */ - private static final Pattern STANDARD_PATTERN = - Pattern.compile("\\G(\\d+)(y|Y|mo|MO|mO|Mo|w|W|d|D|h|H|s|S|ms|MS|mS|Ms|us|US|uS|Us|µs|µS|ns|NS|nS|Ns|m|M)"); + private static Duration parseIso8601Format(boolean isNegative, String source) { + Matcher matcher = ISO8601_PATTERN.matcher(source); + if (!matcher.matches()) + throw new IllegalArgumentException( + String.format("Unable to convert '%s' to a duration", source)); - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_PATTERN = - Pattern.compile("P((\\d+)Y)?((\\d+)M)?((\\d+)D)?(T((\\d+)H)?((\\d+)M)?((\\d+)S)?)?"); + Builder builder = new Builder(isNegative); + if (matcher.group(1) != null) builder.addYears(groupAsLong(matcher, 2)); - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_WEEK_PATTERN = Pattern.compile("P(\\d+)W"); + if (matcher.group(3) != null) builder.addMonths(groupAsLong(matcher, 4)); - /** - * The Regexp used to parse the duration when provided in the ISO 8601 alternative format. - */ - private static final Pattern ISO8601_ALTERNATIVE_PATTERN = - Pattern.compile("P(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2})"); + if (matcher.group(5) != null) builder.addDays(groupAsLong(matcher, 6)); - /** - * The number of months. - */ - private final int months; + // Checks if the String contains time information + if (matcher.group(7) != null) { + if (matcher.group(8) != null) builder.addHours(groupAsLong(matcher, 9)); - /** - * The number of days. 
- */ - private final int days; + if (matcher.group(10) != null) builder.addMinutes(groupAsLong(matcher, 11)); + + if (matcher.group(12) != null) builder.addSeconds(groupAsLong(matcher, 13)); + } + return builder.build(); + } + + private static Duration parseIso8601AlternativeFormat(boolean isNegative, String source) { + Matcher matcher = ISO8601_ALTERNATIVE_PATTERN.matcher(source); + if (!matcher.matches()) + throw new IllegalArgumentException( + String.format("Unable to convert '%s' to a duration", source)); + + return new Builder(isNegative) + .addYears(groupAsLong(matcher, 1)) + .addMonths(groupAsLong(matcher, 2)) + .addDays(groupAsLong(matcher, 3)) + .addHours(groupAsLong(matcher, 4)) + .addMinutes(groupAsLong(matcher, 5)) + .addSeconds(groupAsLong(matcher, 6)) + .build(); + } + + private static Duration parseIso8601WeekFormat(boolean isNegative, String source) { + Matcher matcher = ISO8601_WEEK_PATTERN.matcher(source); + if (!matcher.matches()) + throw new IllegalArgumentException( + String.format("Unable to convert '%s' to a duration", source)); + + return new Builder(isNegative).addWeeks(groupAsLong(matcher, 1)).build(); + } + + private static Duration parseStandardFormat(boolean isNegative, String source) { + Matcher matcher = STANDARD_PATTERN.matcher(source); + if (!matcher.find()) + throw new IllegalArgumentException( + String.format("Unable to convert '%s' to a duration", source)); + + Builder builder = new Builder(isNegative); + boolean done; + + do { + long number = groupAsLong(matcher, 1); + String symbol = matcher.group(2); + add(builder, number, symbol); + done = matcher.end() == source.length(); + } while (matcher.find()); + + if (!done) + throw new IllegalArgumentException( + String.format("Unable to convert '%s' to a duration", source)); + + return builder.build(); + } + + private static long groupAsLong(Matcher matcher, int group) { + return Long.parseLong(matcher.group(group)); + } + + private static Builder add(Builder builder, long number, String symbol) { + String s = symbol.toLowerCase(); + if (s.equals("y")) { + return builder.addYears(number); + } else if (s.equals("mo")) { + return builder.addMonths(number); + } else if (s.equals("w")) { + return builder.addWeeks(number); + } else if (s.equals("d")) { + return builder.addDays(number); + } else if (s.equals("h")) { + return builder.addHours(number); + } else if (s.equals("m")) { + return builder.addMinutes(number); + } else if (s.equals("s")) { + return builder.addSeconds(number); + } else if (s.equals("ms")) { + return builder.addMillis(number); + } else if (s.equals("us") || s.equals("µs")) { + return builder.addMicros(number); + } else if (s.equals("ns")) { + return builder.addNanos(number); + } + throw new IllegalArgumentException(String.format("Unknown duration symbol '%s'", symbol)); + } + + /** + * Appends the result of the division to the specified builder if the dividend is not zero. + * + * @param builder the builder to append to + * @param dividend the dividend + * @param divisor the divisor + * @param unit the time unit to append after the result of the division + * @return the remainder of the division + */ + private static long append(StringBuilder builder, long dividend, long divisor, String unit) { + if (dividend == 0 || dividend < divisor) return dividend; + + builder.append(dividend / divisor).append(unit); + return dividend % divisor; + } + + /** + * Returns the number of months in this duration. + * + * @return the number of months in this duration. 
+ */ + public int getMonths() { + return months; + } + + /** + * Returns the number of days in this duration. + * + * @return the number of days in this duration. + */ + public int getDays() { + return days; + } + + /** + * Returns the number of nanoseconds in this duration. + * + * @return the number of months in this duration. + */ + public long getNanoseconds() { + return nanoseconds; + } + + @Override + public int hashCode() { + return Objects.hashCode(days, months, nanoseconds); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Duration)) return false; + + Duration other = (Duration) obj; + return days == other.days && months == other.months && nanoseconds == other.nanoseconds; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + + if (months < 0 || days < 0 || nanoseconds < 0) builder.append('-'); + + long remainder = append(builder, Math.abs(months), MONTHS_PER_YEAR, "y"); + append(builder, remainder, 1, "mo"); + + append(builder, Math.abs(days), 1, "d"); + + if (nanoseconds != 0) { + remainder = append(builder, Math.abs(nanoseconds), NANOS_PER_HOUR, "h"); + remainder = append(builder, remainder, NANOS_PER_MINUTE, "m"); + remainder = append(builder, remainder, NANOS_PER_SECOND, "s"); + remainder = append(builder, remainder, NANOS_PER_MILLI, "ms"); + remainder = append(builder, remainder, NANOS_PER_MICRO, "us"); + append(builder, remainder, 1, "ns"); + } + return builder.toString(); + } + + private static class Builder { + /** {@code true} if the duration is a negative one, {@code false} otherwise. */ + private final boolean isNegative; + + /** The number of months. */ + private int months; + + /** The number of days. */ + private int days; + + /** The number of nanoseconds. */ + private long nanoseconds; + + /** We need to make sure that the values for each units are provided in order. */ + private int currentUnitIndex; + + public Builder(boolean isNegative) { + this.isNegative = isNegative; + } /** - * The number of nanoseconds. + * Adds the specified amount of years. + * + * @param numberOfYears the number of years to add. + * @return this {@code Builder} */ - private final long nanoseconds; - - private Duration(int months, int days, long nanoseconds) { - // Makes sure that all the values are negative if one of them is - if ((months < 0 || days < 0 || nanoseconds < 0) && ((months > 0 || days > 0 || nanoseconds > 0))) { - throw new IllegalArgumentException(String.format( - "All values must be either negative or positive, got %d months, %d days, %d nanoseconds", months, days, nanoseconds)); - } - this.months = months; - this.days = days; - this.nanoseconds = nanoseconds; + public Builder addYears(long numberOfYears) { + validateOrder(1); + validateMonths(numberOfYears, MONTHS_PER_YEAR); + months += numberOfYears * MONTHS_PER_YEAR; + return this; } /** - * Creates a duration with the given number of months, days and nanoseconds. - *

- * A duration can be negative. In this case, all the non zero values must be negative. + * Adds the specified amount of months. * - * @param months the number of months - * @param days the number of days - * @param nanoseconds the number of nanoseconds - * @throws IllegalArgumentException if the values are not all negative or all positive + * @param numberOfMonths the number of months to add. + * @return this {@code Builder} */ - public static Duration newInstance(int months, int days, long nanoseconds) { - return new Duration(months, days, nanoseconds); + public Builder addMonths(long numberOfMonths) { + validateOrder(2); + validateMonths(numberOfMonths, 1); + months += numberOfMonths; + return this; } /** - * Converts a String into a duration. - *

The accepted formats are: - *

- * <ul>
- * <li>multiple digits followed by a time unit like: 12h30m where the time unit can be:
- * <ul>
- * <li>{@code y}: years</li>
- * <li>{@code m}: months</li>
- * <li>{@code w}: weeks</li>
- * <li>{@code d}: days</li>
- * <li>{@code h}: hours</li>
- * <li>{@code m}: minutes</li>
- * <li>{@code s}: seconds</li>
- * <li>{@code ms}: milliseconds</li>
- * <li>{@code us} or {@code µs}: microseconds</li>
- * <li>{@code ns}: nanoseconds</li>
- * </ul>
- * </li>
- * <li>ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W</li>
- * <li>ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]</li>
- * </ul>
+ * Adds the specified amount of weeks. * - * @param input the String to convert - * @return a {@link Duration} + * @param numberOfWeeks the number of weeks to add. + * @return this {@code Builder} */ - public static Duration from(String input) { - boolean isNegative = input.startsWith("-"); - String source = isNegative ? input.substring(1) : input; - - if (source.startsWith("P")) { - if (source.endsWith("W")) - return parseIso8601WeekFormat(isNegative, source); - - if (source.contains("-")) - return parseIso8601AlternativeFormat(isNegative, source); - - return parseIso8601Format(isNegative, source); - } - return parseStandardFormat(isNegative, source); + public Builder addWeeks(long numberOfWeeks) { + validateOrder(3); + validateDays(numberOfWeeks, DAYS_PER_WEEK); + days += numberOfWeeks * DAYS_PER_WEEK; + return this; } - private static Duration parseIso8601Format(boolean isNegative, String source) { - Matcher matcher = ISO8601_PATTERN.matcher(source); - if (!matcher.matches()) - throw new IllegalArgumentException(String.format("Unable to convert '%s' to a duration", source)); - - Builder builder = new Builder(isNegative); - if (matcher.group(1) != null) - builder.addYears(groupAsLong(matcher, 2)); - - if (matcher.group(3) != null) - builder.addMonths(groupAsLong(matcher, 4)); - - if (matcher.group(5) != null) - builder.addDays(groupAsLong(matcher, 6)); - - // Checks if the String contains time information - if (matcher.group(7) != null) { - if (matcher.group(8) != null) - builder.addHours(groupAsLong(matcher, 9)); - - if (matcher.group(10) != null) - builder.addMinutes(groupAsLong(matcher, 11)); - - if (matcher.group(12) != null) - builder.addSeconds(groupAsLong(matcher, 13)); - } - return builder.build(); + /** + * Adds the specified amount of days. + * + * @param numberOfDays the number of days to add. + * @return this {@code Builder} + */ + public Builder addDays(long numberOfDays) { + validateOrder(4); + validateDays(numberOfDays, 1); + days += numberOfDays; + return this; } - private static Duration parseIso8601AlternativeFormat(boolean isNegative, String source) { - Matcher matcher = ISO8601_ALTERNATIVE_PATTERN.matcher(source); - if (!matcher.matches()) - throw new IllegalArgumentException(String.format("Unable to convert '%s' to a duration", source)); - - return new Builder(isNegative).addYears(groupAsLong(matcher, 1)) - .addMonths(groupAsLong(matcher, 2)) - .addDays(groupAsLong(matcher, 3)) - .addHours(groupAsLong(matcher, 4)) - .addMinutes(groupAsLong(matcher, 5)) - .addSeconds(groupAsLong(matcher, 6)) - .build(); + /** + * Adds the specified amount of hours. + * + * @param numberOfHours the number of hours to add. + * @return this {@code Builder} + */ + public Builder addHours(long numberOfHours) { + validateOrder(5); + validateNanos(numberOfHours, NANOS_PER_HOUR); + nanoseconds += numberOfHours * NANOS_PER_HOUR; + return this; } - private static Duration parseIso8601WeekFormat(boolean isNegative, String source) { - Matcher matcher = ISO8601_WEEK_PATTERN.matcher(source); - if (!matcher.matches()) - throw new IllegalArgumentException(String.format("Unable to convert '%s' to a duration", source)); - - return new Builder(isNegative).addWeeks(groupAsLong(matcher, 1)) - .build(); + /** + * Adds the specified amount of minutes. + * + * @param numberOfMinutes the number of minutes to add. 
+ * @return this {@code Builder} + */ + public Builder addMinutes(long numberOfMinutes) { + validateOrder(6); + validateNanos(numberOfMinutes, NANOS_PER_MINUTE); + nanoseconds += numberOfMinutes * NANOS_PER_MINUTE; + return this; } - private static Duration parseStandardFormat(boolean isNegative, String source) { - Matcher matcher = STANDARD_PATTERN.matcher(source); - if (!matcher.find()) - throw new IllegalArgumentException(String.format("Unable to convert '%s' to a duration", source)); - - Builder builder = new Builder(isNegative); - boolean done; - - do { - long number = groupAsLong(matcher, 1); - String symbol = matcher.group(2); - add(builder, number, symbol); - done = matcher.end() == source.length(); - } - while (matcher.find()); - - if (!done) - throw new IllegalArgumentException(String.format("Unable to convert '%s' to a duration", source)); - - return builder.build(); + /** + * Adds the specified amount of seconds. + * + * @param numberOfSeconds the number of seconds to add. + * @return this {@code Builder} + */ + public Builder addSeconds(long numberOfSeconds) { + validateOrder(7); + validateNanos(numberOfSeconds, NANOS_PER_SECOND); + nanoseconds += numberOfSeconds * NANOS_PER_SECOND; + return this; } - private static long groupAsLong(Matcher matcher, int group) { - return Long.parseLong(matcher.group(group)); + /** + * Adds the specified amount of milliseconds. + * + * @param numberOfMillis the number of milliseconds to add. + * @return this {@code Builder} + */ + public Builder addMillis(long numberOfMillis) { + validateOrder(8); + validateNanos(numberOfMillis, NANOS_PER_MILLI); + nanoseconds += numberOfMillis * NANOS_PER_MILLI; + return this; } - private static Builder add(Builder builder, long number, String symbol) { - String s = symbol.toLowerCase(); - if (s.equals("y")) { - return builder.addYears(number); - } else if (s.equals("mo")) { - return builder.addMonths(number); - } else if (s.equals("w")) { - return builder.addWeeks(number); - } else if (s.equals("d")) { - return builder.addDays(number); - } else if (s.equals("h")) { - return builder.addHours(number); - } else if (s.equals("m")) { - return builder.addMinutes(number); - } else if (s.equals("s")) { - return builder.addSeconds(number); - } else if (s.equals("ms")) { - return builder.addMillis(number); - } else if (s.equals("us") || s.equals("µs")) { - return builder.addMicros(number); - } else if (s.equals("ns")) { - return builder.addNanos(number); - } - throw new IllegalArgumentException(String.format("Unknown duration symbol '%s'", symbol)); + /** + * Adds the specified amount of microseconds. + * + * @param numberOfMicros the number of microseconds to add. + * @return this {@code Builder} + */ + public Builder addMicros(long numberOfMicros) { + validateOrder(9); + validateNanos(numberOfMicros, NANOS_PER_MICRO); + nanoseconds += numberOfMicros * NANOS_PER_MICRO; + return this; } /** - * Appends the result of the division to the specified builder if the dividend is not zero. + * Adds the specified amount of nanoseconds. * - * @param builder the builder to append to - * @param dividend the dividend - * @param divisor the divisor - * @param unit the time unit to append after the result of the division - * @return the remainder of the division + * @param numberOfNanos the number of nanoseconds to add. 
+ * @return this {@code Builder} */ - private static long append(StringBuilder builder, long dividend, long divisor, String unit) { - if (dividend == 0 || dividend < divisor) - return dividend; - - builder.append(dividend / divisor).append(unit); - return dividend % divisor; + public Builder addNanos(long numberOfNanos) { + validateOrder(10); + validateNanos(numberOfNanos, 1); + nanoseconds += numberOfNanos; + return this; } /** - * Returns the number of months in this duration. + * Validates that the total number of months can be stored. * - * @return the number of months in this duration. + * @param units the number of units that need to be added + * @param monthsPerUnit the number of days per unit */ - public int getMonths() { - return months; + private void validateMonths(long units, int monthsPerUnit) { + validate(units, (Integer.MAX_VALUE - months) / monthsPerUnit, "months"); } /** - * Returns the number of days in this duration. + * Validates that the total number of days can be stored. * - * @return the number of days in this duration. + * @param units the number of units that need to be added + * @param daysPerUnit the number of days per unit */ - public int getDays() { - return days; + private void validateDays(long units, int daysPerUnit) { + validate(units, (Integer.MAX_VALUE - days) / daysPerUnit, "days"); } /** - * Returns the number of nanoseconds in this duration. + * Validates that the total number of nanoseconds can be stored. * - * @return the number of months in this duration. + * @param units the number of units that need to be added + * @param nanosPerUnit the number of nanoseconds per unit */ - public long getNanoseconds() { - return nanoseconds; + private void validateNanos(long units, long nanosPerUnit) { + validate(units, (Long.MAX_VALUE - nanoseconds) / nanosPerUnit, "nanoseconds"); } - @Override - public int hashCode() { - return Objects.hashCode(days, months, nanoseconds); + /** + * Validates that the specified amount is less than the limit. + * + * @param units the number of units to check + * @param limit the limit on the number of units + * @param unitName the unit name + */ + private void validate(long units, long limit, String unitName) { + checkArgument( + units <= limit, + "Invalid duration. The total number of %s must be less or equal to %s", + unitName, + Integer.MAX_VALUE); } - @Override - public boolean equals(Object obj) { - if (!(obj instanceof Duration)) - return false; - - Duration other = (Duration) obj; - return days == other.days - && months == other.months - && nanoseconds == other.nanoseconds; + /** + * Validates that the duration values are added in the proper order. + * + * @param unitIndex the unit index (e.g. years=1, months=2, ...) + */ + private void validateOrder(int unitIndex) { + if (unitIndex == currentUnitIndex) + throw new IllegalArgumentException( + String.format( + "Invalid duration. The %s are specified multiple times", getUnitName(unitIndex))); + + if (unitIndex <= currentUnitIndex) + throw new IllegalArgumentException( + String.format( + "Invalid duration. 
The %s should be after %s", + getUnitName(currentUnitIndex), getUnitName(unitIndex))); + + currentUnitIndex = unitIndex; } - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - - if (months < 0 || days < 0 || nanoseconds < 0) - builder.append('-'); - - long remainder = append(builder, Math.abs(months), MONTHS_PER_YEAR, "y"); - append(builder, remainder, 1, "mo"); - - append(builder, Math.abs(days), 1, "d"); - - if (nanoseconds != 0) { - remainder = append(builder, Math.abs(nanoseconds), NANOS_PER_HOUR, "h"); - remainder = append(builder, remainder, NANOS_PER_MINUTE, "m"); - remainder = append(builder, remainder, NANOS_PER_SECOND, "s"); - remainder = append(builder, remainder, NANOS_PER_MILLI, "ms"); - remainder = append(builder, remainder, NANOS_PER_MICRO, "us"); - append(builder, remainder, 1, "ns"); - } - return builder.toString(); + /** + * Returns the name of the unit corresponding to the specified index. + * + * @param unitIndex the unit index + * @return the name of the unit corresponding to the specified index. + */ + private String getUnitName(int unitIndex) { + switch (unitIndex) { + case 1: + return "years"; + case 2: + return "months"; + case 3: + return "weeks"; + case 4: + return "days"; + case 5: + return "hours"; + case 6: + return "minutes"; + case 7: + return "seconds"; + case 8: + return "milliseconds"; + case 9: + return "microseconds"; + case 10: + return "nanoseconds"; + default: + throw new AssertionError("unknown unit index: " + unitIndex); + } } - private static class Builder { - /** - * {@code true} if the duration is a negative one, {@code false} otherwise. - */ - private final boolean isNegative; - - /** - * The number of months. - */ - private int months; - - /** - * The number of days. - */ - private int days; - - /** - * The number of nanoseconds. - */ - private long nanoseconds; - - /** - * We need to make sure that the values for each units are provided in order. - */ - private int currentUnitIndex; - - public Builder(boolean isNegative) { - this.isNegative = isNegative; - } - - /** - * Adds the specified amount of years. - * - * @param numberOfYears the number of years to add. - * @return this {@code Builder} - */ - public Builder addYears(long numberOfYears) { - validateOrder(1); - validateMonths(numberOfYears, MONTHS_PER_YEAR); - months += numberOfYears * MONTHS_PER_YEAR; - return this; - } - - /** - * Adds the specified amount of months. - * - * @param numberOfMonths the number of months to add. - * @return this {@code Builder} - */ - public Builder addMonths(long numberOfMonths) { - validateOrder(2); - validateMonths(numberOfMonths, 1); - months += numberOfMonths; - return this; - } - - /** - * Adds the specified amount of weeks. - * - * @param numberOfWeeks the number of weeks to add. - * @return this {@code Builder} - */ - public Builder addWeeks(long numberOfWeeks) { - validateOrder(3); - validateDays(numberOfWeeks, DAYS_PER_WEEK); - days += numberOfWeeks * DAYS_PER_WEEK; - return this; - } - - /** - * Adds the specified amount of days. - * - * @param numberOfDays the number of days to add. - * @return this {@code Builder} - */ - public Builder addDays(long numberOfDays) { - validateOrder(4); - validateDays(numberOfDays, 1); - days += numberOfDays; - return this; - } - - /** - * Adds the specified amount of hours. - * - * @param numberOfHours the number of hours to add. 
- * @return this {@code Builder} - */ - public Builder addHours(long numberOfHours) { - validateOrder(5); - validateNanos(numberOfHours, NANOS_PER_HOUR); - nanoseconds += numberOfHours * NANOS_PER_HOUR; - return this; - } - - /** - * Adds the specified amount of minutes. - * - * @param numberOfMinutes the number of minutes to add. - * @return this {@code Builder} - */ - public Builder addMinutes(long numberOfMinutes) { - validateOrder(6); - validateNanos(numberOfMinutes, NANOS_PER_MINUTE); - nanoseconds += numberOfMinutes * NANOS_PER_MINUTE; - return this; - } - - /** - * Adds the specified amount of seconds. - * - * @param numberOfSeconds the number of seconds to add. - * @return this {@code Builder} - */ - public Builder addSeconds(long numberOfSeconds) { - validateOrder(7); - validateNanos(numberOfSeconds, NANOS_PER_SECOND); - nanoseconds += numberOfSeconds * NANOS_PER_SECOND; - return this; - } - - /** - * Adds the specified amount of milliseconds. - * - * @param numberOfMillis the number of milliseconds to add. - * @return this {@code Builder} - */ - public Builder addMillis(long numberOfMillis) { - validateOrder(8); - validateNanos(numberOfMillis, NANOS_PER_MILLI); - nanoseconds += numberOfMillis * NANOS_PER_MILLI; - return this; - } - - /** - * Adds the specified amount of microseconds. - * - * @param numberOfMicros the number of microseconds to add. - * @return this {@code Builder} - */ - public Builder addMicros(long numberOfMicros) { - validateOrder(9); - validateNanos(numberOfMicros, NANOS_PER_MICRO); - nanoseconds += numberOfMicros * NANOS_PER_MICRO; - return this; - } - - /** - * Adds the specified amount of nanoseconds. - * - * @param numberOfNanos the number of nanoseconds to add. - * @return this {@code Builder} - */ - public Builder addNanos(long numberOfNanos) { - validateOrder(10); - validateNanos(numberOfNanos, 1); - nanoseconds += numberOfNanos; - return this; - } - - /** - * Validates that the total number of months can be stored. - * - * @param units the number of units that need to be added - * @param monthsPerUnit the number of days per unit - */ - private void validateMonths(long units, int monthsPerUnit) { - validate(units, (Integer.MAX_VALUE - months) / monthsPerUnit, "months"); - } - - /** - * Validates that the total number of days can be stored. - * - * @param units the number of units that need to be added - * @param daysPerUnit the number of days per unit - */ - private void validateDays(long units, int daysPerUnit) { - validate(units, (Integer.MAX_VALUE - days) / daysPerUnit, "days"); - } - - /** - * Validates that the total number of nanoseconds can be stored. - * - * @param units the number of units that need to be added - * @param nanosPerUnit the number of nanoseconds per unit - */ - private void validateNanos(long units, long nanosPerUnit) { - validate(units, (Long.MAX_VALUE - nanoseconds) / nanosPerUnit, "nanoseconds"); - } - - /** - * Validates that the specified amount is less than the limit. - * - * @param units the number of units to check - * @param limit the limit on the number of units - * @param unitName the unit name - */ - private void validate(long units, long limit, String unitName) { - checkArgument(units <= limit, - "Invalid duration. The total number of %s must be less or equal to %s", - unitName, - Integer.MAX_VALUE); - } - - /** - * Validates that the duration values are added in the proper order. - * - * @param unitIndex the unit index (e.g. years=1, months=2, ...) 
- */ - private void validateOrder(int unitIndex) { - if (unitIndex == currentUnitIndex) - throw new IllegalArgumentException(String.format("Invalid duration. The %s are specified multiple times", getUnitName(unitIndex))); - - if (unitIndex <= currentUnitIndex) - throw new IllegalArgumentException(String.format("Invalid duration. The %s should be after %s", - getUnitName(currentUnitIndex), - getUnitName(unitIndex))); - - currentUnitIndex = unitIndex; - } - - /** - * Returns the name of the unit corresponding to the specified index. - * - * @param unitIndex the unit index - * @return the name of the unit corresponding to the specified index. - */ - private String getUnitName(int unitIndex) { - switch (unitIndex) { - case 1: - return "years"; - case 2: - return "months"; - case 3: - return "weeks"; - case 4: - return "days"; - case 5: - return "hours"; - case 6: - return "minutes"; - case 7: - return "seconds"; - case 8: - return "milliseconds"; - case 9: - return "microseconds"; - case 10: - return "nanoseconds"; - default: - throw new AssertionError("unknown unit index: " + unitIndex); - } - } - - public Duration build() { - return isNegative ? new Duration(-months, -days, -nanoseconds) : new Duration(months, days, nanoseconds); - } + public Duration build() { + return isNegative + ? new Duration(-months, -days, -nanoseconds) + : new Duration(months, days, nanoseconds); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/EndPoint.java b/driver-core/src/main/java/com/datastax/driver/core/EndPoint.java new file mode 100644 index 00000000000..4900084c2d3 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/EndPoint.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** Encapsulates the information needed by the driver to open connections to a node. */ +public interface EndPoint { + + /** + * Resolves this instance to a socket address. + * + *

This will be called each time the driver opens a new connection to the node. The returned + * address cannot be null. + */ + InetSocketAddress resolve(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/EndPointFactory.java b/driver-core/src/main/java/com/datastax/driver/core/EndPointFactory.java new file mode 100644 index 00000000000..0941720ea5a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/EndPointFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Produces {@link EndPoint} instances representing the connection information to every node. + * + *
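As an aside, a minimal sketch (not part of the diff) of an `EndPoint` implementation; the class name and the fixed-address behavior are invented for illustration:

```java
import com.datastax.driver.core.EndPoint;
import java.net.InetSocketAddress;

// Hypothetical example: an endpoint that always resolves to the same address.
public class FixedEndPoint implements EndPoint {

  private final InetSocketAddress address;

  public FixedEndPoint(String host, int port) {
    this.address = new InetSocketAddress(host, port);
  }

  @Override
  public InetSocketAddress resolve() {
    // Called every time the driver opens a connection; must never return null.
    return address;
  }
}
```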

This component is reserved for advanced use cases where the driver needs more than an IP + * address to connect. + * + *

Note that if endpoints do not translate to addresses 1-to-1, the auth provider and SSL options + * should be instances of {@link ExtendedAuthProvider} and {@link + * ExtendedRemoteEndpointAwareSslOptions} respectively. + */ +public interface EndPointFactory { + + void init(Cluster cluster); + + /** + * Creates an instance from a row in {@code system.peers}, or returns {@code null} if there is no + * sufficient information. + */ + EndPoint create(Row peersRow); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/EventDebouncer.java b/driver-core/src/main/java/com/datastax/driver/core/EventDebouncer.java index a1acb26888e..d3ac3e8c452 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/EventDebouncer.java +++ b/driver-core/src/main/java/com/datastax/driver/core/EventDebouncer.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +17,15 @@ */ package com.datastax.driver.core; +import static com.google.common.base.Preconditions.checkNotNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + import com.datastax.driver.core.utils.MoreFutures; import com.google.common.collect.Lists; import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.List; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -32,270 +34,287 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - -import static com.google.common.base.Preconditions.checkNotNull; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A helper class to debounce events received by the Control Connection. - *
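Likewise, a hypothetical `EndPointFactory` sketch (not part of the diff) that derives endpoints from the `rpc_address` column of `system.peers`; the class name, the fixed port and the column choice are assumptions for illustration, not the driver's built-in factory:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.EndPoint;
import com.datastax.driver.core.EndPointFactory;
import com.datastax.driver.core.Row;
import java.net.InetAddress;
import java.net.InetSocketAddress;

// Hypothetical example: build endpoints from each peer's RPC address and a fixed port.
public class RpcAddressEndPointFactory implements EndPointFactory {

  private static final int PORT = 9042; // assumed native transport port

  @Override
  public void init(Cluster cluster) {
    // Nothing to initialize in this sketch.
  }

  @Override
  public EndPoint create(Row peersRow) {
    InetAddress rpcAddress = peersRow.getInet("rpc_address");
    if (rpcAddress == null) {
      return null; // not enough information for this peer
    }
    final InetSocketAddress address = new InetSocketAddress(rpcAddress, PORT);
    return new EndPoint() {
      @Override
      public InetSocketAddress resolve() {
        return address;
      }
    };
  }
}
```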

- * This class accumulates received events, and delivers them when either: - * - no events have been received for delayMs - * - maxPendingEvents have been received + * + *

This class accumulates received events, and delivers them when either: - no events have been + * received for delayMs - maxPendingEvents have been received */ abstract class EventDebouncer { - private static final Logger logger = LoggerFactory.getLogger(EventDebouncer.class); + private static final Logger logger = LoggerFactory.getLogger(EventDebouncer.class); - private static final int DEFAULT_MAX_QUEUED_EVENTS = 10000; + private static final int DEFAULT_MAX_QUEUED_EVENTS = 10000; - private final String name; + private final String name; - private final AtomicReference immediateDelivery = new AtomicReference(null); - private final AtomicReference delayedDelivery = new AtomicReference(null); + private final AtomicReference immediateDelivery = + new AtomicReference(null); + private final AtomicReference delayedDelivery = + new AtomicReference(null); - private final ScheduledExecutorService executor; + private final ScheduledExecutorService executor; - private final DeliveryCallback callback; + private final DeliveryCallback callback; - private final int maxQueuedEvents; + private final int maxQueuedEvents; - private final Queue> events; - private final AtomicInteger eventCount; + private final Queue> events; + private final AtomicInteger eventCount; - private enum State {NEW, RUNNING, STOPPED} + private enum State { + NEW, + RUNNING, + STOPPED + } - private volatile State state; + private volatile State state; - private static final long OVERFLOW_WARNING_INTERVAL = NANOSECONDS.convert(5, SECONDS); - private volatile long lastOverflowWarning = Long.MIN_VALUE; + private static final long OVERFLOW_WARNING_INTERVAL = NANOSECONDS.convert(5, SECONDS); + private volatile long lastOverflowWarning = Long.MIN_VALUE; - EventDebouncer(String name, ScheduledExecutorService executor, DeliveryCallback callback) { - this(name, executor, callback, DEFAULT_MAX_QUEUED_EVENTS); - } + EventDebouncer(String name, ScheduledExecutorService executor, DeliveryCallback callback) { + this(name, executor, callback, DEFAULT_MAX_QUEUED_EVENTS); + } - EventDebouncer(String name, ScheduledExecutorService executor, DeliveryCallback callback, int maxQueuedEvents) { - this.name = name; - this.executor = executor; - this.callback = callback; - this.maxQueuedEvents = maxQueuedEvents; - this.events = new ConcurrentLinkedQueue>(); - this.eventCount = new AtomicInteger(); - this.state = State.NEW; - } + EventDebouncer( + String name, + ScheduledExecutorService executor, + DeliveryCallback callback, + int maxQueuedEvents) { + this.name = name; + this.executor = executor; + this.callback = callback; + this.maxQueuedEvents = maxQueuedEvents; + this.events = new ConcurrentLinkedQueue>(); + this.eventCount = new AtomicInteger(); + this.state = State.NEW; + } - abstract int maxPendingEvents(); + abstract int maxPendingEvents(); - abstract long delayMs(); + abstract long delayMs(); - void start() { - logger.trace("Starting {} debouncer...", name); - state = State.RUNNING; - if (!events.isEmpty()) { - logger.trace("{} debouncer: {} events were accumulated before the debouncer started: delivering now", - name, eventCount.get()); - scheduleImmediateDelivery(); - } + void start() { + logger.trace("Starting {} debouncer...", name); + state = State.RUNNING; + if (!events.isEmpty()) { + logger.trace( + "{} debouncer: {} events were accumulated before the debouncer started: delivering now", + name, + eventCount.get()); + scheduleImmediateDelivery(); + } + } + + void stop() { + logger.trace("Stopping {} debouncer...", name); + state = 
State.STOPPED; + while (true) { + DeliveryAttempt previous = cancelDelayedDelivery(); + if (delayedDelivery.compareAndSet(previous, null)) { + break; + } + } + while (true) { + DeliveryAttempt previous = cancelImmediateDelivery(); + if (immediateDelivery.compareAndSet(previous, null)) { + break; + } } - void stop() { - logger.trace("Stopping {} debouncer...", name); - state = State.STOPPED; - while (true) { - DeliveryAttempt previous = cancelDelayedDelivery(); - if (delayedDelivery.compareAndSet(previous, null)) { - break; - } - } + completeAllPendingFutures(); + + logger.trace("{} debouncer stopped", name); + } - completeAllPendingFutures(); + private void completeAllPendingFutures() { + Entry entry; + while ((entry = this.events.poll()) != null) { + entry.future.set(null); + } + } - logger.trace("{} debouncer stopped", name); + /** @return a future that will complete once the event has been processed */ + ListenableFuture eventReceived(T event) { + if (state == State.STOPPED) { + logger.trace("{} debouncer is stopped, rejecting event: {}", name, event); + return MoreFutures.VOID_SUCCESS; + } + checkNotNull(event); + logger.trace("{} debouncer: event received {}", name, event); + + // Safeguard against the queue filling up faster than we can process it + if (eventCount.incrementAndGet() > maxQueuedEvents) { + long now = System.nanoTime(); + if (now > lastOverflowWarning + OVERFLOW_WARNING_INTERVAL) { + lastOverflowWarning = now; + logger.warn( + "{} debouncer enqueued more than {} events, rejecting new events. " + + "This should not happen and is likely a sign that something is wrong.", + name, + maxQueuedEvents); + } + eventCount.decrementAndGet(); + return MoreFutures.VOID_SUCCESS; } - private void completeAllPendingFutures() { - Entry entry; - while ((entry = this.events.poll()) != null) { - entry.future.set(null); - } + Entry entry = new Entry(event); + try { + events.add(entry); + } catch (RuntimeException e) { + eventCount.decrementAndGet(); + throw e; } - /** - * @return a future that will complete once the event has been processed - */ - ListenableFuture eventReceived(T event) { - if (state == State.STOPPED) { - logger.trace("{} debouncer is stopped, rejecting event: {}", name, event); - return MoreFutures.VOID_SUCCESS; - } - checkNotNull(event); - logger.trace("{} debouncer: event received {}", name, event); - - // Safeguard against the queue filling up faster than we can process it - if (eventCount.incrementAndGet() > maxQueuedEvents) { - long now = System.nanoTime(); - if (now > lastOverflowWarning + OVERFLOW_WARNING_INTERVAL) { - lastOverflowWarning = now; - logger.warn("{} debouncer enqueued more than {} events, rejecting new events. " - + "This should not happen and is likely a sign that something is wrong.", - name, maxQueuedEvents); - } - eventCount.decrementAndGet(); - return MoreFutures.VOID_SUCCESS; - } - - Entry entry = new Entry(event); - try { - events.add(entry); - } catch (RuntimeException e) { - eventCount.decrementAndGet(); - throw e; - } - - if (state == State.RUNNING) { - int count = eventCount.get(); - int maxPendingEvents = maxPendingEvents(); - if (count < maxPendingEvents) { - scheduleDelayedDelivery(); - } else if (count == maxPendingEvents) { - scheduleImmediateDelivery(); - } - } else if (state == State.STOPPED) { - // If we race with stop() since the check at the beginning, ensure the future - // gets completed (no-op if the future was already set). 
- entry.future.set(null); - } - return entry.future; + if (state == State.RUNNING) { + int count = eventCount.get(); + int maxPendingEvents = maxPendingEvents(); + if (count < maxPendingEvents) { + scheduleDelayedDelivery(); + } else if (count == maxPendingEvents) { + scheduleImmediateDelivery(); + } + } else if (state == State.STOPPED) { + // If we race with stop() since the check at the beginning, ensure the future + // gets completed (no-op if the future was already set). + entry.future.set(null); } + return entry.future; + } - void scheduleImmediateDelivery() { - cancelDelayedDelivery(); + void scheduleImmediateDelivery() { + cancelDelayedDelivery(); - while (state == State.RUNNING) { - DeliveryAttempt previous = immediateDelivery.get(); - if (previous != null) - previous.cancel(); + while (state == State.RUNNING) { + DeliveryAttempt previous = immediateDelivery.get(); + if (previous != null) previous.cancel(); - DeliveryAttempt current = new DeliveryAttempt(); - if (immediateDelivery.compareAndSet(previous, current)) { - current.executeNow(); - return; - } - } + DeliveryAttempt current = new DeliveryAttempt(); + if (immediateDelivery.compareAndSet(previous, current)) { + current.executeNow(); + return; + } + } + } + + private void scheduleDelayedDelivery() { + while (state == State.RUNNING) { + DeliveryAttempt previous = cancelDelayedDelivery(); + DeliveryAttempt next = new DeliveryAttempt(); + if (delayedDelivery.compareAndSet(previous, next)) { + next.scheduleAfterDelay(); + break; + } } + } - private void scheduleDelayedDelivery() { - while (state == State.RUNNING) { - DeliveryAttempt previous = cancelDelayedDelivery(); - DeliveryAttempt next = new DeliveryAttempt(); - if (delayedDelivery.compareAndSet(previous, next)) { - next.scheduleAfterDelay(); - break; - } - } + private DeliveryAttempt cancelDelayedDelivery() { + return cancelDelivery(delayedDelivery.get()); + } + + private DeliveryAttempt cancelImmediateDelivery() { + return cancelDelivery(immediateDelivery.get()); + } + + private DeliveryAttempt cancelDelivery(DeliveryAttempt previous) { + if (previous != null) { + previous.cancel(); } + return previous; + } - private DeliveryAttempt cancelDelayedDelivery() { - DeliveryAttempt previous = delayedDelivery.get(); - if (previous != null) { - previous.cancel(); - } - return previous; + private void deliverEvents() { + if (state == State.STOPPED) { + completeAllPendingFutures(); + return; } + final List toDeliver = Lists.newArrayList(); + final List> futures = Lists.newArrayList(); + + Entry entry; + // Limit the number of events we dequeue, to avoid an infinite loop if the queue starts filling + // faster than we can consume it. 
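For context, a sketch (not part of the diff) of what a concrete debouncer subclass could look like; the class name, thresholds and callback body are invented, and the generic signatures `EventDebouncer<T>` and `DeliveryCallback<T>` are assumed to match the driver source:

```java
package com.datastax.driver.core;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;

// Hypothetical example: deliver accumulated events after 50 ms of quiet time,
// or immediately once 20 events are pending.
class LoggingDebouncer extends EventDebouncer<String> {

  LoggingDebouncer(ScheduledExecutorService executor) {
    super(
        "logging",
        executor,
        new DeliveryCallback<String>() {
          @Override
          public ListenableFuture<?> deliver(List<String> events) {
            System.out.println("delivering " + events.size() + " events");
            return Futures.immediateFuture(null);
          }
        });
  }

  @Override
  int maxPendingEvents() {
    return 20;
  }

  @Override
  long delayMs() {
    return 50;
  }
}
```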
+ int count = 0; + while (++count <= maxQueuedEvents && (entry = this.events.poll()) != null) { + toDeliver.add(entry.event); + futures.add(entry.future); + } + eventCount.addAndGet(-toDeliver.size()); + + if (toDeliver.isEmpty()) { + logger.trace("{} debouncer: no events to deliver", name); + } else { + logger.trace("{} debouncer: delivering {} events", name, toDeliver.size()); + ListenableFuture delivered = callback.deliver(toDeliver); + GuavaCompatibility.INSTANCE.addCallback( + delivered, + new FutureCallback() { + @Override + public void onSuccess(Object result) { + for (SettableFuture future : futures) future.set(null); + } - void deliverEvents() { - if (state == State.STOPPED) { - completeAllPendingFutures(); - return; - } - final List toDeliver = Lists.newArrayList(); - final List> futures = Lists.newArrayList(); - - Entry entry; - // Limit the number of events we dequeue, to avoid an infinite loop if the queue starts filling faster than we can consume it. - int count = 0; - while (++count <= maxQueuedEvents && (entry = this.events.poll()) != null) { - toDeliver.add(entry.event); - futures.add(entry.future); - } - eventCount.addAndGet(-toDeliver.size()); - - if (toDeliver.isEmpty()) { - logger.trace("{} debouncer: no events to deliver", name); - } else { - logger.trace("{} debouncer: delivering {} events", name, toDeliver.size()); - ListenableFuture delivered = callback.deliver(toDeliver); - Futures.addCallback(delivered, new FutureCallback() { - @Override - public void onSuccess(Object result) { - for (SettableFuture future : futures) - future.set(null); - } - - @Override - public void onFailure(Throwable t) { - for (SettableFuture future : futures) - future.setException(t); - } - }); - } - - // If we didn't dequeue all events (or new ones arrived since we did), make sure we eventually - // process the remaining events, because eventReceived might have skipped the delivery - if (eventCount.get() > 0) - scheduleDelayedDelivery(); + @Override + public void onFailure(Throwable t) { + for (SettableFuture future : futures) future.setException(t); + } + }); } - class DeliveryAttempt extends ExceptionCatchingRunnable { + // If we didn't dequeue all events (or new ones arrived since we did), make sure we eventually + // process the remaining events, because eventReceived might have skipped the delivery + if (eventCount.get() > 0) scheduleDelayedDelivery(); + } - volatile Future deliveryFuture; + class DeliveryAttempt extends ExceptionCatchingRunnable { - boolean isDone() { - return deliveryFuture != null && deliveryFuture.isDone(); - } + volatile Future deliveryFuture; - void cancel() { - if (deliveryFuture != null) - deliveryFuture.cancel(true); - } + boolean isDone() { + return deliveryFuture != null && deliveryFuture.isDone(); + } - void executeNow() { - if (state != State.STOPPED) - deliveryFuture = executor.submit(this); - } + void cancel() { + if (deliveryFuture != null) deliveryFuture.cancel(true); + } - void scheduleAfterDelay() { - if (state != State.STOPPED) - deliveryFuture = executor.schedule(this, delayMs(), TimeUnit.MILLISECONDS); - } + void executeNow() { + if (state != State.STOPPED) deliveryFuture = executor.submit(this); + } - @Override - public void runMayThrow() throws Exception { - deliverEvents(); - } + void scheduleAfterDelay() { + if (state != State.STOPPED) + deliveryFuture = executor.schedule(this, delayMs(), TimeUnit.MILLISECONDS); } - interface DeliveryCallback { + @Override + public void runMayThrow() throws Exception { + deliverEvents(); + } + } - /** - * 
Deliver the given list of events. - * The given list is a private copy and any modification made to it - * has no side-effect; it is also guaranteed not to be null nor empty. - * - * @param events the events to deliver - */ - ListenableFuture deliver(List events); + interface DeliveryCallback { - } + /** + * Deliver the given list of events. The given list is a private copy and any modification made + * to it has no side-effect; it is also guaranteed not to be null nor empty. + * + * @param events the events to deliver + */ + ListenableFuture deliver(List events); + } - static class Entry { - final T event; - final SettableFuture future; + static class Entry { + final T event; + final SettableFuture future; - Entry(T event) { - this.event = event; - this.future = SettableFuture.create(); - } + Entry(T event) { + this.event = event; + this.future = SettableFuture.create(); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java index e14dc3a9b5f..9e2d136100b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCatchingRunnable.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,18 +24,18 @@ // our executors. abstract class ExceptionCatchingRunnable implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(ExceptionCatchingRunnable.class); + private static final Logger logger = LoggerFactory.getLogger(ExceptionCatchingRunnable.class); - public abstract void runMayThrow() throws Exception; + public abstract void runMayThrow() throws Exception; - @Override - public void run() { - try { - runMayThrow(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - logger.error("Unexpected error while executing task", e); - } + @Override + public void run() { + try { + runMayThrow(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error while executing task", e); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java index 48a6c5822f2..7b5b91237fb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,55 +18,52 @@ package com.datastax.driver.core; import com.datastax.driver.core.exceptions.DriverInternalError; - import java.util.HashMap; import java.util.Map; -/** - * Exceptions code, as defined by the native protocol. - */ +/** Exceptions code, as defined by the native protocol. */ enum ExceptionCode { + SERVER_ERROR(0x0000), + PROTOCOL_ERROR(0x000A), - SERVER_ERROR(0x0000), - PROTOCOL_ERROR(0x000A), - - BAD_CREDENTIALS(0x0100), + BAD_CREDENTIALS(0x0100), - // 1xx: problem during request execution - UNAVAILABLE(0x1000), - OVERLOADED(0x1001), - IS_BOOTSTRAPPING(0x1002), - TRUNCATE_ERROR(0x1003), - WRITE_TIMEOUT(0x1100), - READ_TIMEOUT(0x1200), - READ_FAILURE(0x1300), - FUNCTION_FAILURE(0x1400), - WRITE_FAILURE(0x1500), + // 1xx: problem during request execution + UNAVAILABLE(0x1000), + OVERLOADED(0x1001), + IS_BOOTSTRAPPING(0x1002), + TRUNCATE_ERROR(0x1003), + WRITE_TIMEOUT(0x1100), + READ_TIMEOUT(0x1200), + READ_FAILURE(0x1300), + FUNCTION_FAILURE(0x1400), + WRITE_FAILURE(0x1500), + CDC_WRITE_FAILURE(0x1600), + CAS_WRITE_UNKNOWN(0x1700), - // 2xx: problem validating the request - SYNTAX_ERROR(0x2000), - UNAUTHORIZED(0x2100), - INVALID(0x2200), - CONFIG_ERROR(0x2300), - ALREADY_EXISTS(0x2400), - UNPREPARED(0x2500); + // 2xx: problem validating the request + SYNTAX_ERROR(0x2000), + UNAUTHORIZED(0x2100), + INVALID(0x2200), + CONFIG_ERROR(0x2300), + ALREADY_EXISTS(0x2400), + UNPREPARED(0x2500); - public final int value; - private static final Map valueToCode = new HashMap(ExceptionCode.values().length); + public final int value; + private static final Map valueToCode = + new HashMap(ExceptionCode.values().length); - static { - for (ExceptionCode code : ExceptionCode.values()) - valueToCode.put(code.value, code); - } + static { + for (ExceptionCode code : ExceptionCode.values()) valueToCode.put(code.value, code); + } - private ExceptionCode(int value) { - this.value = value; - } + private ExceptionCode(int value) { + this.value = value; + } - public static ExceptionCode fromValue(int value) { - ExceptionCode code = valueToCode.get(value); - if (code == null) - throw new DriverInternalError(String.format("Unknown error code %d", value)); - return code; - } + public static ExceptionCode fromValue(int value) { + ExceptionCode code = valueToCode.get(value); + if (code == null) throw new DriverInternalError(String.format("Unknown error code %d", value)); + return code; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java b/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java index 9c1ad7e1a3b..6b1c606782b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java +++ 
b/driver-core/src/main/java/com/datastax/driver/core/ExecutionInfo.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,274 +21,319 @@ import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; - import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; import java.util.Map; -/** - * Basic information on the execution of a query. - */ +/** Basic information on the execution of a query. */ public class ExecutionInfo { - private final int speculativeExecutions; - private final int successfulExecutionIndex; - private final List triedHosts; - private final ConsistencyLevel achievedConsistency; - private final QueryTrace trace; - private final ByteBuffer pagingState; - private final ProtocolVersion protocolVersion; - private final CodecRegistry codecRegistry; - private final Statement statement; - private volatile boolean schemaInAgreement; - private final List warnings; - private final Map incomingPayload; + private final int speculativeExecutions; + private final int successfulExecutionIndex; + private final List triedHosts; + private final ConsistencyLevel achievedConsistency; + private final QueryTrace trace; + private final ByteBuffer pagingState; + private final ProtocolVersion protocolVersion; + private final CodecRegistry codecRegistry; + private final Statement statement; + private volatile boolean schemaInAgreement; + private final List warnings; + private final Map incomingPayload; - private ExecutionInfo(int speculativeExecutions, int successfulExecutionIndex, List triedHosts, ConsistencyLevel achievedConsistency, QueryTrace trace, ByteBuffer pagingState, ProtocolVersion protocolVersion, CodecRegistry codecRegistry, Statement statement, boolean schemaAgreement, List warnings, Map incomingPayload) { - this.speculativeExecutions = speculativeExecutions; - this.successfulExecutionIndex = successfulExecutionIndex; - this.triedHosts = triedHosts; - this.achievedConsistency = achievedConsistency; - this.trace = trace; - this.pagingState = pagingState; - this.protocolVersion = protocolVersion; - this.codecRegistry = codecRegistry; - this.statement = statement; - this.schemaInAgreement = schemaAgreement; - this.warnings = warnings; - this.incomingPayload = incomingPayload; - } + private ExecutionInfo( + int speculativeExecutions, + int successfulExecutionIndex, + List triedHosts, + ConsistencyLevel achievedConsistency, + QueryTrace trace, + ByteBuffer pagingState, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry, + Statement statement, + boolean schemaAgreement, + List warnings, + Map incomingPayload) { 
+ this.speculativeExecutions = speculativeExecutions; + this.successfulExecutionIndex = successfulExecutionIndex; + this.triedHosts = triedHosts; + this.achievedConsistency = achievedConsistency; + this.trace = trace; + this.pagingState = pagingState; + this.protocolVersion = protocolVersion; + this.codecRegistry = codecRegistry; + this.statement = statement; + this.schemaInAgreement = schemaAgreement; + this.warnings = warnings; + this.incomingPayload = incomingPayload; + } - ExecutionInfo(Host singleHost) { - this(0, 0, ImmutableList.of(singleHost), null, null, null, null, null, null, true, Collections.emptyList(), null); - } + ExecutionInfo(Host singleHost) { + this( + 0, + 0, + ImmutableList.of(singleHost), + null, + null, + null, + null, + null, + null, + true, + Collections.emptyList(), + null); + } - public ExecutionInfo(int speculativeExecutions, int successfulExecutionIndex, List triedHosts, ConsistencyLevel achievedConsistency, Map customPayload) { - this(speculativeExecutions, successfulExecutionIndex, triedHosts, achievedConsistency, null, null, null, null, null, false, null, customPayload); - } + public ExecutionInfo( + int speculativeExecutions, + int successfulExecutionIndex, + List triedHosts, + ConsistencyLevel achievedConsistency, + Map customPayload) { + this( + speculativeExecutions, + successfulExecutionIndex, + triedHosts, + achievedConsistency, + null, + null, + null, + null, + null, + false, + null, + customPayload); + } - ExecutionInfo with(QueryTrace newTrace, List newWarnings, ByteBuffer newPagingState, Statement newStatement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return new ExecutionInfo(speculativeExecutions, successfulExecutionIndex, triedHosts, achievedConsistency, - newTrace, - newPagingState, protocolVersion, codecRegistry, - newStatement, - schemaInAgreement, - newWarnings, - incomingPayload - ); - } + ExecutionInfo with( + QueryTrace newTrace, + List newWarnings, + ByteBuffer newPagingState, + Statement newStatement, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry) { + return new ExecutionInfo( + speculativeExecutions, + successfulExecutionIndex, + triedHosts, + achievedConsistency, + newTrace, + newPagingState, + protocolVersion, + codecRegistry, + newStatement, + schemaInAgreement, + newWarnings, + incomingPayload); + } - /** - * The list of tried hosts for this query. - *

- * In general, this will be a singleton list with the host that coordinated - * that query. However: - *

- * <ul>
- * <li>if a host is tried by the driver but is dead or in
- * error, that host is recorded and the query is retried;</li>
- * <li>on a timeout or unavailable exception, some
- * {@link com.datastax.driver.core.policies.RetryPolicy} may retry the
- * query on the same host, so the same host might appear twice.</li>
- * <li>if {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy speculative executions}
- * are enabled, this will also contain hosts that were tried by other executions (however, note that
- * this only contains hosts which timed out, or for which a response was received; if an execution is
- * waiting for a response from a host and another execution completes the request in the meantime, then
- * the host of the first execution will not be in that list).</li>
- * </ul>
- *

- * If you are only interested in fetching the final (and often only) node - * coordinating the query, {@link #getQueriedHost} provides a shortcut to - * fetch the last element of the list returned by this method. - * - * @return the list of tried hosts for this query, in the order tried. - */ - public List getTriedHosts() { - return triedHosts; - } + /** + * The list of tried hosts for this query. + * + *

In general, this will be a singleton list with the host that coordinated that query. + * However: + * + *

+ * <ul>
+ *   <li>if a host is tried by the driver but is dead or in error, that host is recorded and the
+ *       query is retried;
+ *   <li>on a timeout or unavailable exception, some {@link
+ *       com.datastax.driver.core.policies.RetryPolicy} may retry the query on the same host, so
+ *       the same host might appear twice.
+ *   <li>if {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy speculative
+ *       executions} are enabled, this will also contain hosts that were tried by other executions
+ *       (however, note that this only contains hosts which timed out, or for which a response was
+ *       received; if an execution is waiting for a response from a host and another execution
+ *       completes the request in the meantime, then the host of the first execution will not be
+ *       in that list).
+ * </ul>
+ *
+ * <p>
If you are only interested in fetching the final (and often only) node coordinating the + * query, {@link #getQueriedHost} provides a shortcut to fetch the last element of the list + * returned by this method. + * + * @return the list of tried hosts for this query, in the order tried. + */ + public List getTriedHosts() { + return triedHosts; + } - /** - * Return the Cassandra host that coordinated this query. - *

- * This is a shortcut for {@code getTriedHosts().get(getTriedHosts().size() - 1)}. - * - * @return return the Cassandra host that coordinated this query. - */ - public Host getQueriedHost() { - return triedHosts.get(triedHosts.size() - 1); - } + /** + * Return the Cassandra host that coordinated this query. + * + *

This is a shortcut for {@code getTriedHosts().get(getTriedHosts().size() - 1)}. + * + * @return return the Cassandra host that coordinated this query. + */ + public Host getQueriedHost() { + return triedHosts.get(triedHosts.size() - 1); + } - /** - * The number of speculative executions that were started for this query. - *

- * This does not include the initial, normal execution of the query. Therefore, if speculative - * executions are disabled, this will always be 0. If they are enabled and one speculative - * execution was triggered in addition to the initial execution, this will be 1, etc. - * - * @see #getSuccessfulExecutionIndex() - * @see Cluster.Builder#withSpeculativeExecutionPolicy(com.datastax.driver.core.policies.SpeculativeExecutionPolicy) - */ - public int getSpeculativeExecutions() { - return speculativeExecutions; - } + /** + * The number of speculative executions that were started for this query. + * + *

This does not include the initial, normal execution of the query. Therefore, if speculative + * executions are disabled, this will always be 0. If they are enabled and one speculative + * execution was triggered in addition to the initial execution, this will be 1, etc. + * + * @see #getSuccessfulExecutionIndex() + * @see + * Cluster.Builder#withSpeculativeExecutionPolicy(com.datastax.driver.core.policies.SpeculativeExecutionPolicy) + */ + public int getSpeculativeExecutions() { + return speculativeExecutions; + } - /** - * The index of the execution that completed this query. - *

- * 0 represents the initial, normal execution of the query, 1 represents the first speculative - * execution, etc. - * - * @see #getSpeculativeExecutions() - * @see Cluster.Builder#withSpeculativeExecutionPolicy(com.datastax.driver.core.policies.SpeculativeExecutionPolicy) - */ - public int getSuccessfulExecutionIndex() { - return successfulExecutionIndex; - } + /** + * The index of the execution that completed this query. + * + *

0 represents the initial, normal execution of the query, 1 represents the first speculative + * execution, etc. + * + * @see #getSpeculativeExecutions() + * @see + * Cluster.Builder#withSpeculativeExecutionPolicy(com.datastax.driver.core.policies.SpeculativeExecutionPolicy) + */ + public int getSuccessfulExecutionIndex() { + return successfulExecutionIndex; + } - /** - * If the query returned without achieving the requested consistency level - * due to the {@link com.datastax.driver.core.policies.RetryPolicy}, this - * return the biggest consistency level that has been actually achieved by - * the query. - *

- * Note that the default {@code RetryPolicy} - * ({@link com.datastax.driver.core.policies.DefaultRetryPolicy}) - * will never allow a query to be successful without achieving the - * initially requested consistency level and hence with that default - * policy, this method will always return {@code null}. However, it - * might occasionally return a non-{@code null} with say, - * {@link com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy}. - * - * @return {@code null} if the original consistency level of the query was - * achieved, or the consistency level that was ultimately achieved if the - * {@code RetryPolicy} triggered a retry at a different consistency level - * than the original one. - */ - public ConsistencyLevel getAchievedConsistencyLevel() { - return achievedConsistency; - } + /** + * If the query returned without achieving the requested consistency level due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, this returns the biggest consistency level that + * was actually achieved by the query. + * + *

Note that the default {@code RetryPolicy} ({@link + * com.datastax.driver.core.policies.DefaultRetryPolicy}) will never allow a query to be + * successful without achieving the initially requested consistency level and hence with that + * default policy, this method will always return {@code null}. However, it might + * occasionally return a non-{@code null} with say, {@link + * com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy}. + * + * @return {@code null} if the original consistency level of the query was achieved, or the + * consistency level that was ultimately achieved if the {@code RetryPolicy} triggered a retry + * at a different consistency level than the original one. + */ + public ConsistencyLevel getAchievedConsistencyLevel() { + return achievedConsistency; + } - /** - * Return the query trace if tracing was enabled on this query. - *
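The accessors above (getTriedHosts, getQueriedHost, getSpeculativeExecutions, getAchievedConsistencyLevel) are typically read off the ResultSet of an executed statement. A minimal sketch, assuming an already-built 3.x Session named `session`; the query is illustrative:

```java
import com.datastax.driver.core.ExecutionInfo;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class ExecutionInfoSketch {
  static void inspect(Session session) {
    ResultSet rs = session.execute(new SimpleStatement("SELECT release_version FROM system.local"));
    ExecutionInfo info = rs.getExecutionInfo();
    // Last tried host, i.e. the coordinator that finally answered.
    System.out.println("coordinator: " + info.getQueriedHost());
    // All hosts involved, in the order they were tried (retries, speculative executions, ...).
    System.out.println("tried hosts: " + info.getTriedHosts());
    // 0 unless a SpeculativeExecutionPolicy started extra executions.
    System.out.println("speculative executions: " + info.getSpeculativeExecutions());
    // null unless a RetryPolicy downgraded the consistency level.
    System.out.println("achieved CL: " + info.getAchievedConsistencyLevel());
  }
}
```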

- * Note that accessing the fields of the the returned object will trigger a - * blocking background query. - * - * @return the {@code QueryTrace} object for this query if tracing was - * enable for this query, or {@code null} otherwise. - */ - public QueryTrace getQueryTrace() { - return trace; - } + /** + * Return the query trace if tracing was enabled on this query. + * + *

Note that accessing the fields of the returned object will trigger a blocking + * background query. + * + * @return the {@code QueryTrace} object for this query if tracing was enabled for this query, or + * {@code null} otherwise. + */ + public QueryTrace getQueryTrace() { + return trace; + } - /** - * Placeholder for async query trace retrieval (not implemented yet). - *

- * Async query trace retrieval will be implemented in a future version. This method - * is added now to avoid breaking binary compatibility later. - * The current implementation merely wraps the result of {@link #getQueryTrace()} in - * an immediate future; it will still trigger a blocking query when the query - * trace's fields are accessed. - * - * @return currently, an immediate future containing the result of {@link #getQueryTrace()}. - */ - public ListenableFuture getQueryTraceAsync() { - return Futures.immediateFuture(trace); - } + /** + * Placeholder for async query trace retrieval (not implemented yet). + * + *

Async query trace retrieval will be implemented in a future version. This method is added + * now to avoid breaking binary compatibility later. The current implementation merely wraps the + * result of {@link #getQueryTrace()} in an immediate future; it will still trigger a blocking + * query when the query trace's fields are accessed. + * + * @return currently, an immediate future containing the result of {@link #getQueryTrace()}. + */ + public ListenableFuture getQueryTraceAsync() { + return Futures.immediateFuture(trace); + } - /** - * The paging state of the query. - *
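A short sketch of the trace accessors above; tracing has to be enabled on the statement beforehand, and reading the trace fields triggers the blocking fetch mentioned in the Javadoc. All types are from com.datastax.driver.core, and `session` is assumed to be a connected Session:

```java
Statement stmt = new SimpleStatement("SELECT * FROM system.local").enableTracing();
QueryTrace trace = session.execute(stmt).getExecutionInfo().getQueryTrace();
if (trace != null) {
  // Blocks while the driver reads system_traces on the server.
  System.out.println("trace " + trace.getTraceId() + " took " + trace.getDurationMicros() + " us");
}
```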

- * This object represents the next page to be fetched if this query is - * multi page. It can be saved and reused later on the same statement. - * - * @return the paging state or null if there is no next page. - * @see Statement#setPagingState(PagingState) - */ - public PagingState getPagingState() { - if (this.pagingState == null) - return null; - return new PagingState(this.pagingState, this.statement, this.protocolVersion, this.codecRegistry); - } + /** + * The paging state of the query. + * + *

This object represents the next page to be fetched if this query is multi-page. It can be + * saved and reused later on the same statement. + * + * @return the paging state or null if there is no next page. + * @see Statement#setPagingState(PagingState) + */ + public PagingState getPagingState() { + if (this.pagingState == null) return null; + return new PagingState( + this.pagingState, this.statement, this.protocolVersion, this.codecRegistry); + } - /** - * Returns the "raw" paging state of the query. - *

- * Contrary to {@link #getPagingState()}, there will be no validation when - * this is later reinjected into a statement. - * - * @return the paging state or null if there is no next page. - * @see Statement#setPagingStateUnsafe(byte[]) - */ - public byte[] getPagingStateUnsafe() { - if (this.pagingState == null) - return null; - return Bytes.getArray(this.pagingState); - } + /** + * Returns the "raw" paging state of the query. + * + *

Contrary to {@link #getPagingState()}, there will be no validation when this is later + * reinjected into a statement. + * + * @return the paging state or null if there is no next page. + * @see Statement#setPagingStateUnsafe(byte[]) + */ + public byte[] getPagingStateUnsafe() { + if (this.pagingState == null) return null; + return Bytes.getArray(this.pagingState); + } - /** - * Whether the cluster had reached schema agreement after the execution of this query. - *
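A sketch of the save-and-resume cycle that getPagingState and Statement#setPagingState support. The keyspace and table names are illustrative, and `session` is assumed to be a connected Session:

```java
Statement stmt = new SimpleStatement("SELECT * FROM ks.tbl").setFetchSize(100);
ResultSet firstPage = session.execute(stmt);
PagingState state = firstPage.getExecutionInfo().getPagingState();
if (state != null) {
  // The state can be serialized, stored (e.g. returned to an HTTP client) and re-attached
  // later to the same statement to resume from the next page.
  String token = state.toString();
  stmt.setPagingState(PagingState.fromString(token));
  ResultSet nextPage = session.execute(stmt);
}
```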

- * After a successful schema-altering query (ex: creating a table), the driver - * will check if the cluster's nodes agree on the new schema version. If not, - * it will keep retrying for a given delay (configurable via - * {@link Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)}). - *

- * If this method returns {@code false}, clients can call {@link Metadata#checkSchemaAgreement()} - * later to perform the check manually. - *

- * Note that the schema agreement check is only performed for schema-altering queries - * For other query types, this method will always return {@code true}. - * - * @return whether the cluster reached schema agreement, or {@code true} for a non - * schema-altering statement. - */ - public boolean isSchemaInAgreement() { - return schemaInAgreement; - } + /** + * Whether the cluster had reached schema agreement after the execution of this query. + * + *

After a successful schema-altering query (ex: creating a table), the driver will check if + * the cluster's nodes agree on the new schema version. If not, it will keep retrying for a given + * delay (configurable via {@link Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)}). + * + *

If this method returns {@code false}, clients can call {@link + * Metadata#checkSchemaAgreement()} later to perform the check manually. + * + *

Note that the schema agreement check is only performed for schema-altering queries. For other + * query types, this method will always return {@code true}. + * + * @return whether the cluster reached schema agreement, or {@code true} for a non schema-altering + * statement. + */ + public boolean isSchemaInAgreement() { + return schemaInAgreement; + } - void setSchemaInAgreement(boolean schemaAgreement) { - this.schemaInAgreement = schemaAgreement; - } + void setSchemaInAgreement(boolean schemaAgreement) { + this.schemaInAgreement = schemaAgreement; + } - /** - * Returns the server-side warnings for this query. - *
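If isSchemaInAgreement() returns false, the manual re-check described above goes through the cluster metadata. A sketch, assuming `cluster` and `session` are already built; the DDL statement is illustrative and the sleep uses Guava's Uninterruptibles (already on the driver's classpath):

```java
ResultSet rs = session.execute("CREATE TABLE IF NOT EXISTS ks.tbl (k int PRIMARY KEY)");
if (!rs.getExecutionInfo().isSchemaInAgreement()) {
  // The driver already waited up to maxSchemaAgreementWaitSeconds; keep polling manually.
  while (!cluster.getMetadata().checkSchemaAgreement()) {
    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
}
```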

- * This feature is only available with {@link ProtocolVersion#V4} or above; with lower - * versions, the returned list will always be empty. - * - * @return the warnings, or an empty list if there are none. - * @since 2.2 - */ - public List getWarnings() { - return warnings; - } + /** + * Returns the server-side warnings for this query. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above; with lower + * versions, the returned list will always be empty. + * + * @return the warnings, or an empty list if there are none. + * @since 2.2 + */ + public List getWarnings() { + return warnings; + } - /** - * Return the incoming payload, that is, the payload that the server - * sent back with its response, if any, - * or {@code null}, if the server did not include any custom payload. - *
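Both getWarnings() and the getIncomingPayload() accessor documented just below are only populated with protocol v4 or above. A short sketch of reading them, assuming a connected `session`; the example warning text is only illustrative:

```java
ExecutionInfo info = session.execute("SELECT count(*) FROM ks.tbl").getExecutionInfo();
for (String warning : info.getWarnings()) {
  // e.g. "Aggregation query used without partition key"
  System.out.println("server warning: " + warning);
}
Map<String, ByteBuffer> payload = info.getIncomingPayload(); // null unless the server sent one
```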

- * This method returns a read-only view of the original map, but - * its values remain inherently mutable. - * Callers should take care not to modify the returned map in any way. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above; with lower - * versions, this method will always return {@code null}. - * - * @return the custom payload that the server sent back with its response, if any, - * or {@code null}, if the server did not include any custom payload. - * @since 2.2 - */ - public Map getIncomingPayload() { - return incomingPayload; - } + /** + * Return the incoming payload, that is, the payload that the server sent back with its response, + * if any, or {@code null}, if the server did not include any custom payload. + * + *

This method returns a read-only view of the original map, but its values remain inherently + * mutable. Callers should take care not to modify the returned map in any way. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above; with lower + * versions, this method will always return {@code null}. + * + * @return the custom payload that the server sent back with its response, if any, or {@code + * null}, if the server did not include any custom payload. + * @since 2.2 + */ + public Map getIncomingPayload() { + return incomingPayload; + } - /** - * Get the statement that has been executed. - * - * @return the statement executed. - */ - public Statement getStatement() { - return this.statement; - } + /** + * Get the statement that has been executed. + * + * @return the statement executed. + */ + public Statement getStatement() { + return this.statement; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExtendedAuthProvider.java b/driver-core/src/main/java/com/datastax/driver/core/ExtendedAuthProvider.java new file mode 100644 index 00000000000..d381c6210c0 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ExtendedAuthProvider.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.exceptions.AuthenticationException; +import java.net.InetSocketAddress; + +/** + * An auth provider that represents the host as an {@link EndPoint} instead of a raw {@link + * InetSocketAddress}. + * + *

This interface exists solely for backward compatibility: it wasn't possible to change {@link + * AuthProvider} directly, because it would have broken every 3rd-party implementation. + * + *

All built-in providers now implement this interface, and it is recommended that new + * implementations do too. + * + *

When the driver calls an auth provider, it will check if it implements this interface. If so, + * it will call {@link #newAuthenticator(EndPoint, String)}; otherwise it will convert the endpoint + * into an address with {@link EndPoint#resolve()} and call {@link + * AuthProvider#newAuthenticator(InetSocketAddress, String)}. + */ +public interface ExtendedAuthProvider extends AuthProvider { + + /** + * The {@code Authenticator} to use when connecting to {@code endpoint}. + * + * @param endPoint the Cassandra host to connect to. + * @param authenticator the configured authenticator on the host. + * @return The authentication implementation to use. + */ + Authenticator newAuthenticator(EndPoint endPoint, String authenticator) + throws AuthenticationException; + + /** + * @deprecated the driver will never call this method on {@link ExtendedAuthProvider} instances. + * Implementors should throw {@link AssertionError}. + */ + @Override + @Deprecated + Authenticator newAuthenticator(InetSocketAddress host, String authenticator) + throws AuthenticationException; + + class NoAuthProvider implements ExtendedAuthProvider { + + private static final String DSE_AUTHENTICATOR = + "com.datastax.bdp.cassandra.auth.DseAuthenticator"; + + static final String NO_AUTHENTICATOR_MESSAGE = + "Host %s requires authentication, but no authenticator found in Cluster configuration"; + + @Override + public Authenticator newAuthenticator(EndPoint endPoint, String authenticator) { + if (authenticator.equals(DSE_AUTHENTICATOR)) { + return new TransitionalModePlainTextAuthenticator(); + } + throw new AuthenticationException( + endPoint, String.format(NO_AUTHENTICATOR_MESSAGE, endPoint)); + } + + @Override + public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) + throws AuthenticationException { + throw new AssertionError( + "The driver should never call this method on an object that implements " + + this.getClass().getSimpleName()); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExtendedRemoteEndpointAwareSslOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ExtendedRemoteEndpointAwareSslOptions.java new file mode 100644 index 00000000000..e7826c5fe15 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ExtendedRemoteEndpointAwareSslOptions.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.ssl.SslHandler; + +public interface ExtendedRemoteEndpointAwareSslOptions extends RemoteEndpointAwareSSLOptions { + + /** + * Creates a new SSL handler for the given Netty channel and the given remote endpoint. + * + *

This gets called each time the driver opens a new connection to a Cassandra host. The newly + * created handler will be added to the channel's pipeline to provide SSL support for the + * connection. + * + *

You don't necessarily need to implement this method directly; see the provided + * implementations: {@link RemoteEndpointAwareJdkSSLOptions} and {@link + * RemoteEndpointAwareNettySSLOptions}. + * + * @param channel the channel. + * @param remoteEndpoint the remote endpoint information. + * @return a newly-created {@link SslHandler}. + */ + SslHandler newSSLHandler(SocketChannel channel, EndPoint remoteEndpoint); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Frame.java b/driver-core/src/main/java/com/datastax/driver/core/Frame.java index 36cd16fca8d..2221130cc56 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Frame.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Frame.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,26 +22,31 @@ import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.*; - +import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.CorruptedFrameException; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.handler.codec.MessageToMessageDecoder; +import io.netty.handler.codec.MessageToMessageEncoder; +import io.netty.handler.codec.TooLongFrameException; import java.util.EnumSet; import java.util.List; /** * A frame for the CQL binary protocol. - *

- * Each frame contains a fixed size header (8 bytes for V1 and V2, 9 bytes for V3 and V4) - * followed by a variable size body. The content of the body depends - * on the header opcode value (the body can in particular be empty for some - * opcode values). - *

- * The protocol distinguishes 2 types of frames: requests and responses. Requests - * are those frames sent by the clients to the server, response are the ones sent - * by the server. Note however that the protocol supports server pushes (events) - * so responses does not necessarily come right after a client request. - *

- * Frames for protocol versions 1+2 are defined as: - *

+ * + *

Each frame contains a fixed size header (8 bytes for V1 and V2, 9 bytes for V3 and V4) + * followed by a variable size body. The content of the body depends on the header opcode value (the + * body can in particular be empty for some opcode values). + * + *

The protocol distinguishes 2 types of frames: requests and responses. Requests are those + * frames sent by the clients to the server, responses are the ones sent by the server. Note however + * that the protocol supports server pushes (events) so responses do not necessarily come right + * after a client request. + *

Frames for protocol versions 1+2 are defined as: + * + *

+ * *

  *  0         8        16        24        32
  * +---------+---------+---------+---------+
@@ -53,9 +60,11 @@
  * .                                       .
  * +---------------------------------------- *
  * 
- *

- * Frames for protocol versions 3+4 are defined as: - *

+ * + *

Frames for protocol versions 3+4 are defined as: + * + *

+ * *

  * 0         8        16        24        32         40
  * +---------+---------+---------+---------+---------+
@@ -77,268 +86,289 @@
  */
 class Frame {
 
-    final Header header;
-    final ByteBuf body;
-
-    private Frame(Header header, ByteBuf body) {
-        this.header = header;
-        this.body = body;
+  final Header header;
+  final ByteBuf body;
+
+  Frame(Header header, ByteBuf body) {
+    this.header = header;
+    this.body = body;
+  }
+
+  private static Frame create(ByteBuf fullFrame) {
+    Header header = Header.decode(fullFrame);
+    assert header.bodyLength == fullFrame.readableBytes();
+    return new Frame(header, fullFrame);
+  }
+
+  private static int readStreamId(ByteBuf fullFrame, ProtocolVersion version) {
+    switch (version) {
+      case V1:
+      case V2:
+        return fullFrame.readByte();
+      case V3:
+      case V4:
+      case V5:
+      case V6:
+        return fullFrame.readShort();
+      default:
+        throw version.unsupported();
     }
+  }
 
-    private static Frame create(ByteBuf fullFrame) {
-        assert fullFrame.readableBytes() >= 1 : String.format("Frame too short (%d bytes)", fullFrame.readableBytes());
+  static Frame create(
+      ProtocolVersion version, int opcode, int streamId, EnumSet<Header.Flag> flags, ByteBuf body) {
+    Header header = new Header(version, flags, streamId, opcode, body.readableBytes());
+    return new Frame(header, body);
+  }
 
-        int versionBytes = fullFrame.readByte();
-        // version first byte is the "direction" of the frame (request or response)
-        ProtocolVersion version = ProtocolVersion.fromInt(versionBytes & 0x7F);
-        int hdrLen = Header.lengthFor(version);
-        assert fullFrame.readableBytes() >= (hdrLen - 1) : String.format("Frame too short (%d bytes)", fullFrame.readableBytes());
+  static class Header {
 
-        int flags = fullFrame.readByte();
-        int streamId = readStreamid(fullFrame, version);
-        int opcode = fullFrame.readByte();
-        int length = fullFrame.readInt();
-        assert length == fullFrame.readableBytes();
+    final ProtocolVersion version;
+    final EnumSet<Flag> flags;
+    final int streamId;
+    final int opcode;
+    final int bodyLength;
 
-        Header header = new Header(version, flags, streamId, opcode);
-        return new Frame(header, fullFrame);
+    private Header(ProtocolVersion version, int flags, int streamId, int opcode, int bodyLength) {
+      this(version, Flag.deserialize(flags), streamId, opcode, bodyLength);
     }
 
-    private static int readStreamid(ByteBuf fullFrame, ProtocolVersion version) {
-        switch (version) {
-            case V1:
-            case V2:
-                return fullFrame.readByte();
-            case V3:
-            case V4:
-            case V5:
-                return fullFrame.readShort();
-            default:
-                throw version.unsupported();
-        }
+    Header(ProtocolVersion version, EnumSet<Flag> flags, int streamId, int opcode, int bodyLength) {
+      this.version = version;
+      this.flags = flags;
+      this.streamId = streamId;
+      this.opcode = opcode;
+      this.bodyLength = bodyLength;
     }
 
-    static Frame create(ProtocolVersion version, int opcode, int streamId, EnumSet flags, ByteBuf body) {
-        Header header = new Header(version, flags, streamId, opcode);
-        return new Frame(header, body);
+    Header withNewBodyLength(int newBodyLength) {
+      return new Header(version, flags, streamId, opcode, newBodyLength);
     }
 
-    static class Header {
-
-        final ProtocolVersion version;
-        final EnumSet flags;
-        final int streamId;
-        final int opcode;
+    /**
+     * Return the expected frame header length in bytes according to the protocol version in use.
+     *
+     * @param version the protocol version in use
+     * @return the expected frame header length in bytes
+     */
+    static int lengthFor(ProtocolVersion version) {
+      switch (version) {
+        case V1:
+        case V2:
+          return 8;
+        case V3:
+        case V4:
+        case V5:
+        case V6:
+          return 9;
+        default:
+          throw version.unsupported();
+      }
+    }
 
-        private Header(ProtocolVersion version, int flags, int streamId, int opcode) {
-            this(version, Flag.deserialize(flags), streamId, opcode);
-        }
+    public void encodeInto(ByteBuf destination) {
+      // Don't bother with the direction, we only send requests.
+      destination.writeByte(version.toInt());
+      destination.writeByte(Flag.serialize(flags));
+      switch (version) {
+        case V1:
+        case V2:
+          destination.writeByte(streamId);
+          break;
+        case V3:
+        case V4:
+        case V5:
+        case V6:
+          destination.writeShort(streamId);
+          break;
+        default:
+          throw version.unsupported();
+      }
+      destination.writeByte(opcode);
+      destination.writeInt(bodyLength);
+    }
 
-        private Header(ProtocolVersion version, EnumSet flags, int streamId, int opcode) {
-            this.version = version;
-            this.flags = flags;
-            this.streamId = streamId;
-            this.opcode = opcode;
-        }
+    static Header decode(ByteBuf buffer) {
+      assert buffer.readableBytes() >= 1
+          : String.format("Frame too short (%d bytes)", buffer.readableBytes());
 
-        /**
-         * Return the expected frame header length in bytes according to the protocol version in use.
-         *
-         * @param version the protocol version in use
-         * @return the expected frame header length in bytes
-         */
-        static int lengthFor(ProtocolVersion version) {
-            switch (version) {
-                case V1:
-                case V2:
-                    return 8;
-                case V3:
-                case V4:
-                case V5:
-                    return 9;
-                default:
-                    throw version.unsupported();
-            }
-        }
+      int versionBytes = buffer.readByte();
+      // version first byte is the "direction" of the frame (request or response)
+      ProtocolVersion version = ProtocolVersion.fromInt(versionBytes & 0x7F);
+      int hdrLen = Header.lengthFor(version);
+      assert buffer.readableBytes() >= (hdrLen - 1)
+          : String.format("Frame too short (%d bytes)", buffer.readableBytes());
 
-        enum Flag {
-            // The order of that enum matters!!
-            COMPRESSED,
-            TRACING,
-            CUSTOM_PAYLOAD,
-            WARNING,
-            USE_BETA;
-
-            static EnumSet deserialize(int flags) {
-                EnumSet set = EnumSet.noneOf(Flag.class);
-                Flag[] values = Flag.values();
-                for (int n = 0; n < 8; n++) {
-                    if ((flags & (1 << n)) != 0)
-                        set.add(values[n]);
-                }
-                return set;
-            }
-
-            static int serialize(EnumSet flags) {
-                int i = 0;
-                for (Flag flag : flags)
-                    i |= 1 << flag.ordinal();
-                return i;
-            }
-        }
-    }
+      int flags = buffer.readByte();
+      int streamId = readStreamId(buffer, version);
+      int opcode = buffer.readByte();
+      int length = buffer.readInt();
 
-    Frame with(ByteBuf newBody) {
-        return new Frame(header, newBody);
+      return new Header(version, flags, streamId, opcode, length);
     }
 
-    static final class Decoder extends ByteToMessageDecoder {
-        private DecoderForStreamIdSize decoder;
-
-        @Override
-        protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List out) throws Exception {
-            if (buffer.readableBytes() < 1)
-                return;
-
-            // Initialize sub decoder on first message.  No synchronization needed as
-            // decode is always called from same thread.
-            if (decoder == null) {
-                int version = buffer.getByte(buffer.readerIndex());
-                // version first bit is the "direction" of the frame (request or response)
-                version = version & 0x7F;
-                decoder = new DecoderForStreamIdSize(version, version >= 3 ? 2 : 1);
-            }
-
-            Object frame = decoder.decode(ctx, buffer);
-            if (frame != null)
-                out.add(frame);
+    enum Flag {
+      // The order of that enum matters!!
+      COMPRESSED,
+      TRACING,
+      CUSTOM_PAYLOAD,
+      WARNING,
+      USE_BETA;
+
+      static EnumSet<Flag> deserialize(int flags) {
+        EnumSet<Flag> set = EnumSet.noneOf(Flag.class);
+        Flag[] values = Flag.values();
+        for (int n = 0; n < 8; n++) {
+          if ((flags & (1 << n)) != 0) set.add(values[n]);
         }
+        return set;
+      }
+
+      static int serialize(EnumSet<Flag> flags) {
+        int i = 0;
+        for (Flag flag : flags) i |= 1 << flag.ordinal();
+        return i;
+      }
+    }
+  }
+
+  Frame with(ByteBuf newBody) {
+    return new Frame(header.withNewBodyLength(newBody.readableBytes()), newBody);
+  }
+
+  static final class Decoder extends ByteToMessageDecoder {
+    private DecoderForStreamIdSize decoder;
+
+    @Override
+    protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out)
+        throws Exception {
+      if (buffer.readableBytes() < 1) return;
+
+      // Initialize sub decoder on first message.  No synchronization needed as
+      // decode is always called from same thread.
+      if (decoder == null) {
+        int version = buffer.getByte(buffer.readerIndex());
+        // version first bit is the "direction" of the frame (request or response)
+        version = version & 0x7F;
+        decoder = new DecoderForStreamIdSize(version, version >= 3 ? 2 : 1);
+      }
+
+      Object frame = decoder.decode(ctx, buffer);
+      if (frame != null) out.add(frame);
+    }
 
-        static class DecoderForStreamIdSize extends LengthFieldBasedFrameDecoder {
-            // The maximum response frame length allowed.  Note that C* does not currently restrict the length of its responses (CASSANDRA-12630).
-            private static final int MAX_FRAME_LENGTH = SystemProperties.getInt("com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB", 256) * 1024 * 1024; // 256 MB
-            private final int protocolVersion;
-
-            DecoderForStreamIdSize(int protocolVersion, int streamIdSize) {
-                super(MAX_FRAME_LENGTH, /*lengthOffset=*/ 3 + streamIdSize, 4, 0, 0, true);
-                this.protocolVersion = protocolVersion;
-            }
-
-            @Override
-            protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
-                // Capture current index in case we need to get the stream id.
-                // If a TooLongFrameException is thrown the readerIndex will advance to the end of
-                // the buffer (or past the frame) so we need the position as we entered this method.
-                int curIndex = buffer.readerIndex();
-                try {
-                    ByteBuf frame = (ByteBuf) super.decode(ctx, buffer);
-                    if (frame == null) {
-                        return null;
-                    }
-                    // Do not deallocate `frame` just yet, because it is stored as Frame.body and will be used
-                    // in Message.ProtocolDecoder or Frame.Decompressor if compression is enabled (we deallocate
-                    // it there).
-                    Frame theFrame = Frame.create(frame);
-                    // Validate the opcode (this will throw if it's not a response)
-                    Message.Response.Type.fromOpcode(theFrame.header.opcode);
-                    return theFrame;
-                } catch (CorruptedFrameException e) {
-                    throw new DriverInternalError(e);
-                } catch (TooLongFrameException e) {
-                    int streamId = protocolVersion > 2 ?
-                            buffer.getShort(curIndex + 2) :
-                            buffer.getByte(curIndex + 2);
-                    throw new FrameTooLongException(streamId);
-                }
-            }
+    static class DecoderForStreamIdSize extends LengthFieldBasedFrameDecoder {
+      // The maximum response frame length allowed.  Note that C* does not currently restrict the
+      // length of its responses (CASSANDRA-12630).
+      private static final int MAX_FRAME_LENGTH =
+          SystemProperties.getInt("com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB", 256)
+              * 1024
+              * 1024; // 256 MB
+      private final int protocolVersion;
+
+      DecoderForStreamIdSize(int protocolVersion, int streamIdSize) {
+        super(MAX_FRAME_LENGTH, /*lengthOffset=*/ 3 + streamIdSize, 4, 0, 0, true);
+        this.protocolVersion = protocolVersion;
+      }
+
+      @Override
+      protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
+        // Capture current index in case we need to get the stream id.
+        // If a TooLongFrameException is thrown the readerIndex will advance to the end of
+        // the buffer (or past the frame) so we need the position as we entered this method.
+        int curIndex = buffer.readerIndex();
+        try {
+          ByteBuf frame = (ByteBuf) super.decode(ctx, buffer);
+          if (frame == null) {
+            return null;
+          }
+          // Do not deallocate `frame` just yet, because it is stored as Frame.body and will be used
+          // in Message.ProtocolDecoder or Frame.Decompressor if compression is enabled (we
+          // deallocate
+          // it there).
+          Frame theFrame = Frame.create(frame);
+          // Validate the opcode (this will throw if it's not a response)
+          Message.Response.Type.fromOpcode(theFrame.header.opcode);
+          return theFrame;
+        } catch (CorruptedFrameException e) {
+          throw new DriverInternalError(e);
+        } catch (TooLongFrameException e) {
+          int streamId =
+              protocolVersion > 2 ? buffer.getShort(curIndex + 2) : buffer.getByte(curIndex + 2);
+          throw new FrameTooLongException(streamId);
         }
+      }
     }
+  }
 
-    @ChannelHandler.Sharable
-    static class Encoder extends MessageToMessageEncoder {
-
-        @Override
-        protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception {
-            ProtocolVersion protocolVersion = frame.header.version;
-            ByteBuf header = ctx.alloc().ioBuffer(Frame.Header.lengthFor(protocolVersion));
-            // We don't bother with the direction, we only send requests.
-            header.writeByte(frame.header.version.toInt());
-            header.writeByte(Header.Flag.serialize(frame.header.flags));
-            writeStreamId(frame.header.streamId, header, protocolVersion);
-            header.writeByte(frame.header.opcode);
-            header.writeInt(frame.body.readableBytes());
-
-            out.add(header);
-            out.add(frame.body);
-        }
+  @ChannelHandler.Sharable
+  static class Encoder extends MessageToMessageEncoder<Frame> {
 
-        private void writeStreamId(int streamId, ByteBuf header, ProtocolVersion protocolVersion) {
-            switch (protocolVersion) {
-                case V1:
-                case V2:
-                    header.writeByte(streamId);
-                    break;
-                case V3:
-                case V4:
-                case V5:
-                    header.writeShort(streamId);
-                    break;
-                default:
-                    throw protocolVersion.unsupported();
-            }
-        }
+    @Override
+    protected void encode(ChannelHandlerContext ctx, Frame frame, List<Object> out)
+        throws Exception {
+      ProtocolVersion protocolVersion = frame.header.version;
+      ByteBuf header = ctx.alloc().ioBuffer(Frame.Header.lengthFor(protocolVersion));
+      frame.header.encodeInto(header);
+
+      out.add(header);
+      out.add(frame.body);
     }
+  }
 
-    static class Decompressor extends MessageToMessageDecoder {
+  static class Decompressor extends MessageToMessageDecoder<Frame> {
 
-        private final FrameCompressor compressor;
+    private final FrameCompressor compressor;
 
-        Decompressor(FrameCompressor compressor) {
-            assert compressor != null;
-            this.compressor = compressor;
-        }
+    Decompressor(FrameCompressor compressor) {
+      assert compressor != null;
+      this.compressor = compressor;
+    }
 
-        @Override
-        protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception {
-            if (frame.header.flags.contains(Header.Flag.COMPRESSED)) {
-                // All decompressors allocate a new buffer for the decompressed data, so this is the last time
-                // we have a reference to the compressed body (and therefore a chance to release it).
-                ByteBuf compressedBody = frame.body;
-                try {
-                    out.add(compressor.decompress(frame));
-                } finally {
-                    compressedBody.release();
-                }
-            } else {
-                out.add(frame);
-            }
+    @Override
+    protected void decode(ChannelHandlerContext ctx, Frame frame, List<Object> out)
+        throws Exception {
+      if (frame.header.flags.contains(Header.Flag.COMPRESSED)) {
+        // All decompressors allocate a new buffer for the decompressed data, so this is the last
+        // time
+        // we have a reference to the compressed body (and therefore a chance to release it).
+        ByteBuf compressedBody = frame.body;
+        try {
+          out.add(compressor.decompress(frame));
+        } finally {
+          compressedBody.release();
         }
+      } else {
+        out.add(frame);
+      }
     }
+  }
 
-    static class Compressor extends MessageToMessageEncoder {
+  static class Compressor extends MessageToMessageEncoder<Frame> {
 
-        private final FrameCompressor compressor;
+    private final FrameCompressor compressor;
 
-        Compressor(FrameCompressor compressor) {
-            assert compressor != null;
-            this.compressor = compressor;
-        }
+    Compressor(FrameCompressor compressor) {
+      assert compressor != null;
+      this.compressor = compressor;
+    }
 
-        @Override
-        protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception {
-            // Never compress STARTUP messages
-            if (frame.header.opcode == Message.Request.Type.STARTUP.opcode) {
-                out.add(frame);
-            } else {
-                frame.header.flags.add(Header.Flag.COMPRESSED);
-                // See comment in decode()
-                ByteBuf uncompressedBody = frame.body;
-                try {
-                    out.add(compressor.compress(frame));
-                } finally {
-                    uncompressedBody.release();
-                }
-            }
+    @Override
+    protected void encode(ChannelHandlerContext ctx, Frame frame, List<Object> out)
+        throws Exception {
+      // Never compress STARTUP messages
+      if (frame.header.opcode == Message.Request.Type.STARTUP.opcode) {
+        out.add(frame);
+      } else {
+        frame.header.flags.add(Header.Flag.COMPRESSED);
+        // See comment in decode()
+        ByteBuf uncompressedBody = frame.body;
+        try {
+          out.add(compressor.compress(frame));
+        } finally {
+          uncompressedBody.release();
         }
+      }
     }
+  }
 }
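The Flag bitmask in the Frame code above depends on enum ordinals, which is why the "order of that enum matters" comment is there. A small self-contained illustration of the same encoding; the enum below only mirrors Frame.Header.Flag, which is package-private:

```java
import java.util.EnumSet;

public class FlagBitsDemo {
  // Same ordinal layout as Frame.Header.Flag in the diff above.
  enum Flag { COMPRESSED, TRACING, CUSTOM_PAYLOAD, WARNING, USE_BETA }

  static int serialize(EnumSet<Flag> flags) {
    int bits = 0;
    for (Flag flag : flags) bits |= 1 << flag.ordinal();
    return bits;
  }

  public static void main(String[] args) {
    // COMPRESSED is bit 0 and WARNING is bit 3, so the flags byte is 0b1001 = 9.
    System.out.println(serialize(EnumSet.of(Flag.COMPRESSED, Flag.WARNING)));
  }
}
```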
diff --git a/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java b/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java
index 58e91f0da3d..4063b131ddc 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/FrameCompressor.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,27 +18,39 @@
 package com.datastax.driver.core;
 
 import io.netty.buffer.ByteBuf;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
 abstract class FrameCompressor {
 
-    abstract Frame compress(Frame frame) throws IOException;
+  abstract Frame compress(Frame frame) throws IOException;
+
+  /**
+   * Unlike {@link #compress(Frame)}, this variant does not store the uncompressed length if the
+   * underlying algorithm does not do it natively (like LZ4). It must be stored separately and
+   * passed back to {@link #decompress(ByteBuf, int)}.
+   */
+  abstract ByteBuf compress(ByteBuf buffer) throws IOException;
+
+  abstract Frame decompress(Frame frame) throws IOException;
 
-    abstract Frame decompress(Frame frame) throws IOException;
+  abstract ByteBuf decompress(ByteBuf buffer, int uncompressedLength) throws IOException;
 
-    protected static ByteBuffer inputNioBuffer(ByteBuf buf) {
-        // Using internalNioBuffer(...) as we only hold the reference in this method and so can
-        // reduce Object allocations.
-        int index = buf.readerIndex();
-        int len = buf.readableBytes();
-        return buf.nioBufferCount() == 1 ? buf.internalNioBuffer(index, len) : buf.nioBuffer(index, len);
-    }
+  protected static ByteBuffer inputNioBuffer(ByteBuf buf) {
+    // Using internalNioBuffer(...) as we only hold the reference in this method and so can
+    // reduce Object allocations.
+    int index = buf.readerIndex();
+    int len = buf.readableBytes();
+    return buf.nioBufferCount() == 1
+        ? buf.internalNioBuffer(index, len)
+        : buf.nioBuffer(index, len);
+  }
 
-    protected static ByteBuffer outputNioBuffer(ByteBuf buf) {
-        int index = buf.writerIndex();
-        int len = buf.writableBytes();
-        return buf.nioBufferCount() == 1 ? buf.internalNioBuffer(index, len) : buf.nioBuffer(index, len);
-    }
+  protected static ByteBuffer outputNioBuffer(ByteBuf buf) {
+    int index = buf.writerIndex();
+    int len = buf.writableBytes();
+    return buf.nioBufferCount() == 1
+        ? buf.internalNioBuffer(index, len)
+        : buf.nioBuffer(index, len);
+  }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java b/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java
new file mode 100644
index 00000000000..c59343df26e
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.driver.core;
+
+import com.datastax.driver.core.Message.Response.Type;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPipeline;
+import io.netty.handler.codec.MessageToMessageDecoder;
+import java.util.List;
+
+/**
+ * A handler to deal with different protocol framing formats.
+ *
+ * 

This handler detects when a handshake is successful; then, if necessary, adapts the pipeline + * to the modern framing format introduced in protocol v5. + */ +public class FramingFormatHandler extends MessageToMessageDecoder { + + private final Connection.Factory factory; + + FramingFormatHandler(Connection.Factory factory) { + this.factory = factory; + } + + @Override + protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + boolean handshakeSuccessful = + frame.header.opcode == Type.READY.opcode || frame.header.opcode == Type.AUTHENTICATE.opcode; + if (handshakeSuccessful) { + // By default, the pipeline is configured for legacy framing since this is the format used + // by all protocol versions until handshake; after handshake however, we need to switch to + // modern framing for protocol v5 and higher. + if (frame.header.version.compareTo(ProtocolVersion.V5) >= 0) { + switchToModernFraming(ctx); + } + // once the handshake is successful, the framing format cannot change anymore; + // we can safely remove ourselves from the pipeline. + ctx.pipeline().remove("framingFormatHandler"); + } + out.add(frame); + } + + private void switchToModernFraming(ChannelHandlerContext ctx) { + ChannelPipeline pipeline = ctx.pipeline(); + SegmentCodec segmentCodec = + new SegmentCodec( + ctx.channel().alloc(), factory.configuration.getProtocolOptions().getCompression()); + + // Outbound: "message -> segment -> bytes" instead of "message -> frame -> bytes" + Message.ProtocolEncoder requestEncoder = + (Message.ProtocolEncoder) pipeline.get("messageEncoder"); + pipeline.replace( + "messageEncoder", + "messageToSegmentEncoder", + new MessageToSegmentEncoder(ctx.channel().alloc(), requestEncoder)); + pipeline.replace( + "frameEncoder", "segmentToBytesEncoder", new SegmentToBytesEncoder(segmentCodec)); + + // Inbound: "frame <- segment <- bytes" instead of "frame <- bytes" + pipeline.replace( + "frameDecoder", "bytesToSegmentDecoder", new BytesToSegmentDecoder(segmentCodec)); + pipeline.addAfter( + "bytesToSegmentDecoder", "segmentToFrameDecoder", new SegmentToFrameDecoder()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/FunctionMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/FunctionMetadata.java index 22221728456..f246e59bce2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/FunctionMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/FunctionMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,310 +19,316 @@ import com.datastax.driver.core.utils.MoreObjects; import com.google.common.collect.ImmutableMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Describes a CQL function (created with {@code CREATE FUNCTION...}). - */ +/** Describes a CQL function (created with {@code CREATE FUNCTION...}). */ public class FunctionMetadata { - private static final Logger logger = LoggerFactory.getLogger(FunctionMetadata.class); + private static final Logger logger = LoggerFactory.getLogger(FunctionMetadata.class); - private final KeyspaceMetadata keyspace; - private final String simpleName; - private final Map arguments; - private final String body; - private final boolean calledOnNullInput; - private final String language; - private final DataType returnType; + private final KeyspaceMetadata keyspace; + private final String simpleName; + private final Map arguments; + private final String body; + private final boolean calledOnNullInput; + private final String language; + private final DataType returnType; - private FunctionMetadata(KeyspaceMetadata keyspace, - String simpleName, - Map arguments, - String body, - boolean calledOnNullInput, - String language, - DataType returnType) { - this.keyspace = keyspace; - this.simpleName = simpleName; - this.arguments = arguments; - this.body = body; - this.calledOnNullInput = calledOnNullInput; - this.language = language; - this.returnType = returnType; - } + private FunctionMetadata( + KeyspaceMetadata keyspace, + String simpleName, + Map arguments, + String body, + boolean calledOnNullInput, + String language, + DataType returnType) { + this.keyspace = keyspace; + this.simpleName = simpleName; + this.arguments = arguments; + this.body = body; + this.calledOnNullInput = calledOnNullInput; + this.language = language; + this.returnType = returnType; + } - // Cassandra < 3.0: - // CREATE TABLE system.schema_functions ( - // keyspace_name text, - // function_name text, - // signature frozen>, - // argument_names list, - // argument_types list, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, signature) - // ) WITH CLUSTERING ORDER BY (function_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.functions ( - // keyspace_name text, - // function_name text, - // argument_names frozen>, - // argument_types frozen>, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, argument_types) - // ) WITH CLUSTERING ORDER BY (function_name ASC, argument_types ASC) - // + // Cassandra < 3.0: + // CREATE TABLE system.schema_functions ( + // keyspace_name text, + // function_name text, + // signature frozen>, + // argument_names list, + // argument_types list, + // body text, + // called_on_null_input boolean, + // language text, + // return_type text, + // PRIMARY KEY (keyspace_name, function_name, signature) + // ) WITH CLUSTERING ORDER BY (function_name ASC, signature ASC) + // + // Cassandra 
>= 3.0: + // CREATE TABLE system_schema.functions ( + // keyspace_name text, + // function_name text, + // argument_names frozen>, + // argument_types frozen>, + // body text, + // called_on_null_input boolean, + // language text, + // return_type text, + // PRIMARY KEY (keyspace_name, function_name, argument_types) + // ) WITH CLUSTERING ORDER BY (function_name ASC, argument_types ASC) + // - static FunctionMetadata build(KeyspaceMetadata ksm, Row row, VersionNumber version, Cluster cluster) { - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - String simpleName = row.getString("function_name"); - List argumentNames = row.getList("argument_names", String.class); - // this will be a list of C* types in 2.2 and a list of CQL types in 3.0 - List argumentTypes = row.getList("argument_types", String.class); - Map arguments = buildArguments(ksm, argumentNames, argumentTypes, version, cluster); - if (argumentNames.size() != argumentTypes.size()) { - String fullName = Metadata.fullFunctionName(simpleName, arguments.values()); - logger.error(String.format("Error parsing definition of function %1$s.%2$s: the number of argument names and types don't match." - + "Cluster.getMetadata().getKeyspace(\"%1$s\").getFunction(\"%2$s\") will be missing.", - ksm.getName(), fullName)); - return null; - } - String body = row.getString("body"); - boolean calledOnNullInput = row.getBool("called_on_null_input"); - String language = row.getString("language"); - DataType returnType; - if (version.getMajor() >= 3.0) { - returnType = DataTypeCqlNameParser.parse(row.getString("return_type"), cluster, ksm.getName(), ksm.userTypes, null, false, false); - } else { - returnType = DataTypeClassNameParser.parseOne(row.getString("return_type"), protocolVersion, codecRegistry); - } - return new FunctionMetadata(ksm, simpleName, arguments, body, calledOnNullInput, language, returnType); + static FunctionMetadata build( + KeyspaceMetadata ksm, Row row, VersionNumber version, Cluster cluster) { + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + String simpleName = row.getString("function_name"); + List argumentNames = row.getList("argument_names", String.class); + // this will be a list of C* types in 2.2 and a list of CQL types in 3.0 + List argumentTypes = row.getList("argument_types", String.class); + Map arguments = + buildArguments(ksm, argumentNames, argumentTypes, version, cluster); + if (argumentNames.size() != argumentTypes.size()) { + String fullName = Metadata.fullFunctionName(simpleName, arguments.values()); + logger.error( + String.format( + "Error parsing definition of function %1$s.%2$s: the number of argument names and types don't match." 
+ + "Cluster.getMetadata().getKeyspace(\"%1$s\").getFunction(\"%2$s\") will be missing.", + ksm.getName(), fullName)); + return null; } - - // Note: the caller ensures that names and types have the same size - private static Map buildArguments(KeyspaceMetadata ksm, List names, List types, VersionNumber version, Cluster cluster) { - if (names.isEmpty()) - return Collections.emptyMap(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - Iterator iterTypes = types.iterator(); - for (String name : names) { - DataType type; - if (version.getMajor() >= 3) { - type = DataTypeCqlNameParser.parse(iterTypes.next(), cluster, ksm.getName(), ksm.userTypes, null, false, false); - } else { - type = DataTypeClassNameParser.parseOne(iterTypes.next(), protocolVersion, codecRegistry); - } - builder.put(name, type); - } - return builder.build(); + String body = row.getString("body"); + boolean calledOnNullInput = row.getBool("called_on_null_input"); + String language = row.getString("language"); + DataType returnType; + if (version.getMajor() >= 3.0) { + returnType = + DataTypeCqlNameParser.parse( + row.getString("return_type"), + cluster, + ksm.getName(), + ksm.userTypes, + null, + false, + false); + } else { + returnType = + DataTypeClassNameParser.parseOne( + row.getString("return_type"), protocolVersion, codecRegistry); } + return new FunctionMetadata( + ksm, simpleName, arguments, body, calledOnNullInput, language, returnType); + } - /** - * Returns a CQL query representing this function in human readable form. - *

- * This method is equivalent to {@link #asCQLQuery} but the output is formatted. - * - * @return the CQL query representing this function. - */ - public String exportAsString() { - return asCQLQuery(true); + // Note: the caller ensures that names and types have the same size + private static Map buildArguments( + KeyspaceMetadata ksm, + List names, + List types, + VersionNumber version, + Cluster cluster) { + if (names.isEmpty()) return Collections.emptyMap(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + Iterator iterTypes = types.iterator(); + for (String name : names) { + DataType type; + if (version.getMajor() >= 3) { + type = + DataTypeCqlNameParser.parse( + iterTypes.next(), cluster, ksm.getName(), ksm.userTypes, null, false, false); + } else { + type = DataTypeClassNameParser.parseOne(iterTypes.next(), protocolVersion, codecRegistry); + } + builder.put(name, type); } + return builder.build(); + } - /** - * Returns a CQL query representing this function. - *

- * This method returns a single 'CREATE FUNCTION' query corresponding to - * this function definition. - * - * @return the 'CREATE FUNCTION' query corresponding to this function. - */ - public String asCQLQuery() { - return asCQLQuery(false); - } + /** + * Returns a CQL query representing this function in human readable form. + * + *

This method is equivalent to {@link #asCQLQuery} but the output is formatted. + * + * @return the CQL query representing this function. + */ + public String exportAsString() { + return asCQLQuery(true); + } - @Override - public String toString() { - return asCQLQuery(false); - } + /** + * Returns a CQL query representing this function. + * + *

This method returns a single 'CREATE FUNCTION' query corresponding to this function + * definition. + * + * @return the 'CREATE FUNCTION' query corresponding to this function. + */ + public String asCQLQuery() { + return asCQLQuery(false); + } - private String asCQLQuery(boolean formatted) { + @Override + public String toString() { + return asCQLQuery(false); + } - StringBuilder sb = new StringBuilder("CREATE FUNCTION "); + private String asCQLQuery(boolean formatted) { - sb - .append(Metadata.quoteIfNecessary(keyspace.getName())) - .append('.') - .append(Metadata.quoteIfNecessary(simpleName)) - .append('('); + StringBuilder sb = new StringBuilder("CREATE FUNCTION "); - boolean first = true; - for (Map.Entry entry : arguments.entrySet()) { - if (first) - first = false; - else - sb.append(','); - TableMetadata.newLine(sb, formatted); - String name = entry.getKey(); - DataType type = entry.getValue(); - sb - .append(TableMetadata.spaces(4, formatted)) - .append(Metadata.quoteIfNecessary(name)) - .append(' ') - .append(type.asFunctionParameterString()); - } - sb.append(')'); + sb.append(Metadata.quoteIfNecessary(keyspace.getName())) + .append('.') + .append(Metadata.quoteIfNecessary(simpleName)) + .append('('); - TableMetadata.spaceOrNewLine(sb, formatted) - .append(calledOnNullInput ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT"); + boolean first = true; + for (Map.Entry entry : arguments.entrySet()) { + if (first) first = false; + else sb.append(','); + String name = entry.getKey(); + DataType type = entry.getValue(); + sb.append(Metadata.quoteIfNecessary(name)) + .append(' ') + .append(type.asFunctionParameterString()); + } + sb.append(')'); - TableMetadata.spaceOrNewLine(sb, formatted) - .append("RETURNS ") - .append(returnType); + TableMetadata.spaceOrNewLine(sb, formatted) + .append(calledOnNullInput ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT"); - TableMetadata.spaceOrNewLine(sb, formatted) - .append("LANGUAGE ") - .append(language); + TableMetadata.spaceOrNewLine(sb, formatted) + .append("RETURNS ") + .append(returnType.asFunctionParameterString()); - TableMetadata.spaceOrNewLine(sb, formatted) - .append("AS '") - .append(body) - .append("';"); + TableMetadata.spaceOrNewLine(sb, formatted).append("LANGUAGE ").append(language); - return sb.toString(); - } + TableMetadata.spaceOrNewLine(sb, formatted).append("AS '").append(body).append("';"); - /** - * Returns the keyspace this function belongs to. - * - * @return the keyspace metadata of the keyspace this function belongs to. - */ - public KeyspaceMetadata getKeyspace() { - return keyspace; - } + return sb.toString(); + } - /** - * Returns the CQL signature of this function. - *

- * This is the name of the function, followed by the names of the argument types between parentheses, - * for example {@code sum(int,int)}. - *

- * Note that the returned signature is not qualified with the keyspace name. - * - * @return the signature of this function. - */ - public String getSignature() { - StringBuilder sb = new StringBuilder(); - sb - .append(Metadata.quoteIfNecessary(simpleName)) - .append('('); - boolean first = true; - for (DataType type : arguments.values()) { - if (first) - first = false; - else - sb.append(','); - sb.append(type.asFunctionParameterString()); - } - sb.append(')'); - return sb.toString(); - } + /** + * Returns the keyspace this function belongs to. + * + * @return the keyspace metadata of the keyspace this function belongs to. + */ + public KeyspaceMetadata getKeyspace() { + return keyspace; + } - /** - * Returns the simple name of this function. - *

- * This is the name of the function, without arguments. Note that functions can be overloaded with - * different argument lists, therefore the simple name may not be unique. For example, - * {@code sum(int,int)} and {@code sum(int,int,int)} both have the simple name {@code sum}. - * - * @return the simple name of this function. - * @see #getSignature() - */ - public String getSimpleName() { - return simpleName; + /** + * Returns the CQL signature of this function. + * + *

This is the name of the function, followed by the names of the argument types between + * parentheses, for example {@code sum(int,int)}. + * + *

Note that the returned signature is not qualified with the keyspace name. + * + * @return the signature of this function. + */ + public String getSignature() { + StringBuilder sb = new StringBuilder(); + sb.append(Metadata.quoteIfNecessary(simpleName)).append('('); + boolean first = true; + for (DataType type : arguments.values()) { + if (first) first = false; + else sb.append(','); + sb.append(type.asFunctionParameterString()); } + sb.append(')'); + return sb.toString(); + } - /** - * Returns the names and types of this function's arguments. - * - * @return a map from argument name to argument type. - */ - public Map getArguments() { - return arguments; - } + /** + * Returns the simple name of this function. + * + *

This is the name of the function, without arguments. Note that functions can be overloaded + * with different argument lists, therefore the simple name may not be unique. For example, {@code + * sum(int,int)} and {@code sum(int,int,int)} both have the simple name {@code sum}. + * + * @return the simple name of this function. + * @see #getSignature() + */ + public String getSimpleName() { + return simpleName; + } - /** - * Returns the body of this function. - * - * @return the body. - */ - public String getBody() { - return body; - } + /** + * Returns the names and types of this function's arguments. + * + * @return a map from argument name to argument type. + */ + public Map getArguments() { + return arguments; + } - /** - * Indicates whether this function's body gets called on null input. - *

- * This is {@code true} if the function was created with {@code CALLED ON NULL INPUT}, - * and {@code false} if it was created with {@code RETURNS NULL ON NULL INPUT}. - * - * @return whether this function's body gets called on null input. - */ - public boolean isCalledOnNullInput() { - return calledOnNullInput; - } + /** + * Returns the body of this function. + * + * @return the body. + */ + public String getBody() { + return body; + } - /** - * Returns the programming language in which this function's body is written. - * - * @return the language. - */ - public String getLanguage() { - return language; - } + /** + * Indicates whether this function's body gets called on null input. + * + *

This is {@code true} if the function was created with {@code CALLED ON NULL INPUT}, and + * {@code false} if it was created with {@code RETURNS NULL ON NULL INPUT}. + * + * @return whether this function's body gets called on null input. + */ + public boolean isCalledOnNullInput() { + return calledOnNullInput; + } - /** - * Returns the return type of this function. - * - * @return the return type. - */ - public DataType getReturnType() { - return returnType; - } + /** + * Returns the programming language in which this function's body is written. + * + * @return the language. + */ + public String getLanguage() { + return language; + } - @Override - public boolean equals(Object other) { - if (other == this) - return true; + /** + * Returns the return type of this function. + * + * @return the return type. + */ + public DataType getReturnType() { + return returnType; + } - if (other instanceof FunctionMetadata) { - FunctionMetadata that = (FunctionMetadata) other; - return this.keyspace.getName().equals(that.keyspace.getName()) && - this.arguments.equals(that.arguments) && - this.body.equals(that.body) && - this.calledOnNullInput == that.calledOnNullInput && - this.language.equals(that.language) && - this.returnType.equals(that.returnType); - } - return false; - } + @Override + public boolean equals(Object other) { + if (other == this) return true; - @Override - public int hashCode() { - return MoreObjects.hashCode(keyspace.getName(), arguments, body, calledOnNullInput, language, returnType); + if (other instanceof FunctionMetadata) { + FunctionMetadata that = (FunctionMetadata) other; + return this.keyspace.getName().equals(that.keyspace.getName()) + && this.arguments.equals(that.arguments) + && this.body.equals(that.body) + && this.calledOnNullInput == that.calledOnNullInput + && this.language.equals(that.language) + && this.returnType.equals(that.returnType); } + return false; + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + keyspace.getName(), arguments, body, calledOnNullInput, language, returnType); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java index a0a4c63f414..7a10bc8c558 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableByIndexData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,558 +20,572 @@ import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; -/** - * Collection of (typed) CQL values that can be retrieved by index (starting at zero). - */ +/** Collection of (typed) CQL values that can be retrieved by index (starting at zero). */ public interface GettableByIndexData { - /** - * Returns whether the {@code i}th value is NULL. - * - * @param i the index ({@code 0 <= i < size()}) of the value to check. - * @return whether the {@code i}th value is NULL. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for - * this object. - */ - public boolean isNull(int i); - - /** - * Returns the {@code i}th value as a boolean. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code boolean} - * (for CQL type {@code boolean}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the boolean value of the {@code i}th element. If the value is NULL, {@code false} is returned. - * If you need to distinguish NULL and false values, check first with {@link #isNull(int)} or use {@code get(i, Boolean.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a boolean. - */ - public boolean getBool(int i); - - /** - * Returns the {@code i}th value as a byte. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code byte} - * (for CQL type {@code tinyint}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a byte. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or use {@code get(i, Byte.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a byte. - */ - public byte getByte(int i); - - /** - * Returns the {@code i}th value as a short. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code short} - * (for CQL type {@code smallint}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a short. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or use {@code get(i, Short.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a short. - */ - public short getShort(int i); - - /** - * Returns the {@code i}th value as an integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code int} - * (for CQL type {@code int}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as an integer. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or use {@code get(i, Integer.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to an int. - */ - public int getInt(int i); - - /** - * Returns the {@code i}th value as a long. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code byte} - * (for CQL types {@code bigint} and {@code counter}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a long. If the value is NULL, {@code 0L} is returned. - * If you need to distinguish NULL and 0L, check first with {@link #isNull(int)} or use {@code get(i, Long.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a long. - */ - public long getLong(int i); - - /** - * Returns the {@code i}th value as a date. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code Date} - * (for CQL type {@code timestamp}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a data. If the - * value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code Date}. - */ - public Date getTimestamp(int i); - - /** - * Returns the {@code i}th value as a date (without time). - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@link LocalDate} - * (for CQL type {@code date}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as an date. If the - * value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code LocalDate}. - */ - public LocalDate getDate(int i); - - /** - * Returns the {@code i}th value as a long in nanoseconds since midnight. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code long} - * (for CQL type {@code time}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a long. If the - * value is NULL, {@code 0L} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a long. - */ - public long getTime(int i); - - /** - * Returns the {@code i}th value as a float. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code float} - * (for CQL type {@code float}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a float. If the value is NULL, {@code 0.0f} is returned. - * If you need to distinguish NULL and 0.0f, check first with {@link #isNull(int)} or use {@code get(i, Float.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a float. - */ - public float getFloat(int i); - - /** - * Returns the {@code i}th value as a double. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code double} - * (for CQL type {@code double}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a double. If the value is NULL, {@code 0.0} is returned. - * If you need to distinguish NULL and 0.0, check first with {@link #isNull(int)} or use {@code get(i, Double.class)}. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a double. - */ - public double getDouble(int i); - - /** - * Returns the {@code i}th value as a {@code ByteBuffer}. - *

- * This method does not use any codec; it returns a copy of the binary representation of the value. It is up to the - * caller to convert the returned value appropriately. - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a ByteBuffer. If the - * value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - public ByteBuffer getBytesUnsafe(int i); - - /** - * Returns the {@code i}th value as a byte array. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code ByteBuffer} - * (for CQL type {@code blob}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a byte array. If the - * value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code ByteBuffer}. - */ - public ByteBuffer getBytes(int i); - - /** - * Returns the {@code i}th value as a string. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java string - * (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a string. If the - * value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a string. - */ - public String getString(int i); - - /** - * Returns the {@code i}th value as a variable length integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code BigInteger} - * (for CQL type {@code varint}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a variable - * length integer. If the value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code BigInteger}. - */ - public BigInteger getVarint(int i); - - /** - * Returns the {@code i}th value as a variable length decimal. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code BigDecimal} - * (for CQL type {@code decimal}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a variable - * length decimal. If the value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code BigDecimal}. - */ - public BigDecimal getDecimal(int i); - - /** - * Returns the {@code i}th value as a UUID. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code UUID} - * (for CQL types {@code uuid} and {@code timeuuid}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a UUID. - * If the value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code UUID}. - */ - public UUID getUUID(int i); - - /** - * Returns the {@code i}th value as an InetAddress. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to an {@code InetAddress} - * (for CQL type {@code inet}, this will be the built-in codec). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as an InetAddress. - * If the value is NULL, {@code null} is returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code InetAddress}. - */ - public InetAddress getInet(int i); - - /** - * Returns the {@code i}th value as a list. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a list of the specified type. - *

- * If the type of the elements is generic, use {@link #getList(int, TypeToken)}. - *

- * Implementation note: the actual {@link List} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param elementsClass the class for the elements of the list to retrieve. - * @return the value of the {@code i}th element as a list of - * {@code T} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a list. - */ - public List getList(int i, Class elementsClass); - - /** - * Returns the {@code i}th value as a list. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a list of the specified type. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code List<List<String>> l = row.getList(1, new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link List} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param elementsType the type of the elements of the list to retrieve. - * @return the value of the {@code i}th element as a list of - * {@code T} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a list. - */ - public List getList(int i, TypeToken elementsType); - - /** - * Returns the {@code i}th value as a set. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a set of the specified type. - *

- * If the type of the elements is generic, use {@link #getSet(int, TypeToken)}. - *

- * Implementation note: the actual {@link Set} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param elementsClass the class for the elements of the set to retrieve. - * @return the value of the {@code i}th element as a set of - * {@code T} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a set. - */ - public Set getSet(int i, Class elementsClass); - - /** - * Returns the {@code i}th value as a set. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a set of the specified type. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code Set<List<String>> l = row.getSet(1, new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link Set} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param elementsType the type for the elements of the set to retrieve. - * @return the value of the {@code i}th element as a set of - * {@code T} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a set. - */ - public Set getSet(int i, TypeToken elementsType); - - /** - * Returns the {@code i}th value as a map. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a map of the specified types. - *

- * If the type of the keys and/or values is generic, use {@link #getMap(int, TypeToken, TypeToken)}. - *

- * Implementation note: the actual {@link Map} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param keysClass the class for the keys of the map to retrieve. - * @param valuesClass the class for the values of the map to retrieve. - * @return the value of the {@code i}th element as a map of - * {@code K} to {@code V} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a map. - */ - public Map getMap(int i, Class keysClass, Class valuesClass); - - - /** - * Returns the {@code i}th value as a map. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a map of the specified types. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code Map<Integer, List<String>> l = row.getMap(1, TypeToken.of(Integer.class), new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link Map} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @param keysType the type for the keys of the map to retrieve. - * @param valuesType the type for the values of the map to retrieve. - * @return the value of the {@code i}th element as a map of - * {@code K} to {@code V} objects. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a map. - */ - public Map getMap(int i, TypeToken keysType, TypeToken valuesType); - - /** - * Return the {@code i}th value as a UDT value. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code UDTValue} - * (if the CQL type is a UDT, the registry will generate a codec automatically). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a UDT value. If the value is NULL, - * then {@code null} will be returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code UDTValue}. - */ - public UDTValue getUDTValue(int i); - - /** - * Return the {@code i}th value as a tuple value. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code TupleValue} - * (if the CQL type is a tuple, the registry will generate a codec automatically). - * - * @param i the index ({@code 0 <= i < size()}) to retrieve. - * @return the value of the {@code i}th element as a tuple value. If the value is NULL, - * then {@code null} will be returned. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to a {@code TupleValue}. - */ - public TupleValue getTupleValue(int i); - - /** - * Returns the {@code i}th value as the Java type matching its CQL type. - *

- * This method uses the {@link CodecRegistry} to find the first codec that handles the underlying CQL type. The Java type - * of the returned object will be determined by the codec that was selected. - *

- * Use this method to dynamically inspect elements when types aren't known in advance, for instance if you're writing a - * generic row logger. If you know the target Java type, it is generally preferable to use typed getters, such as the - * ones for built-in types ({@link #getBool(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and - * {@link #get(int, TypeToken)} for custom types. - * - * @param i the index to retrieve. - * @return the value of the {@code i}th value as the Java type matching its CQL type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @see CodecRegistry#codecFor(DataType) - */ - public Object getObject(int i); - - /** - * Returns the {@code i}th value converted to the given Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to the given Java type. - *

- * If the target type is generic, use {@link #get(int, TypeToken)}. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param i the index to retrieve. - * @param targetClass The Java type the value should be converted to. - * @return the value of the {@code i}th value converted to the given Java type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to {@code targetClass}. - */ - T get(int i, Class targetClass); - - /** - * Returns the {@code i}th value converted to the given Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to the given Java type. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param i the index to retrieve. - * @param targetType The Java type the value should be converted to. - * @return the value of the {@code i}th value converted to the given Java type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL - * type to {@code targetType}. - */ - T get(int i, TypeToken targetType); - - /** - * Returns the {@code i}th value converted using the given {@link TypeCodec}. - *

- * This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the given codec instead. - * This can be useful if the codec would collide with a previously registered one, or if you want to use the - * codec just once without registering it. - *

- * It is the caller's responsibility to ensure that the given codec {@link TypeCodec#accepts(DataType) accepts} - * the underlying CQL type; failing to do so may result in {@link InvalidTypeException}s being thrown. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param i the index to retrieve. - * @param codec The {@link TypeCodec} to use to deserialize the value; may not be {@code null}. - * @return the value of the {@code i}th value converted using the given {@link TypeCodec}. - * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) accept} the underlying CQL type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - T get(int i, TypeCodec codec); - + /** + * Returns whether the {@code i}th value is NULL. + * + * @param i the index ({@code 0 <= i < size()}) of the value to check. + * @return whether the {@code i}th value is NULL. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public boolean isNull(int i); + + /** + * Returns the {@code i}th value as a boolean. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code boolean} (for CQL type {@code boolean}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the boolean value of the {@code i}th element. If the value is NULL, {@code false} is + * returned. If you need to distinguish NULL and false values, check first with {@link + * #isNull(int)} or use {@code get(i, Boolean.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a boolean. + */ + public boolean getBool(int i); + + /** + * Returns the {@code i}th value as a byte. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code byte} (for CQL type {@code tinyint}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a byte. If the value is NULL, {@code 0} is + * returned. If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or + * use {@code get(i, Byte.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a byte. + */ + public byte getByte(int i); + + /** + * Returns the {@code i}th value as a short. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code short} (for CQL type {@code smallint}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a short. If the value is NULL, {@code 0} is + * returned. If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or + * use {@code get(i, Short.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a short. + */ + public short getShort(int i); + + /** + * Returns the {@code i}th value as an integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code int} (for CQL type {@code int}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as an integer. If the value is NULL, {@code 0} is + * returned. If you need to distinguish NULL and 0, check first with {@link #isNull(int)} or + * use {@code get(i, Integer.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to an int. + */ + public int getInt(int i); + + /** + * Returns the {@code i}th value as a long. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code long} (for CQL types {@code bigint} and {@code counter}, this will be the + * built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a long. If the value is NULL, {@code 0L} is + * returned. If you need to distinguish NULL and 0L, check first with {@link #isNull(int)} or + * use {@code get(i, Long.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a long. + */ + public long getLong(int i); + + /** + * Returns the {@code i}th value as a date. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code Date} (for CQL type {@code timestamp}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a date. If the value is NULL, {@code null} is + * returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code Date}. + */ + public Date getTimestamp(int i); + + /** + * Returns the {@code i}th value as a date (without time). + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@link LocalDate} (for CQL type {@code date}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a date. If the value is NULL, {@code null} is + * returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code LocalDate}. + */ + public LocalDate getDate(int i); + + /** + * Returns the {@code i}th value as a long in nanoseconds since midnight. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code long} (for CQL type {@code time}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a long. If the value is NULL, {@code 0L} is + * returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a long. + */ + public long getTime(int i); + + /** + * Returns the {@code i}th value as a float. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code float} (for CQL type {@code float}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a float. If the value is NULL, {@code 0.0f} is + * returned. If you need to distinguish NULL and 0.0f, check first with {@link #isNull(int)} + * or use {@code get(i, Float.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a float. + */ + public float getFloat(int i); + + /** + * Returns the {@code i}th value as a double. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code double} (for CQL type {@code double}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a double. If the value is NULL, {@code 0.0} is + * returned. If you need to distinguish NULL and 0.0, check first with {@link #isNull(int)} or + * use {@code get(i, Double.class)}. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a double. + */ + public double getDouble(int i); + + /** + * Returns the {@code i}th value as a {@code ByteBuffer}. + * + *

This method does not use any codec; it returns a copy of the binary representation of the + * value. It is up to the caller to convert the returned value appropriately. + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a ByteBuffer. If the value is NULL, {@code + * null} is returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public ByteBuffer getBytesUnsafe(int i); + + /** + * Returns the {@code i}th value as a byte array. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code ByteBuffer} (for CQL type {@code blob}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a byte array. If the value is NULL, {@code + * null} is returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code ByteBuffer}. + */ + public ByteBuffer getBytes(int i); + + /** + * Returns the {@code i}th value as a string. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java string (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will + * be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a string. If the value is NULL, {@code null} is + * returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a string. + */ + public String getString(int i); + + /** + * Returns the {@code i}th value as a variable length integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code BigInteger} (for CQL type {@code varint}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a variable length integer. If the value is + * NULL, {@code null} is returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code BigInteger}. + */ + public BigInteger getVarint(int i); + + /** + * Returns the {@code i}th value as a variable length decimal. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code BigDecimal} (for CQL type {@code decimal}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a variable length decimal. If the value is + * NULL, {@code null} is returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code BigDecimal}. + */ + public BigDecimal getDecimal(int i); + + /** + * Returns the {@code i}th value as a UUID. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code UUID} (for CQL types {@code uuid} and {@code timeuuid}, this will be the + * built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a UUID. If the value is NULL, {@code null} is + * returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code UUID}. + */ + public UUID getUUID(int i); + + /** + * Returns the {@code i}th value as an InetAddress. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to an {@code InetAddress} (for CQL type {@code inet}, this will be the built-in codec). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as an InetAddress. If the value is NULL, {@code + * null} is returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to an {@code InetAddress}. + */ + public InetAddress getInet(int i); + + /** + * Returns the {@code i}th value as a list. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a list of the specified type. + * + *

If the type of the elements is generic, use {@link #getList(int, TypeToken)}. + * + *

Implementation note: the actual {@link List} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra + * makes no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of {@code T} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a list. + */ + public List getList(int i, Class elementsClass); + + /** + * Returns the {@code i}th value as a list. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a list of the specified type. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code List<List<String>> l = row.getList(1, new TypeToken<List<String>>() {});}
+   * 
+ * + *

Implementation note: the actual {@link List} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsType the type of the elements of the list to retrieve. + * @return the value of the {@code i}th element as a list of {@code T} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a list. + */ + public <T> List<T> getList(int i, TypeToken<T> elementsType); + + /** + * Returns the {@code i}th value as a set. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a set of the specified type. + * + *

If the type of the elements is generic, use {@link #getSet(int, TypeToken)}. + * + *

Implementation note: the actual {@link Set} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of {@code T} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a set. + */ + public Set getSet(int i, Class elementsClass); + + /** + * Returns the {@code i}th value as a set. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a set of the specified type. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code Set<List<String>> l = row.getSet(1, new TypeToken<List<String>>() {});}
+   * 
+ * + *

Implementation note: the actual {@link Set} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param elementsType the type for the elements of the set to retrieve. + * @return the value of the {@code i}th element as a set of {@code T} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a set. + */ + public Set getSet(int i, TypeToken elementsType); + + /** + * Returns the {@code i}th value as a map. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a map of the specified types. + * + *

If the type of the keys and/or values is generic, use {@link #getMap(int, TypeToken, + * TypeToken)}. + * + *

Implementation note: the actual {@link Map} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th element as a map of {@code K} to {@code V} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a map. + */ + public Map getMap(int i, Class keysClass, Class valuesClass); + + /** + * Returns the {@code i}th value as a map. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a map of the specified types. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code Map<Integer, List<String>> l = row.getMap(1, TypeToken.of(Integer.class), new TypeToken<List<String>>() {});}
+   * 
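Similarly, a hedged sketch for the map variant above (assuming, hypothetically, that column 1 is a CQL `map<int, frozen<list<text>>>`): the key type can use a plain `TypeToken.of(...)`, while the generic value type needs an anonymous `TypeToken` subclass.

```java
import com.datastax.driver.core.Row;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;

class NestedMapReadExample {
  // Illustrative only: column 1 is assumed to be a CQL map<int, frozen<list<text>>>.
  static Map<Integer, List<String>> readNestedMap(Row row) {
    return row.getMap(1, TypeToken.of(Integer.class), new TypeToken<List<String>>() {});
  }
}
```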
+ * + *

Implementation note: the actual {@link Map} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @param keysType the type for the keys of the map to retrieve. + * @param valuesType the type for the values of the map to retrieve. + * @return the value of the {@code i}th element as a map of {@code K} to {@code V} objects. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a map. + */ + public Map getMap(int i, TypeToken keysType, TypeToken valuesType); + + /** + * Return the {@code i}th value as a UDT value. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code UDTValue} (if the CQL type is a UDT, the registry will generate a codec + * automatically). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a UDT value. If the value is NULL, then {@code + * null} will be returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code UDTValue}. + */ + public UDTValue getUDTValue(int i); + + /** + * Return the {@code i}th value as a tuple value. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code TupleValue} (if the CQL type is a tuple, the registry will generate a codec + * automatically). + * + * @param i the index ({@code 0 <= i < size()}) to retrieve. + * @return the value of the {@code i}th element as a tuple value. If the value is NULL, then + * {@code null} will be returned. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to a {@code TupleValue}. + */ + public TupleValue getTupleValue(int i); + + /** + * Returns the {@code i}th value as the Java type matching its CQL type. + * + *

This method uses the {@link CodecRegistry} to find the first codec that handles the + * underlying CQL type. The Java type of the returned object will be determined by the codec that + * was selected. + * + *

Use this method to dynamically inspect elements when types aren't known in advance, for + * instance if you're writing a generic row logger. If you know the target Java type, it is + * generally preferable to use typed getters, such as the ones for built-in types ({@link + * #getBool(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link #get(int, + * TypeToken)} for custom types. + * + * @param i the index to retrieve. + * @return the value of the {@code i}th value as the Java type matching its CQL type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @see CodecRegistry#codecFor(DataType) + */ + public Object getObject(int i); + + /** + * Returns the {@code i}th value converted to the given Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to the given Java type. + * + *

If the target type is generic, use {@link #get(int, TypeToken)}. + * + *

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param i the index to retrieve. + * @param targetClass The Java type the value should be converted to. + * @return the value of the {@code i}th value converted to the given Java type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to {@code targetClass}. + */ + T get(int i, Class targetClass); + + /** + * Returns the {@code i}th value converted to the given Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to the given Java type. + * + *

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param i the index to retrieve. + * @param targetType The Java type the value should be converted to. + * @return the value of the {@code i}th value converted to the given Java type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the element's CQL + * type to {@code targetType}. + */ + T get(int i, TypeToken targetType); + + /** + * Returns the {@code i}th value converted using the given {@link TypeCodec}. + * + *

This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the + * given codec instead. This can be useful if the codec would collide with a previously registered + * one, or if you want to use the codec just once without registering it. + * + *

It is the caller's responsibility to ensure that the given codec {@link + * TypeCodec#accepts(DataType) accepts} the underlying CQL type; failing to do so may result in + * {@link InvalidTypeException}s being thrown. + * + *
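For illustration, a minimal sketch of bypassing the registry with an explicit codec (assuming, hypothetically, that column 0 is a CQL `text` column, which the built-in `TypeCodec.varchar()` codec accepts):

```java
import com.datastax.driver.core.Row;
import com.datastax.driver.core.TypeCodec;

class ExplicitCodecReadExample {
  // Illustrative only: forces the built-in varchar codec instead of a registry
  // lookup; column 0 is assumed to be a CQL text/varchar column.
  static String readWithExplicitCodec(Row row) {
    return row.get(0, TypeCodec.varchar());
  }
}
```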

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param i the index to retrieve. + * @param codec The {@link TypeCodec} to use to deserialize the value; may not be {@code null}. + * @return the value of the {@code i}th value converted using the given {@link TypeCodec}. + * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) + * accept} the underlying CQL type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + T get(int i, TypeCodec codec); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java index e13c522c20a..e74fa49e611 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableByNameData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,566 +20,573 @@ import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; -/** - * Collection of (typed) CQL values that can be retrieved by name. - */ +/** Collection of (typed) CQL values that can be retrieved by name. */ public interface GettableByNameData { - /** - * Returns whether the value for {@code name} is NULL. - * - * @param name the name to check. - * @return whether the value for {@code name} is NULL. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - */ - public boolean isNull(String name); - - /** - * Returns the value for {@code name} as a boolean. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code boolean} - * (for CQL type {@code boolean}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the boolean value for {@code name}. If the value is NULL, {@code false} is returned. - * If you need to distinguish NULL and false values, check first with {@link #isNull(String)} or use {@code get(name, Boolean.class)}. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a boolean. - */ - public boolean getBool(String name); - - /** - * Returns the value for {@code name} as a byte. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code byte} - * (for CQL type {@code tinyint}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a byte. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use {@code get(name, Byte.class)}. - * {@code 0} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a byte. - */ - public byte getByte(String name); - - /** - * Returns the value for {@code name} as a short. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code short} - * (for CQL type {@code smallint}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a short. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use {@code get(name, Short.class)}. - * {@code 0} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a short. - */ - public short getShort(String name); - - /** - * Returns the value for {@code name} as an integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code int} - * (for CQL type {@code int}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as an integer. If the value is NULL, {@code 0} is returned. - * If you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use {@code get(name, Integer.class)}. - * {@code 0} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to an int. - */ - public int getInt(String name); - - /** - * Returns the value for {@code name} as a long. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code byte} - * (for CQL types {@code bigint} and {@code counter}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a long. If the value is NULL, {@code 0L} is returned. - * If you need to distinguish NULL and 0L, check first with {@link #isNull(String)} or use {@code get(name, Long.class)}. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a long. - */ - public long getLong(String name); - - /** - * Returns the value for {@code name} as a date. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code Date} - * (for CQL type {@code timestamp}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a date. If the value is NULL, - * {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code Date}. - */ - public Date getTimestamp(String name); - - /** - * Returns the value for {@code name} as a date (without time). - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@link LocalDate} - * (for CQL type {@code date}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a date. If the value is NULL, - * {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code LocalDate}. - */ - public LocalDate getDate(String name); - - /** - * Returns the value for {@code name} as a long in nanoseconds since midnight. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code long} - * (for CQL type {@code time}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a long. If the value is NULL, - * {@code 0L} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a long. - */ - public long getTime(String name); - - /** - * Returns the value for {@code name} as a float. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code float} - * (for CQL type {@code float}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a float. If the value is NULL, {@code 0.0f} is returned. - * If you need to distinguish NULL and 0.0f, check first with {@link #isNull(String)} or use {@code get(name, Float.class)}. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a float. - */ - public float getFloat(String name); - - /** - * Returns the value for {@code name} as a double. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code double} - * (for CQL type {@code double}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a double. If the value is NULL, {@code 0.0} is returned. - * If you need to distinguish NULL and 0.0, check first with {@link #isNull(String)} or use {@code get(name, Double.class)}. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a double. - */ - public double getDouble(String name); - - /** - * Returns the value for {@code name} as a ByteBuffer. - *

- * This method does not use any codec; it returns a copy of the binary representation of the value. It is up to the - * caller to convert the returned value appropriately. - *

- * Note: this method always return the bytes composing the value, even if - * the column is not of type BLOB. That is, this method never throw an - * InvalidTypeException. However, if the type is not BLOB, it is up to the - * caller to handle the returned value correctly. - * - * @param name the name to retrieve. - * @return the value for {@code name} as a ByteBuffer. If the value is NULL, - * {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - */ - public ByteBuffer getBytesUnsafe(String name); - - /** - * Returns the value for {@code name} as a byte array. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java {@code ByteBuffer} - * (for CQL type {@code blob}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a byte array. If the value is NULL, - * {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code ByteBuffer}. - */ - public ByteBuffer getBytes(String name); - - /** - * Returns the value for {@code name} as a string. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a Java string - * (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a string. If the value is NULL, - * {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a string. - */ - public String getString(String name); - - /** - * Returns the value for {@code name} as a variable length integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code BigInteger} - * (for CQL type {@code varint}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a variable length integer. - * If the value is NULL, {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code BigInteger}. - */ - public BigInteger getVarint(String name); - - /** - * Returns the value for {@code name} as a variable length decimal. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code BigDecimal} - * (for CQL type {@code decimal}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a variable length decimal. - * If the value is NULL, {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code BigDecimal}. - */ - public BigDecimal getDecimal(String name); - - /** - * Returns the value for {@code name} as a UUID. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code UUID} - * (for CQL types {@code uuid} and {@code timeuuid}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as a UUID. - * If the value is NULL, {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code UUID}. - */ - public UUID getUUID(String name); - - /** - * Returns the value for {@code name} as an InetAddress. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to an {@code InetAddress} - * (for CQL type {@code inet}, this will be the built-in codec). - * - * @param name the name to retrieve. - * @return the value for {@code name} as an InetAddress. - * If the value is NULL, {@code null} is returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code InetAddress}. - */ - public InetAddress getInet(String name); - - /** - * Returns the value for {@code name} as a list. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a list of the specified type. - *

- * If the type of the elements is generic, use {@link #getList(String, TypeToken)}. - *

- * Implementation note: the actual {@link List} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param elementsClass the class for the elements of the list to retrieve. - * @return the value of the {@code i}th element as a list of - * {@code T} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a list. - */ - public List getList(String name, Class elementsClass); - - /** - * Returns the value for {@code name} as a list. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a list of the specified type. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code List<List<String>> l = row.getList("theColumn", new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link List} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param elementsType the type for the elements of the list to retrieve. - * @return the value of the {@code i}th element as a list of - * {@code T} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a list. - */ - public List getList(String name, TypeToken elementsType); - - /** - * Returns the value for {@code name} as a set. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a set of the specified type. - *

- * If the type of the elements is generic, use {@link #getSet(String, TypeToken)}. - *

- * Implementation note: the actual {@link Set} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param elementsClass the class for the elements of the set to retrieve. - * @return the value of the {@code i}th element as a set of - * {@code T} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a set. - */ - public Set getSet(String name, Class elementsClass); - - /** - * Returns the value for {@code name} as a set. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a set of the specified type. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code Set<List<String>> l = row.getSet("theColumn", new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link Set} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param elementsType the type for the elements of the set to retrieve. - * @return the value of the {@code i}th element as a set of - * {@code T} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a set. - */ - public Set getSet(String name, TypeToken elementsType); - - /** - * Returns the value for {@code name} as a map. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a map of the specified types. - *

- * If the type of the keys and/or values is generic, use {@link #getMap(String, TypeToken, TypeToken)}. - *

- * Implementation note: the actual {@link Map} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param keysClass the class for the keys of the map to retrieve. - * @param valuesClass the class for the values of the map to retrieve. - * @return the value of {@code name} as a map of - * {@code K} to {@code V} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a map. - */ - public Map getMap(String name, Class keysClass, Class valuesClass); - - /** - * Returns the value for {@code name} as a map. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a map of the specified types. - *

- * Use this variant with nested collections, which produce a generic element type: - *

-     * {@code Map<Integer, List<String>> l = row.getMap("theColumn", TypeToken.of(Integer.class), new TypeToken<List<String>>() {});}
-     * 
- *

- * Implementation note: the actual {@link Map} implementation will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent. By default, the driver will return mutable instances, and - * a CQL {@code NULL} will mapped to an empty collection (note that Cassandra - * makes no distinction between {@code NULL} and an empty collection). - * - * @param name the name to retrieve. - * @param keysType the class for the keys of the map to retrieve. - * @param valuesType the class for the values of the map to retrieve. - * @return the value of {@code name} as a map of - * {@code K} to {@code V} objects. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a map. - */ - public Map getMap(String name, TypeToken keysType, TypeToken valuesType); - - /** - * Return the value for {@code name} as a UDT value. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code UDTValue} - * (if the CQL type is a UDT, the registry will generate a codec automatically). - * - * @param name the name to retrieve. - * @return the value of {@code name} as a UDT value. If the value is NULL, - * then {@code null} will be returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code UDTValue}. - */ - public UDTValue getUDTValue(String name); - - /** - * Return the value for {@code name} as a tuple value. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to a {@code TupleValue} - * (if the CQL type is a tuple, the registry will generate a codec automatically). - * - * @param name the name to retrieve. - * @return the value of {@code name} as a tuple value. If the value is NULL, - * then {@code null} will be returned. - * @throws IllegalArgumentException if {@code name} is not valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to a {@code TupleValue}. - */ - public TupleValue getTupleValue(String name); - - /** - * Returns the value for {@code name} as the Java type matching its CQL type. - *

- * This method uses the {@link CodecRegistry} to find the first codec that handles the underlying CQL type. The Java type - * of the returned object will be determined by the codec that was selected. - *

- * Use this method to dynamically inspect elements when types aren't known in advance, for instance if you're writing a - * generic row logger. If you know the target Java type, it is generally preferable to use typed getters, such as the - * ones for built-in types ({@link #getBool(String)}, {@link #getInt(String)}, etc.), or {@link #get(String, Class)} and - * {@link #get(String, TypeToken)} for custom types. - * - * @param name the name to retrieve. - * @return the value of {@code name} as the Java type matching its CQL type. - * If the value is NULL and is a simple type, UDT or tuple, {@code null} is returned. - * If it is NULL and is a collection type, an empty (immutable) collection is returned. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @see CodecRegistry#codecFor(DataType) - */ - Object getObject(String name); - - /** - * Returns the value for {@code name} converted to the given Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to the given Java type. - *

- * If the target type is generic, use {@link #get(String, TypeToken)}. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param name the name to retrieve. - * @param targetClass The Java type the value should be converted to. - * @return the value for {@code name} value converted to the given Java type. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to {@code targetClass}. - */ - T get(String name, Class targetClass); - - /** - * Returns the value for {@code name} converted to the given Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL type to the given Java type. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param name the name to retrieve. - * @param targetType The Java type the value should be converted to. - * @return the value for {@code name} value converted to the given Java type. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL - * type to {@code targetType}. - */ - T get(String name, TypeToken targetType); - - /** - * Returns the value for {@code name} converted using the given {@link TypeCodec}. - *

- * This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the given codec instead. - * This can be useful if the codec would collide with a previously registered one, or if you want to use the - * codec just once without registering it. - *

- * It is the caller's responsibility to ensure that the given codec {@link TypeCodec#accepts(DataType) accepts} - * the underlying CQL type; failing to do so may result in {@link InvalidTypeException}s being thrown. - *

- * Implementation note: the actual object returned by this method will depend - * on the {@link TypeCodec codec} being used; therefore, callers should - * make no assumptions concerning its mutability nor its thread-safety. - * Furthermore, the behavior of this method in respect to CQL {@code NULL} values is also - * codec-dependent; by default, a CQL {@code NULL} value translates to {@code null} for - * simple CQL types, UDTs and tuples, and to empty collections for all CQL collection types. - * - * @param name the name to retrieve. - * @param codec The {@link TypeCodec} to use to deserialize the value; may not be {@code null}. - * @return the value of the {@code i}th value converted using the given {@link TypeCodec}. - * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) accept} the underlying CQL type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - T get(String name, TypeCodec codec); - + /** + * Returns whether the value for {@code name} is NULL. + * + * @param name the name to check. + * @return whether the value for {@code name} is NULL. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + */ + public boolean isNull(String name); + + /** + * Returns the value for {@code name} as a boolean. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code boolean} (for CQL type {@code boolean}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the boolean value for {@code name}. If the value is NULL, {@code false} is returned. If + * you need to distinguish NULL and false values, check first with {@link #isNull(String)} or + * use {@code get(name, Boolean.class)}. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a boolean. + */ + public boolean getBool(String name); + + /** + * Returns the value for {@code name} as a byte. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code byte} (for CQL type {@code tinyint}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a byte. If the value is NULL, {@code 0} is returned. If + * you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use {@code + * get(name, Byte.class)}. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a byte. + */ + public byte getByte(String name); + + /** + * Returns the value for {@code name} as a short. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code short} (for CQL type {@code smallint}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a short. If the value is NULL, {@code 0} is returned. If + * you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use {@code + * get(name, Short.class)}. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a short. + */ + public short getShort(String name); + + /** + * Returns the value for {@code name} as an integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code int} (for CQL type {@code int}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as an integer. If the value is NULL, {@code 0} is returned. + * If you need to distinguish NULL and 0, check first with {@link #isNull(String)} or use + * {@code get(name, Integer.class)}. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to an int. + */ + public int getInt(String name); + + /** + * Returns the value for {@code name} as a long. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code long} (for CQL types {@code bigint} and {@code counter}, this will be the + * built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a long. If the value is NULL, {@code 0L} is returned. If + * you need to distinguish NULL and 0L, check first with {@link #isNull(String)} or use {@code + * get(name, Long.class)}. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a long. + */ + public long getLong(String name); + + /** + * Returns the value for {@code name} as a date. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code Date} (for CQL type {@code timestamp}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a date. If the value is NULL, {@code null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code Date}. + */ + public Date getTimestamp(String name); + + /** + * Returns the value for {@code name} as a date (without time). + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@link LocalDate} (for CQL type {@code date}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a date. If the value is NULL, {@code null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code LocalDate}. + */ + public LocalDate getDate(String name); + + /** + * Returns the value for {@code name} as a long in nanoseconds since midnight. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code long} (for CQL type {@code time}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a long. If the value is NULL, {@code 0L} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a long. + */ + public long getTime(String name); + + /** + * Returns the value for {@code name} as a float. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code float} (for CQL type {@code float}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a float. If the value is NULL, {@code 0.0f} is returned. + * If you need to distinguish NULL and 0.0f, check first with {@link #isNull(String)} or use + * {@code get(name, Float.class)}. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a float. + */ + public float getFloat(String name); + + /** + * Returns the value for {@code name} as a double. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code double} (for CQL type {@code double}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a double. If the value is NULL, {@code 0.0} is returned. + * If you need to distinguish NULL and 0.0, check first with {@link #isNull(String)} or use + * {@code get(name, Double.class)}. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a double. + */ + public double getDouble(String name); + + /** + * Returns the value for {@code name} as a ByteBuffer. + * + *

This method does not use any codec; it returns a copy of the binary representation of the + * value. It is up to the caller to convert the returned value appropriately. + * + *

Note: this method always returns the bytes composing the value, even if the column is not of + * type BLOB. That is, this method never throws an InvalidTypeException. However, if the type is + * not BLOB, it is up to the caller to handle the returned value correctly. + * + * @param name the name to retrieve. + * @return the value for {@code name} as a ByteBuffer. If the value is NULL, {@code null} is + * returned. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public ByteBuffer getBytesUnsafe(String name); + + /** + * Returns the value for {@code name} as a byte array. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java {@code ByteBuffer} (for CQL type {@code blob}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a byte array. If the value is NULL, {@code null} is + * returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code ByteBuffer}. + */ + public ByteBuffer getBytes(String name); + + /** + * Returns the value for {@code name} as a string. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a Java string (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will + * be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a string. If the value is NULL, {@code null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a string. + */ + public String getString(String name); + + /** + * Returns the value for {@code name} as a variable length integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code BigInteger} (for CQL type {@code varint}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a variable length integer. If the value is NULL, {@code + * null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code BigInteger}. + */ + public BigInteger getVarint(String name); + + /** + * Returns the value for {@code name} as a variable length decimal. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code BigDecimal} (for CQL type {@code decimal}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a variable length decimal. If the value is NULL, {@code + * null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code BigDecimal}. + */ + public BigDecimal getDecimal(String name); + + /** + * Returns the value for {@code name} as a UUID. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code UUID} (for CQL types {@code uuid} and {@code timeuuid}, this will be the + * built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as a UUID. If the value is NULL, {@code null} is returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code UUID}. + */ + public UUID getUUID(String name); + + /** + * Returns the value for {@code name} as an InetAddress. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to an {@code InetAddress} (for CQL type {@code inet}, this will be the built-in codec). + * + * @param name the name to retrieve. + * @return the value for {@code name} as an InetAddress. If the value is NULL, {@code null} is + * returned. + * @throws IllegalArgumentException if {@code name} is not valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code InetAddress}. + */ + public InetAddress getInet(String name); + + /** + * Returns the value for {@code name} as a list. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a list of the specified type. + * + *

If the type of the elements is generic, use {@link #getList(String, TypeToken)}. + * + *

Implementation note: the actual {@link List} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of {@code name} as a list of {@code T} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a list. + */ + public List getList(String name, Class elementsClass); + + /** + * Returns the value for {@code name} as a list. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a list of the specified type. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code List<List<String>> l = row.getList("theColumn", new TypeToken<List<String>>() {});}
+   * 
+ * + *

Implementation note: the actual {@link List} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param elementsType the type for the elements of the list to retrieve. + * @return the value of {@code name} as a list of {@code T} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a list. + */ + public List getList(String name, TypeToken elementsType); + + /** + * Returns the value for {@code name} as a set. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a set of the specified type. + * + *

If the type of the elements is generic, use {@link #getSet(String, TypeToken)}. + * + *

Implementation note: the actual {@link Set} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of {@code name} as a set of {@code T} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a set. + */ + public <T> Set<T> getSet(String name, Class<T> elementsClass); + + /** + * Returns the value for {@code name} as a set. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a set of the specified type. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code Set<List<String>> l = row.getSet("theColumn", new TypeToken<List<String>>() {});}
+   * 
+ * + *

Implementation note: the actual {@link Set} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param elementsType the type for the elements of the set to retrieve. + * @return the value of {@code name} as a set of {@code T} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a set. + */ + public <T> Set<T> getSet(String name, TypeToken<T> elementsType); + + /** + * Returns the value for {@code name} as a map. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a map of the specified types. + * + *

If the type of the keys and/or values is generic, use {@link #getMap(String, TypeToken, + * TypeToken)}. + * + *

Implementation note: the actual {@link Map} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of {@code name} as a map of {@code K} to {@code V} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a map. + */ + public <K, V> Map<K, V> getMap(String name, Class<K> keysClass, Class<V> valuesClass); + + /** + * Returns the value for {@code name} as a map. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a map of the specified types. + * + *

Use this variant with nested collections, which produce a generic element type: + * + *

+   * {@code Map<Integer, List<String>> l = row.getMap("theColumn", TypeToken.of(Integer.class), new TypeToken<List<String>>() {});}
+   * 
+ * + *

Implementation note: the actual {@link Map} implementation will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent. By default, the driver will return mutable + * instances, and a CQL {@code NULL} will be mapped to an empty collection (note that Cassandra makes + * no distinction between {@code NULL} and an empty collection). + * + * @param name the name to retrieve. + * @param keysType the type for the keys of the map to retrieve. + * @param valuesType the type for the values of the map to retrieve. + * @return the value of {@code name} as a map of {@code K} to {@code V} objects. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a map. + */ + public <K, V> Map<K, V> getMap(String name, TypeToken<K> keysType, TypeToken<V> valuesType); + + /** + * Returns the value for {@code name} as a UDT value. + * + *
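A similar sketch for the set and map getters (column names hypothetical); mixing a plain key type with a generic value type uses `TypeToken.of` for the former:

```java
import com.datastax.driver.core.Row;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class SetAndMapGetterExample {
  static void show(Row row) {
    // set<text> column
    Set<String> emails = row.getSet("emails", String.class);

    // map<int, frozen<list<text>>> column: plain key type, generic value type
    Map<Integer, List<String>> attempts =
        row.getMap(
            "attempts_by_day", TypeToken.of(Integer.class), new TypeToken<List<String>>() {});

    System.out.println(emails.size() + " emails, " + attempts.size() + " days");
  }
}
```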

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code UDTValue} (if the CQL type is a UDT, the registry will generate a codec + * automatically). + * + * @param name the name to retrieve. + * @return the value of {@code name} as a UDT value. If the value is NULL, then {@code null} will + * be returned. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code UDTValue}. + */ + public UDTValue getUDTValue(String name); + + /** + * Returns the value for {@code name} as a tuple value. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to a {@code TupleValue} (if the CQL type is a tuple, the registry will generate a codec + * automatically). + * + * @param name the name to retrieve. + * @return the value of {@code name} as a tuple value. If the value is NULL, then {@code null} + * will be returned. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to a {@code TupleValue}. + */ + public TupleValue getTupleValue(String name); + + /** + * Returns the value for {@code name} as the Java type matching its CQL type. + * + *
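A sketch of reading UDT and tuple columns (the column and field names below are hypothetical); UDT fields are accessed by name, tuple fields by position:

```java
import com.datastax.driver.core.Row;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;

public class UdtAndTupleGetterExample {
  static void show(Row row) {
    // Column declared with a user-defined type, e.g. address {street text, zip int}.
    UDTValue address = row.getUDTValue("address");
    if (address != null) {
      System.out.println(address.getString("street") + " " + address.getInt("zip"));
    }

    // Column declared as tuple<float, float>; tuple elements are read by index.
    TupleValue coords = row.getTupleValue("coords");
    if (coords != null) {
      System.out.println(coords.getFloat(0) + "," + coords.getFloat(1));
    }
  }
}
```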

This method uses the {@link CodecRegistry} to find the first codec that handles the + * underlying CQL type. The Java type of the returned object will be determined by the codec that + * was selected. + * + *

Use this method to dynamically inspect elements when types aren't known in advance, for + * instance if you're writing a generic row logger. If you know the target Java type, it is + * generally preferable to use typed getters, such as the ones for built-in types ({@link + * #getBool(String)}, {@link #getInt(String)}, etc.), or {@link #get(String, Class)} and {@link + * #get(String, TypeToken)} for custom types. + * + * @param name the name to retrieve. + * @return the value of {@code name} as the Java type matching its CQL type. If the value is NULL + * and is a simple type, UDT or tuple, {@code null} is returned. If it is NULL and is a + * collection type, an empty (immutable) collection is returned. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @see CodecRegistry#codecFor(DataType) + */ + Object getObject(String name); + + /** + * Returns the value for {@code name} converted to the given Java type. + * + *
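A minimal sketch of the "generic row logger" use case mentioned above, iterating over the row's column definitions and printing every value without knowing its Java type in advance:

```java
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;

public class RowLogger {
  static void log(Row row) {
    StringBuilder sb = new StringBuilder();
    for (ColumnDefinitions.Definition column : row.getColumnDefinitions()) {
      // getObject lets us handle any column; the returned Java type is codec-dependent.
      sb.append(column.getName()).append('=').append(row.getObject(column.getName())).append(' ');
    }
    System.out.println(sb.toString().trim());
  }
}
```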

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to the given Java type. + * + *

If the target type is generic, use {@link #get(String, TypeToken)}. + * + *

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param name the name to retrieve. + * @param targetClass The Java type the value should be converted to. + * @return the value for {@code name} converted to the given Java type. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to {@code targetClass}. + */ + <T> T get(String name, Class<T> targetClass); + + /** + * Returns the value for {@code name} converted to the given Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to convert the underlying CQL + * type to the given Java type. + * + *

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method with respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param name the name to retrieve. + * @param targetType The Java type the value should be converted to. + * @return the value for {@code name} converted to the given Java type. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the underlying CQL + * type to {@code targetType}. + */ + <T> T get(String name, TypeToken<T> targetType); + + /** + * Returns the value for {@code name} converted using the given {@link TypeCodec}. + * + *
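A sketch of the generic accessors (column names hypothetical); both go through the CodecRegistry, so the requested Java type must have a registered codec for the column's CQL type:

```java
import com.datastax.driver.core.Row;
import com.google.common.reflect.TypeToken;
import java.util.Map;
import java.util.UUID;

public class GenericGetExample {
  static void show(Row row) {
    // Equivalent to getUUID("id"), expressed through the generic accessor.
    UUID id = row.get("id", UUID.class);

    // Generic target type: a TypeToken is needed to keep the type parameters.
    Map<String, Integer> counters = row.get("counters", new TypeToken<Map<String, Integer>>() {});

    System.out.println(id + " " + counters);
  }
}
```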

This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the + * given codec instead. This can be useful if the codec would collide with a previously registered + * one, or if you want to use the codec just once without registering it. + * + *

It is the caller's responsibility to ensure that the given codec {@link + * TypeCodec#accepts(DataType) accepts} the underlying CQL type; failing to do so may result in + * {@link InvalidTypeException}s being thrown. + * + *
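For illustration, a sketch of supplying an explicit codec, assuming the built-in static codec factories on `TypeCodec` (e.g. `TypeCodec.varchar()`); the column name is hypothetical and must actually be of a CQL type the codec accepts:

```java
import com.datastax.driver.core.Row;
import com.datastax.driver.core.TypeCodec;

public class ExplicitCodecExample {
  static void show(Row row) {
    // Bypasses the CodecRegistry entirely: the given codec is used as-is, so the caller
    // must make sure it accepts the column's CQL type (here, a text column).
    String raw = row.get("payload", TypeCodec.varchar());
    System.out.println(raw);
  }
}
```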

Implementation note: the actual object returned by this method will depend on the {@link + * TypeCodec codec} being used; therefore, callers should make no assumptions concerning its + * mutability nor its thread-safety. Furthermore, the behavior of this method in respect to CQL + * {@code NULL} values is also codec-dependent; by default, a CQL {@code NULL} value translates to + * {@code null} for simple CQL types, UDTs and tuples, and to empty collections for all CQL + * collection types. + * + * @param name the name to retrieve. + * @param codec The {@link TypeCodec} to use to deserialize the value; may not be {@code null}. + * @return the value of the {@code i}th value converted using the given {@link TypeCodec}. + * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) + * accept} the underlying CQL type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + T get(String name, TypeCodec codec); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/GettableData.java b/driver-core/src/main/java/com/datastax/driver/core/GettableData.java index a2f62a6448c..90fdaa2a730 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/GettableData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/GettableData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +18,7 @@ package com.datastax.driver.core; /** - * Collection of (typed) CQL values that can be retrieved either by index (starting at zero) or by name. + * Collection of (typed) CQL values that can be retrieved either by index (starting at zero) or by + * name. */ -public interface GettableData extends GettableByIndexData, GettableByNameData { -} +public interface GettableData extends GettableByIndexData, GettableByNameData {} diff --git a/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java b/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java index 6f8c400c37a..ec53806b7f7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java +++ b/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,246 +18,346 @@ package com.datastax.driver.core; import com.datastax.driver.core.exceptions.DriverInternalError; +import com.google.common.base.Function; import com.google.common.collect.BiMap; import com.google.common.collect.Maps; +import com.google.common.net.HostAndPort; import com.google.common.reflect.TypeToken; -import com.google.common.util.concurrent.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.util.Map; import java.util.concurrent.Executor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A compatibility layer to support a wide range of Guava versions. - *

- * The driver is compatible with Guava 16.0.1 or higher, but Guava 20 introduced incompatible breaking changes in its - * API, that could in turn be breaking for legacy driver clients if we simply upgraded our dependency. We don't want to - * increment our major version "just" for Guava (we have other changes planned). - *

- * Therefore we depend on Guava 19, which has both the deprecated and the new APIs, and detect the actual version at - * runtime in order to call the relevant methods. - *

- * This is a hack, and might not work with subsequent Guava releases; the real fix is to stop exposing Guava in our - * public API. We'll address that in version 4 of the driver. + * + *

The driver is compatible with Guava 16.0.1 or higher, but Guava 20 introduced incompatible + * breaking changes in its API that could in turn break legacy driver clients if we + * simply upgraded our dependency. We don't want to increment our major version "just" for Guava (we + * have other changes planned). + * + *

Therefore we depend on Guava 19, which has both the deprecated and the new APIs, and detect + * the actual version at runtime in order to call the relevant methods. + * + *
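In practice, driver-internal call sites never invoke the version-specific Futures methods directly; they go through the singleton chosen at startup, roughly as in this sketch:

```java
import com.datastax.driver.core.GuavaCompatibility;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class GuavaCompatibilityUsageExample {
  static ListenableFuture<String> describe(ListenableFuture<Integer> countFuture) {
    // Instead of calling Futures.transformAsync (Guava >= 19) or Futures.transform
    // (Guava < 19) directly, let the compatibility layer dispatch at runtime.
    return GuavaCompatibility.INSTANCE.transformAsync(
        countFuture,
        new AsyncFunction<Integer, String>() {
          @Override
          public ListenableFuture<String> apply(Integer count) {
            return Futures.immediateFuture("count=" + count);
          }
        });
  }
}
```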

This is a hack, and might not work with subsequent Guava releases; the real fix is to stop + * exposing Guava in our public API. We'll address that in version 4 of the driver. */ @SuppressWarnings("deprecation") public abstract class GuavaCompatibility { - private static final Logger logger = LoggerFactory.getLogger(GuavaCompatibility.class); + private static final Logger logger = LoggerFactory.getLogger(GuavaCompatibility.class); + + /** + * The unique instance of this class, that is compatible with the Guava version found in the + * classpath. + */ + public static final GuavaCompatibility INSTANCE = selectImplementation(); + + /** + * Force the initialization of the class. This should be called early to ensure a fast failure if + * an incompatible version of Guava is in the classpath (the driver code calls it when loading the + * {@link Cluster} class). + */ + public static void init() { + // nothing to do, we just want the static initializers to run + } + + /** + * Returns a {@code Future} whose result is taken from the given primary {@code input} or, if the + * primary input fails, from the {@code Future} provided by the {@code fallback}. + * + * @see Futures#withFallback(ListenableFuture, com.google.common.util.concurrent.FutureFallback) + * @see Futures#catchingAsync(ListenableFuture, Class, AsyncFunction) + */ + public abstract ListenableFuture withFallback( + ListenableFuture input, AsyncFunction fallback); + + /** + * Returns a {@code Future} whose result is taken from the given primary {@code input} or, if the + * primary input fails, from the {@code Future} provided by the {@code fallback}. + * + * @see Futures#withFallback(ListenableFuture, com.google.common.util.concurrent.FutureFallback, + * Executor) + * @see Futures#catchingAsync(ListenableFuture, Class, AsyncFunction, Executor) + */ + public abstract ListenableFuture withFallback( + ListenableFuture input, AsyncFunction fallback, Executor executor); + + /** + * Registers separate success and failure callbacks to be run when the {@code Future}'s + * computation is {@linkplain java.util.concurrent.Future#isDone() complete} or, if the + * computation is already complete, immediately. + * + *

The callback is run in {@link #sameThreadExecutor()}. + * + * @see Futures#addCallback(ListenableFuture, FutureCallback, Executor) + */ + public void addCallback(ListenableFuture input, FutureCallback callback) { + addCallback(input, callback, sameThreadExecutor()); + } + + /** + * Registers separate success and failure callbacks to be run when the {@code Future}'s + * computation is {@linkplain java.util.concurrent.Future#isDone() complete} or, if the + * computation is already complete, immediately. + * + * @see Futures#addCallback(ListenableFuture, FutureCallback, Executor) + */ + public void addCallback( + ListenableFuture input, FutureCallback callback, Executor executor) { + Futures.addCallback(input, callback, executor); + } + + /** + * Returns a new {@code ListenableFuture} whose result is the product of applying the given {@code + * Function} to the result of the given {@code Future}. + * + *

The callback is run in {@link #sameThreadExecutor()}. + * + * @see Futures#transform(ListenableFuture, Function, Executor) + */ + public ListenableFuture transform( + ListenableFuture input, Function function) { + return transform(input, function, sameThreadExecutor()); + } + + /** + * Returns a new {@code ListenableFuture} whose result is the product of applying the given {@code + * Function} to the result of the given {@code Future}. + * + * @see Futures#transform(ListenableFuture, Function, Executor) + */ + public ListenableFuture transform( + ListenableFuture input, Function function, Executor executor) { + return Futures.transform(input, function, executor); + } + + /** + * Returns a new {@code ListenableFuture} whose result is asynchronously derived from the result + * of the given {@code Future}. More precisely, the returned {@code Future} takes its result from + * a {@code Future} produced by applying the given {@code AsyncFunction} to the result of the + * original {@code Future}. + * + * @see Futures#transform(ListenableFuture, AsyncFunction) + * @see Futures#transformAsync(ListenableFuture, AsyncFunction) + */ + public abstract ListenableFuture transformAsync( + ListenableFuture input, AsyncFunction function); + + /** + * Returns a new {@code ListenableFuture} whose result is asynchronously derived from the result + * of the given {@code Future}. More precisely, the returned {@code Future} takes its result from + * a {@code Future} produced by applying the given {@code AsyncFunction} to the result of the + * original {@code Future}. + * + * @see Futures#transform(ListenableFuture, AsyncFunction, Executor) + * @see Futures#transformAsync(ListenableFuture, AsyncFunction, Executor) + */ + public abstract ListenableFuture transformAsync( + ListenableFuture input, AsyncFunction function, Executor executor); + + /** + * Returns true if {@code target} is a supertype of {@code argument}. "Supertype" is defined + * according to the rules for type arguments introduced with Java generics. + * + * @see TypeToken#isAssignableFrom(Type) + * @see TypeToken#isSupertypeOf(Type) + */ + public abstract boolean isSupertypeOf(TypeToken target, TypeToken argument); + + /** + * Returns an {@link Executor} that runs each task in the thread that invokes {@link + * Executor#execute execute}, as in {@link + * java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy}. + * + * @see MoreExecutors#sameThreadExecutor() + * @see MoreExecutors#directExecutor() + */ + public abstract Executor sameThreadExecutor(); + + /** + * Returns the portion of the given {@link HostAndPort} instance that should represent the + * hostname or IPv4/IPv6 literal. + * + *

The method {@code HostAndPort.getHostText} has been replaced with {@code + * HostAndPort.getHost} starting with Guava 20.0; it has been completely removed in Guava 22.0. + */ + @SuppressWarnings("JavaReflectionMemberAccess") + public String getHost(HostAndPort hostAndPort) { + try { + // Guava >= 20.0 + return (String) HostAndPort.class.getMethod("getHost").invoke(hostAndPort); + } catch (Exception e) { + // Guava < 22.0 + return hostAndPort.getHostText(); + } + } + + private static GuavaCompatibility selectImplementation() { + if (isGuava_19_0_OrHigher()) { + logger.info("Detected Guava >= 19 in the classpath, using modern compatibility layer"); + return new Version19OrHigher(); + } else if (isGuava_16_0_1_OrHigher()) { + logger.info("Detected Guava < 19 in the classpath, using legacy compatibility layer"); + return new Version18OrLower(); + } else { + throw new DriverInternalError( + "Detected incompatible version of Guava in the classpath. " + + "You need 16.0.1 or higher."); + } + } - /** - * The unique instance of this class, that is compatible with the Guava version found in the classpath. - */ - public static final GuavaCompatibility INSTANCE = selectImplementation(); + private static class Version18OrLower extends GuavaCompatibility { - /** - * Force the initialization of the class. This should be called early to ensure a fast failure if an incompatible - * version of Guava is in the classpath (the driver code calls it when loading the {@link Cluster} class). - */ - public static void init() { - // nothing to do, we just want the static initializers to run + @Override + public ListenableFuture withFallback( + ListenableFuture input, final AsyncFunction fallback) { + return Futures.withFallback( + input, + new com.google.common.util.concurrent.FutureFallback() { + @Override + public ListenableFuture create(Throwable t) throws Exception { + return fallback.apply(t); + } + }); } - /** - * Returns a {@code Future} whose result is taken from the given primary - * {@code input} or, if the primary input fails, from the {@code Future} - * provided by the {@code fallback}. - * - * @see Futures#withFallback(ListenableFuture, FutureFallback) - * @see Futures#catchingAsync(ListenableFuture, Class, AsyncFunction) - */ - public abstract ListenableFuture withFallback(ListenableFuture input, - AsyncFunction fallback); - - /** - * Returns a {@code Future} whose result is taken from the given primary - * {@code input} or, if the primary input fails, from the {@code Future} - * provided by the {@code fallback}. - * - * @see Futures#withFallback(ListenableFuture, FutureFallback, Executor) - * @see Futures#catchingAsync(ListenableFuture, Class, AsyncFunction, Executor) - */ - public abstract ListenableFuture withFallback(ListenableFuture input, - AsyncFunction fallback, Executor executor); - - /** - * Returns a new {@code ListenableFuture} whose result is asynchronously - * derived from the result of the given {@code Future}. More precisely, the - * returned {@code Future} takes its result from a {@code Future} produced by - * applying the given {@code AsyncFunction} to the result of the original - * {@code Future}. - * - * @see Futures#transform(ListenableFuture, AsyncFunction) - * @see Futures#transformAsync(ListenableFuture, AsyncFunction) - */ - public abstract ListenableFuture transformAsync(ListenableFuture input, - AsyncFunction function); - - /** - * Returns a new {@code ListenableFuture} whose result is asynchronously - * derived from the result of the given {@code Future}. 
More precisely, the - * returned {@code Future} takes its result from a {@code Future} produced by - * applying the given {@code AsyncFunction} to the result of the original - * {@code Future}. - * - * @see Futures#transform(ListenableFuture, AsyncFunction, Executor) - * @see Futures#transformAsync(ListenableFuture, AsyncFunction, Executor) - */ - public abstract ListenableFuture transformAsync(ListenableFuture input, - AsyncFunction function, - Executor executor); - - /** - * Returns true if {@code target} is a supertype of {@code argument}. "Supertype" is defined - * according to the rules for type arguments introduced with Java generics. - * - * @see TypeToken#isAssignableFrom(Type) - * @see TypeToken#isSupertypeOf(Type) - */ - public abstract boolean isSupertypeOf(TypeToken target, TypeToken argument); - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy}. - * - * @see MoreExecutors#sameThreadExecutor() - * @see MoreExecutors#directExecutor() - */ - public abstract Executor sameThreadExecutor(); - - private static GuavaCompatibility selectImplementation() { - if (isGuava_19_0_OrHigher()) { - logger.info("Detected Guava >= 19 in the classpath, using modern compatibility layer"); - return new Version19OrHigher(); - } else if (isGuava_16_0_1_OrHigher()) { - logger.info("Detected Guava < 19 in the classpath, using legacy compatibility layer"); - return new Version18OrLower(); - } else { - throw new DriverInternalError("Detected incompatible version of Guava in the classpath. " + - "You need 16.0.1 or higher."); - } + @Override + public ListenableFuture withFallback( + ListenableFuture input, + final AsyncFunction fallback, + Executor executor) { + return Futures.withFallback( + input, + new com.google.common.util.concurrent.FutureFallback() { + @Override + public ListenableFuture create(Throwable t) throws Exception { + return fallback.apply(t); + } + }, + executor); } - private static class Version18OrLower extends GuavaCompatibility { - - @Override - public ListenableFuture withFallback(ListenableFuture input, - final AsyncFunction fallback) { - return Futures.withFallback(input, new FutureFallback() { - @Override - public ListenableFuture create(Throwable t) throws Exception { - return fallback.apply(t); - } - }); - } - - @Override - public ListenableFuture withFallback(ListenableFuture input, - final AsyncFunction fallback, - Executor executor) { - return Futures.withFallback(input, new FutureFallback() { - @Override - public ListenableFuture create(Throwable t) throws Exception { - return fallback.apply(t); - } - }, executor); - } - - @Override - public ListenableFuture transformAsync(ListenableFuture input, AsyncFunction function) { - return Futures.transform(input, function); - } - - @Override - public ListenableFuture transformAsync(ListenableFuture input, AsyncFunction function, Executor executor) { - return Futures.transform(input, function, executor); - } - - @Override - public boolean isSupertypeOf(TypeToken target, TypeToken argument) { - return target.isAssignableFrom(argument); - } - - @Override - public Executor sameThreadExecutor() { - return MoreExecutors.sameThreadExecutor(); - } + @Override + public ListenableFuture transformAsync( + ListenableFuture input, AsyncFunction function) { + return Futures.transform(input, function); } - private static class Version19OrHigher extends GuavaCompatibility { - - @Override - public 
ListenableFuture withFallback(ListenableFuture input, - AsyncFunction fallback) { - return Futures.catchingAsync(input, Throwable.class, fallback); - } - - @Override - public ListenableFuture withFallback(ListenableFuture input, - AsyncFunction fallback, Executor executor) { - return Futures.catchingAsync(input, Throwable.class, fallback, executor); - } - - @Override - public ListenableFuture transformAsync(ListenableFuture input, AsyncFunction function) { - return Futures.transformAsync(input, function); - } - - @Override - public ListenableFuture transformAsync(ListenableFuture input, AsyncFunction function, Executor executor) { - return Futures.transformAsync(input, function, executor); - } - - @Override - public boolean isSupertypeOf(TypeToken target, TypeToken argument) { - return target.isSupertypeOf(argument); - } - - @Override - public Executor sameThreadExecutor() { - return MoreExecutors.directExecutor(); - } + @Override + public ListenableFuture transformAsync( + ListenableFuture input, + AsyncFunction function, + Executor executor) { + return Futures.transform(input, function, executor); } - private static boolean isGuava_19_0_OrHigher() { - return methodExists(Futures.class, "transformAsync", ListenableFuture.class, AsyncFunction.class); + @Override + public boolean isSupertypeOf(TypeToken target, TypeToken argument) { + return target.isAssignableFrom(argument); } - private static boolean isGuava_16_0_1_OrHigher() { - // Cheap check for < 16.0 - if (!methodExists(Maps.class, "asConverter", BiMap.class)) { - return false; - } - // More elaborate check to filter out 16.0, which has a bug in TypeToken. We need 16.0.1. - boolean resolved = false; - TypeToken> mapOfString = TypeTokens.mapOf(String.class, String.class); - Type type = mapOfString.getType(); - if (type instanceof ParameterizedType) { - ParameterizedType pType = (ParameterizedType) type; - Type[] types = pType.getActualTypeArguments(); - if (types.length == 2) { - TypeToken valueType = TypeToken.of(types[1]); - resolved = valueType.getRawType().equals(String.class); - } - } - if (!resolved) { - logger.debug("Detected Guava issue #1635 which indicates that version 16.0 is in the classpath"); - } - return resolved; + @Override + public Executor sameThreadExecutor() { + return MoreExecutors.sameThreadExecutor(); + } + } + + private static class Version19OrHigher extends GuavaCompatibility { + + @Override + public ListenableFuture withFallback( + ListenableFuture input, AsyncFunction fallback) { + return withFallback(input, fallback, sameThreadExecutor()); + } + + @Override + public ListenableFuture withFallback( + ListenableFuture input, + AsyncFunction fallback, + Executor executor) { + return Futures.catchingAsync(input, Throwable.class, fallback, executor); + } + + @Override + public ListenableFuture transformAsync( + ListenableFuture input, AsyncFunction function) { + return transformAsync(input, function, sameThreadExecutor()); + } + + @Override + public ListenableFuture transformAsync( + ListenableFuture input, + AsyncFunction function, + Executor executor) { + return Futures.transformAsync(input, function, executor); + } + + @Override + public boolean isSupertypeOf(TypeToken target, TypeToken argument) { + return target.isSupertypeOf(argument); + } + + @Override + public Executor sameThreadExecutor() { + return MoreExecutors.directExecutor(); + } + } + + private static boolean isGuava_19_0_OrHigher() { + return methodExists( + Futures.class, + "transformAsync", + ListenableFuture.class, + AsyncFunction.class, + 
Executor.class); + } + + private static boolean isGuava_16_0_1_OrHigher() { + // Cheap check for < 16.0 + if (!methodExists(Maps.class, "asConverter", BiMap.class)) { + return false; + } + // More elaborate check to filter out 16.0, which has a bug in TypeToken. We need 16.0.1. + boolean resolved = false; + TypeToken> mapOfString = TypeTokens.mapOf(String.class, String.class); + Type type = mapOfString.getType(); + if (type instanceof ParameterizedType) { + ParameterizedType pType = (ParameterizedType) type; + Type[] types = pType.getActualTypeArguments(); + if (types.length == 2) { + TypeToken valueType = TypeToken.of(types[1]); + resolved = valueType.getRawType().equals(String.class); + } + } + if (!resolved) { + logger.debug( + "Detected Guava issue #1635 which indicates that version 16.0 is in the classpath"); } + return resolved; + } - private static boolean methodExists(Class declaringClass, String methodName, Class... parameterTypes) { - try { - declaringClass.getMethod(methodName, parameterTypes); - return true; - } catch (Exception e) { - logger.debug("Error while checking existence of method " + - declaringClass.getSimpleName() + "." + methodName, e); - return false; - } + private static boolean methodExists( + Class declaringClass, String methodName, Class... parameterTypes) { + try { + declaringClass.getMethod(methodName, parameterTypes); + return true; + } catch (Exception e) { + logger.debug( + "Error while checking existence of method " + + declaringClass.getSimpleName() + + "." + + methodName, + e); + return false; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 70aa7ac7f5f..c904f7957eb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,483 +17,584 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.policies.AddressTranslator; -import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A Cassandra node. - *

- * This class keeps the information the driver maintain on a given Cassandra node. + * + *

This class keeps the information the driver maintain on a given Cassandra node. */ public class Host { - private static final Logger logger = LoggerFactory.getLogger(Host.class); - - static final Logger statesLogger = LoggerFactory.getLogger(Host.class.getName() + ".STATES"); - - // The address we'll use to connect to the node - private final InetSocketAddress address; - - // The broadcast_address as known by Cassandra. - // We use that internally because - // that's the 'peer' in the 'System.peers' table and avoids querying the full peers table in - // ControlConnection.refreshNodeInfo. - private volatile InetAddress broadcastAddress; - - // The listen_address as known by Cassandra. - // This is usually the same as broadcast_address unless - // specified otherwise in cassandra.yaml file. - private volatile InetAddress listenAddress; - - enum State {ADDED, DOWN, UP} - - volatile State state; - /** - * Ensures state change notifications for that host are handled serially - */ - final ReentrantLock notificationsLock = new ReentrantLock(true); - - final ConvictionPolicy convictionPolicy; - private final Cluster.Manager manager; - - // Tracks later reconnection attempts to that host so we avoid adding multiple tasks. - final AtomicReference> reconnectionAttempt = new AtomicReference>(); - - final ExecutionInfo defaultExecutionInfo; - - private volatile String datacenter; - private volatile String rack; - private volatile VersionNumber cassandraVersion; - - private volatile Set tokens; - - private volatile String dseWorkload; - private volatile boolean dseGraphEnabled; - private volatile VersionNumber dseVersion; - - // ClusterMetadata keeps one Host object per inet address and we rely on this (more precisely, - // we rely on the fact that we can use Object equality as a valid equality), so don't use - // that constructor but ClusterMetadata.getHost instead. - Host(InetSocketAddress address, ConvictionPolicy.Factory convictionPolicyFactory, Cluster.Manager manager) { - if (address == null || convictionPolicyFactory == null) - throw new NullPointerException(); - - this.address = address; - this.convictionPolicy = convictionPolicyFactory.create(this, manager.reconnectionPolicy()); - this.manager = manager; - this.defaultExecutionInfo = new ExecutionInfo(this); - this.state = State.ADDED; - } - - void setLocationInfo(String datacenter, String rack) { - this.datacenter = datacenter; - this.rack = rack; - } - - void setVersion(String cassandraVersion) { - VersionNumber versionNumber = null; - try { - if (cassandraVersion != null) { - versionNumber = VersionNumber.parse(cassandraVersion); - } - } catch (IllegalArgumentException e) { - logger.warn("Error parsing Cassandra version {}. This shouldn't have happened", cassandraVersion); - } - this.cassandraVersion = versionNumber; - } - - void setBroadcastAddress(InetAddress broadcastAddress) { - this.broadcastAddress = broadcastAddress; - } - - void setListenAddress(InetAddress listenAddress) { - this.listenAddress = listenAddress; - } - - void setDseVersion(String dseVersion) { - VersionNumber versionNumber = null; - try { - if (dseVersion != null) { - versionNumber = VersionNumber.parse(dseVersion); - } - } catch (IllegalArgumentException e) { - logger.warn("Error parsing DSE version {}. 
This shouldn't have happened", dseVersion); - } - this.dseVersion = versionNumber; - } - - void setDseWorkload(String dseWorkload) { - this.dseWorkload = dseWorkload; - } - - void setDseGraphEnabled(boolean dseGraphEnabled) { - this.dseGraphEnabled = dseGraphEnabled; - } - - boolean supports(ProtocolVersion version) { - return getCassandraVersion() == null - || version.minCassandraVersion().compareTo(getCassandraVersion().nextStable()) <= 0; - } + private static final Logger logger = LoggerFactory.getLogger(Host.class); - /** - * Returns the address that the driver will use to connect to the node. - *

- * This is a shortcut for {@code getSocketAddress().getAddress()}. - * - * @return the address. - * @see #getSocketAddress() - */ - public InetAddress getAddress() { - return address.getAddress(); - } - - /** - * Returns the address and port that the driver will use to connect to the node. - *

- * This is the node's broadcast RPC address, possibly translated if an {@link AddressTranslator} has been configured - * for this cluster. - *

- * The broadcast RPC address is inferred from the following cassandra.yaml file settings: - *

    - *
  1. {@code rpc_address}, {@code rpc_interface} or {@code broadcast_rpc_address}
  2. - *
  3. {@code native_transport_port}
  4. - *
- * - * @return the address and port. - * @see The cassandra.yaml configuration file - */ - public InetSocketAddress getSocketAddress() { - return address; - } - - /** - * Returns the node broadcast address (that is, the IP by which it should be contacted by other peers in the - * cluster), if known. - *

- * This corresponds to the {@code broadcast_address} cassandra.yaml file setting and - * is by default the same as {@link #getListenAddress()}, unless specified - * otherwise in cassandra.yaml. - * This is NOT the address clients should use to contact this node. - *

- * This information is always available for peer hosts. For the control host, it's only available if CASSANDRA-9436 - * is fixed on the server side (Cassandra versions >= 2.0.16, 2.1.6, 2.2.0 rc1). For older versions, note that if - * the driver loses the control connection and reconnects to a different control host, the old control host becomes - * a peer, and therefore its broadcast address is updated. - * - * @return the node broadcast address, if known. Otherwise {@code null}. - * @see The cassandra.yaml configuration file - */ - public InetAddress getBroadcastAddress() { - return broadcastAddress; - } + static final Logger statesLogger = LoggerFactory.getLogger(Host.class.getName() + ".STATES"); + + // The address we'll use to connect to the node + private final EndPoint endPoint; + + // The broadcast RPC address, as reported in system tables. + // Note that, unlike previous versions of the driver, this address is NOT TRANSLATED. + private volatile InetSocketAddress broadcastRpcAddress; + + // The broadcast_address as known by Cassandra. + // We use that internally because + // that's the 'peer' in the 'System.peers' table and avoids querying the full peers table in + // ControlConnection.refreshNodeInfo. + private volatile InetSocketAddress broadcastSocketAddress; + + // The listen_address as known by Cassandra. + // This is usually the same as broadcast_address unless + // specified otherwise in cassandra.yaml file. + private volatile InetSocketAddress listenSocketAddress; - /** - * Returns the node listen address (that is, the IP the node uses to contact other peers in the cluster), if known. - *

- * This corresponds to the {@code listen_address} cassandra.yaml file setting. - * This is NOT the address clients should use to contact this node. - *

- * This information is available for the control host if CASSANDRA-9603 is fixed on the server side (Cassandra - * versions >= 2.0.17, 2.1.8, 2.2.0 rc2). It's currently not available for peer hosts. Note that the current driver - * code already tries to read a {@code listen_address} column in {@code system.peers}; when a future Cassandra - * version adds it, it will be picked by the driver without any further change needed. - * - * @return the node listen address, if known. Otherwise {@code null}. - * @see The cassandra.yaml configuration file - */ - public InetAddress getListenAddress() { - return listenAddress; - } + private volatile UUID hostId; + + private volatile UUID schemaVersion; - /** - * Returns the name of the datacenter this host is part of. - *

- * The returned datacenter name is the one as known by Cassandra. - * It is also possible for this information to be unavailable. In that - * case this method returns {@code null}, and the caller should always be aware - * of this possibility. - * - * @return the Cassandra datacenter name or null if datacenter is unavailable. - */ - public String getDatacenter() { - return datacenter; - } + enum State { + ADDED, + DOWN, + UP + } + + volatile State state; + /** Ensures state change notifications for that host are handled serially */ + final ReentrantLock notificationsLock = new ReentrantLock(true); + + final ConvictionPolicy convictionPolicy; + private final Cluster.Manager manager; + + // Tracks later reconnection attempts to that host so we avoid adding multiple tasks. + final AtomicReference> reconnectionAttempt = + new AtomicReference>(); + + final ExecutionInfo defaultExecutionInfo; + + private volatile String datacenter; + private volatile String rack; + private volatile VersionNumber cassandraVersion; + + private volatile Set tokens; + + private volatile String dseWorkload; + private volatile boolean dseGraphEnabled; + private volatile VersionNumber dseVersion; + + Host( + EndPoint endPoint, + ConvictionPolicy.Factory convictionPolicyFactory, + Cluster.Manager manager) { + if (endPoint == null || convictionPolicyFactory == null) throw new NullPointerException(); + + this.endPoint = endPoint; + this.convictionPolicy = convictionPolicyFactory.create(this, manager.reconnectionPolicy()); + this.manager = manager; + this.defaultExecutionInfo = new ExecutionInfo(this); + this.state = State.ADDED; + } + + void setLocationInfo(String datacenter, String rack) { + this.datacenter = datacenter; + this.rack = rack; + } + + void setVersion(String cassandraVersion) { + VersionNumber versionNumber = null; + try { + if (cassandraVersion != null) { + versionNumber = VersionNumber.parse(cassandraVersion); + } + } catch (IllegalArgumentException e) { + logger.warn( + "Error parsing Cassandra version {}. This shouldn't have happened", cassandraVersion); + } + this.cassandraVersion = versionNumber; + } + + void setBroadcastRpcAddress(InetSocketAddress broadcastRpcAddress) { + this.broadcastRpcAddress = broadcastRpcAddress; + } + + void setBroadcastSocketAddress(InetSocketAddress broadcastAddress) { + this.broadcastSocketAddress = broadcastAddress; + } + + void setListenSocketAddress(InetSocketAddress listenAddress) { + this.listenSocketAddress = listenAddress; + } + + void setDseVersion(String dseVersion) { + VersionNumber versionNumber = null; + try { + if (dseVersion != null) { + versionNumber = VersionNumber.parse(dseVersion); + } + } catch (IllegalArgumentException e) { + logger.warn("Error parsing DSE version {}. This shouldn't have happened", dseVersion); + } + this.dseVersion = versionNumber; + } + + void setDseWorkload(String dseWorkload) { + this.dseWorkload = dseWorkload; + } + + void setDseGraphEnabled(boolean dseGraphEnabled) { + this.dseGraphEnabled = dseGraphEnabled; + } + + void setHostId(UUID hostId) { + this.hostId = hostId; + } + + void setSchemaVersion(UUID schemaVersion) { + this.schemaVersion = schemaVersion; + } + + boolean supports(ProtocolVersion version) { + return getCassandraVersion() == null + || version.minCassandraVersion().compareTo(getCassandraVersion().nextStable()) <= 0; + } + + /** Returns information to connect to the node. */ + public EndPoint getEndPoint() { + return endPoint; + } + + /** + * Returns the address that the driver will use to connect to the node. 
+ * + * @deprecated This is exposed mainly for historical reasons. Internally, the driver uses {@link + * #getEndPoint()} to establish connections. This is a shortcut for {@code + * getEndPoint().resolve().getAddress()}. + */ + @Deprecated + public InetAddress getAddress() { + return endPoint.resolve().getAddress(); + } + + /** + * Returns the address and port that the driver will use to connect to the node. + * + * @deprecated This is exposed mainly for historical reasons. Internally, the driver uses {@link + * #getEndPoint()} to establish connections. This is a shortcut for {@code + * getEndPoint().resolve()}. + * @see The + * cassandra.yaml configuration file + */ + @Deprecated + public InetSocketAddress getSocketAddress() { + return endPoint.resolve(); + } + + /** + * Returns the broadcast RPC address, as reported by the node. + * + *

This is the address reported in {@code system.peers.rpc_address} (Cassandra 3) or {@code + * system.peers_v2.native_address/native_port} (Cassandra 4+). + * + *

Note that this is not necessarily the address that the driver will use to connect: if the + * node is accessed through a proxy, a translation might be necessary; this is handled by {@link + * #getEndPoint()}. + * + *

For versions of Cassandra less than 2.0.16, 2.1.6 or 2.2.0-rc1, this will be {@code null} + * for the control host. It will get updated if the control connection switches to another host. + * + * @see CASSANDRA-9436 (where the + * information was added for the control host) + */ + public InetSocketAddress getBroadcastRpcAddress() { + return broadcastRpcAddress; + } + + /** + * Returns the node broadcast address, if known. Otherwise {@code null}. + * + *
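A sketch of the distinction this introduces: the address the driver connects to (the end point, possibly translated or proxied) versus the address the node itself advertises in the system tables:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;

public class HostAddressExample {
  static void logAddresses(Cluster cluster) {
    for (Host host : cluster.getMetadata().getAllHosts()) {
      System.out.printf(
          "connect=%s broadcastRpc=%s%n",
          host.getEndPoint().resolve(), host.getBroadcastRpcAddress());
    }
  }
}
```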

This is a shortcut for {@code getBroadcastSocketAddress().getAddress()}. + * + * @return the node broadcast address, if known. Otherwise {@code null}. + * @see #getBroadcastSocketAddress() + * @see The + * cassandra.yaml configuration file + */ + public InetAddress getBroadcastAddress() { + return broadcastSocketAddress != null ? broadcastSocketAddress.getAddress() : null; + } + + /** + * Returns the node broadcast address (that is, the address by which it should be contacted by + * other peers in the cluster), if known. Otherwise {@code null}. + * + *

Note that the port of the returned address will be 0 for versions of Cassandra older than + * 4.0. + * + *

This corresponds to the {@code broadcast_address} cassandra.yaml file setting and is by + * default the same as {@link #getListenSocketAddress()}, unless specified otherwise in + * cassandra.yaml. This is NOT the address clients should use to contact this node. + * + *

This information is always available for peer hosts. For the control host, it's only + * available if CASSANDRA-9436 is fixed on the server side (Cassandra versions >= 2.0.16, 2.1.6, + * 2.2.0 rc1). For older versions, note that if the driver loses the control connection and + * reconnects to a different control host, the old control host becomes a peer, and therefore its + * broadcast address is updated. + * + * @return the node broadcast address, if known. Otherwise {@code null}. + * @see The + * cassandra.yaml configuration file + */ + public InetSocketAddress getBroadcastSocketAddress() { + return broadcastSocketAddress; + } + + /** + * Returns the node listen address, if known. Otherwise {@code null}. + * + *

This is a shortcut for {@code getListenSocketAddress().getAddress()}. + * + * @return the node listen address, if known. Otherwise {@code null}. + * @see #getListenSocketAddress() + * @see The + * cassandra.yaml configuration file + */ + public InetAddress getListenAddress() { + return listenSocketAddress != null ? listenSocketAddress.getAddress() : null; + } + + /** + * Returns the node listen address (that is, the address the node uses to contact other peers in + * the cluster), if known. Otherwise {@code null}. + * + *

Note that the port of the returned address will be 0 for versions of Cassandra older than + * 4.0. + * + *

This corresponds to the {@code listen_address} cassandra.yaml file setting. This is NOT + * the address clients should use to contact this node. + * + *

This information is available for the control host if CASSANDRA-9603 is fixed on the server + * side (Cassandra versions >= 2.0.17, 2.1.8, 2.2.0 rc2). It's currently not available for peer + * hosts. Note that the current driver code already tries to read a {@code listen_address} column + * in {@code system.peers}; when a future Cassandra version adds it, it will be picked by the + * driver without any further change needed. + * + * @return the node listen address, if known. Otherwise {@code null}. + * @see The + * cassandra.yaml configuration file + */ + public InetSocketAddress getListenSocketAddress() { + return listenSocketAddress; + } + + /** + * Returns the name of the datacenter this host is part of. + * + *

The returned datacenter name is the one as known by Cassandra. It is also possible for this + * information to be unavailable. In that case this method returns {@code null}, and the caller + * should always be aware of this possibility. + * + * @return the Cassandra datacenter name or null if datacenter is unavailable. + */ + public String getDatacenter() { + return datacenter; + } + + /** + * Returns the name of the rack this host is part of. + * + *

The returned rack name is the one as known by Cassandra. It is also possible for this + * information to be unavailable. In that case this method returns {@code null}, and the caller + * should always be aware of this possibility. + * + * @return the Cassandra rack name or null if the rack is unavailable + */ + public String getRack() { + return rack; + } + + /** + * The Cassandra version the host is running. + * + *

It is also possible for this information to be unavailable. In that case this method returns + * {@code null}, and the caller should always be aware of this possibility. + * + * @return the Cassandra version the host is running. + */ + public VersionNumber getCassandraVersion() { + return cassandraVersion; + } + + /** + * The DSE version the host is running. + * + *

It is also possible for this information to be unavailable. In that case this method returns + * {@code null}, and the caller should always be aware of this possibility. + * + * @return the DSE version the host is running. + * @deprecated Please use the Java Driver + * for DSE if you are connecting to a DataStax Enterprise (DSE) cluster. This method might + * not function properly with future versions of DSE. + */ + @Deprecated + public VersionNumber getDseVersion() { + return dseVersion; + } + + /** + * The DSE Workload the host is running. + * + *

It is also possible for this information to be unavailable. In that case this method returns + * {@code null}, and the caller should always be aware of this possibility. + * + * @return the DSE workload the host is running. + * @deprecated Please use the Java + * Driver for DSE if you are connecting to a DataStax Enterprise (DSE) cluster. This + * method might not function properly with future versions of DSE. + */ + @Deprecated + public String getDseWorkload() { + return dseWorkload; + } + + /** + * Returns whether the host is running DSE Graph. + * + * @return whether the node is running DSE Graph. + * @deprecated Please use the Java + * Driver for DSE if you are connecting to a DataStax Enterprise (DSE) cluster. This + * method might not function properly with future versions of DSE. + */ + @Deprecated + public boolean isDseGraphEnabled() { + return dseGraphEnabled; + } + + /** + * Return the host id value for the host. + * + *

The host id is the main identifier used by Cassandra on the server for internal + * communication (gossip). It is referenced as the column {@code host_id} in the {@code + * system.local} or {@code system.peers} table. + * + * @return the node's host id value. + */ + public UUID getHostId() { + return hostId; + } + + /** + * Return the current schema version for the host. + * + *

Schema versions in Cassandra are used to ensure all the nodes agree on the current Cassandra + * schema when it is modified. For more information see {@link + * ExecutionInfo#isSchemaInAgreement()} + * + * @return the node's current schema version value. + */ + public UUID getSchemaVersion() { + return schemaVersion; + } + + /** + * Returns the tokens that this host owns. + * + * @return the (immutable) set of tokens. + */ + public Set getTokens() { + return tokens; + } + + void setTokens(Set tokens) { + this.tokens = tokens; + } + + /** + * Returns whether the host is considered up by the driver. + * + *
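As a usage sketch, these accessors are typically read off the cluster metadata to inspect topology and schema agreement at a glance:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;

public class HostStateReport {
  static void report(Cluster cluster) {
    for (Host host : cluster.getMetadata().getAllHosts()) {
      System.out.printf(
          "%s dc=%s rack=%s hostId=%s schema=%s up=%s%n",
          host,
          host.getDatacenter(),
          host.getRack(),
          host.getHostId(),
          host.getSchemaVersion(),
          host.isUp());
    }
  }
}
```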

Please note that this is only the view of the driver and may not reflect reality. In + * particular a node can be down but the driver hasn't detected it yet, or it can have been + * restarted and the driver hasn't detected it yet (in particular, for hosts to which the driver + * does not connect (because the {@code LoadBalancingPolicy.distance} method says so), this + * information may be durably inaccurate). This information should thus only be considered as best + * effort and should not be relied upon too strongly. + * + * @return whether the node is considered up. + */ + public boolean isUp() { + return state == State.UP; + } + + /** + * Returns a description of the host's state, as seen by the driver. + * + *

This is exposed for debugging purposes only; the format of this string might change between + * driver versions, so clients should not make any assumptions about it. + * + * @return a description of the host's state. + */ + public String getState() { + return state.name(); + } + + /** + * Returns a {@code ListenableFuture} representing the completion of the reconnection attempts + * scheduled after a host is marked {@code DOWN}. + * + *
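For debugging, the state string can simply be logged alongside `isUp()`. An illustrative sketch, not part of this patch (the class and logger names are arbitrary):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HostStateDump {
  private static final Logger logger = LoggerFactory.getLogger(HostStateDump.class);

  public static void dump(Cluster cluster) {
    for (Host host : cluster.getMetadata().getAllHosts()) {
      // Best-effort, driver-side view only: useful for diagnostics, not for routing decisions.
      logger.debug("{} isUp={} state={}", host, host.isUp(), host.getState());
    }
  }
}
```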

If the caller cancels this future, the driver will not try to reconnect to this host + * until it receives an UP event for it. Note that this could mean never, if the node was marked + * down because of a driver-side error (e.g. read timeout) but no failure was detected by + * Cassandra. The caller might decide to trigger an explicit reconnection attempt at a later point + * with {@link #tryReconnectOnce()}. + * + * @return the future, or {@code null} if no reconnection attempt was in progress. + */ + public ListenableFuture getReconnectionAttemptFuture() { + return reconnectionAttempt.get(); + } + + /** + * Triggers an asynchronous reconnection attempt to this host. + * + *

This method is intended for load balancing policies that mark hosts as {@link + * HostDistance#IGNORED IGNORED}, but still need a way to periodically check these hosts' states + * (UP / DOWN). + * + *

For a host that is at distance {@code IGNORED}, this method will try to reconnect exactly + * once: if reconnection succeeds, the host is marked {@code UP}; otherwise, no further attempts + * will be scheduled. It has no effect if the node is already {@code UP}, or if a reconnection + * attempt is already in progress. + * + *
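For instance, an application whose load balancing policy ignores a remote datacenter could probe those hosts from its own periodic task. A hedged sketch under that assumption (scheduling omitted; not part of this patch):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.policies.LoadBalancingPolicy;

public class IgnoredHostProber {
  /** One-shot probe of DOWN hosts that the load balancing policy ignores. */
  public static void probe(Cluster cluster) {
    LoadBalancingPolicy policy =
        cluster.getConfiguration().getPolicies().getLoadBalancingPolicy();
    for (Host host : cluster.getMetadata().getAllHosts()) {
      if (policy.distance(host) == HostDistance.IGNORED && !host.isUp()) {
        host.tryReconnectOnce(); // schedules a single reconnection attempt for an IGNORED host
      }
    }
  }
}
```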

Note that if the host is not a distance {@code IGNORED}, this method will + * trigger a periodic reconnection attempt if the reconnection fails. + */ + public void tryReconnectOnce() { + this.manager.startSingleReconnectionAttempt(this); + } + + @Override + public boolean equals(Object other) { + if (other instanceof Host) { + Host that = (Host) other; + return this.endPoint.equals(that.endPoint); + } + return false; + } + + @Override + public int hashCode() { + return endPoint.hashCode(); + } + + boolean wasJustAdded() { + return state == State.ADDED; + } + + @Override + public String toString() { + return endPoint.toString(); + } + + void setDown() { + state = State.DOWN; + } + + void setUp() { + state = State.UP; + } + + /** + * Interface for listeners that are interested in hosts added, up, down and removed events. + * + *

It is possible for the same event to be fired multiple times, particularly for up or down + * events. Therefore, a listener should ignore the same event if it has already been notified of a + * node's state. + */ + public interface StateListener { /** - * Returns the name of the rack this host is part of. - *
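An illustrative listener implementation (not part of this patch) that simply logs the notifications described above; per the contract, duplicate events for the same state should be tolerated:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;

public class LoggingStateListener implements Host.StateListener {
  @Override public void onAdd(Host host) { System.out.println("ADDED " + host); }
  @Override public void onUp(Host host) { System.out.println("UP " + host); }
  @Override public void onDown(Host host) { System.out.println("DOWN " + host); }
  @Override public void onRemove(Host host) { System.out.println("REMOVED " + host); }
  // On the 3.x branch both lifecycle callbacks belong to StateListener itself.
  @Override public void onRegister(Cluster cluster) {
    System.out.println("Registered with " + cluster.getClusterName());
  }
  @Override public void onUnregister(Cluster cluster) { System.out.println("Unregistered"); }
}
```

It would be attached with `cluster.register(new LoggingStateListener())`.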

- * The returned rack name is the one as known by Cassandra. - * It is also possible for this information to be unavailable. In that case - * this method returns {@code null}, and the caller should always be aware of this - * possibility. + * Called when a new node is added to the cluster. * - * @return the Cassandra rack name or null if the rack is unavailable - */ - public String getRack() { - return rack; - } - - /** - * The Cassandra version the host is running. - *

- * It is also possible for this information to be unavailable. In that case - * this method returns {@code null}, and the caller should always be aware of this - * possibility. + *

The newly added node should be considered up. * - * @return the Cassandra version the host is running. + * @param host the host that has been newly added. */ - public VersionNumber getCassandraVersion() { - return cassandraVersion; - } + void onAdd(Host host); /** - * The DSE version the host is running. - *

- * It is also possible for this information to be unavailable. In that case - * this method returns {@code null}, and the caller should always be aware of this - * possibility. + * Called when a node is determined to be up. * - * @return the DSE version the host is running. - * @deprecated Please use the - * Java driver for DSE - * if you are connecting to a DataStax Enterprise (DSE) cluster. - * This method might not function properly with future versions of DSE. + * @param host the host that has been detected up. */ - @Deprecated - public VersionNumber getDseVersion() { - return dseVersion; - } + void onUp(Host host); /** - * The DSE Workload the host is running. - *

- * It is also possible for this information to be unavailable. In that case - * this method returns {@code null}, and the caller should always be aware of this - * possibility. + * Called when a node is determined to be down. * - * @return the DSE workload the host is running. - * @deprecated Please use the - * Java driver for DSE - * if you are connecting to a DataStax Enterprise (DSE) cluster. - * This method might not function properly with future versions of DSE. + * @param host the host that has been detected down. */ - @Deprecated - public String getDseWorkload() { - return dseWorkload; - } + void onDown(Host host); /** - * Returns whether the host is running DSE Graph. + * Called when a node is removed from the cluster. * - * @return whether the node is running DSE Graph. - * @deprecated Please use the - * Java driver for DSE - * if you are connecting to a DataStax Enterprise (DSE) cluster. - * This method might not function properly with future versions of DSE. + * @param host the removed host. */ - @Deprecated - public boolean isDseGraphEnabled() { - return dseGraphEnabled; - } + void onRemove(Host host); /** - * Returns the tokens that this host owns. + * Gets invoked when the tracker is registered with a cluster, or at cluster startup if the + * tracker was registered at initialization with {@link + * com.datastax.driver.core.Cluster.Initializer#register(LatencyTracker)}. * - * @return the (immutable) set of tokens. + * @param cluster the cluster that this tracker is registered with. */ - public Set getTokens() { - return tokens; - } - - void setTokens(Set tokens) { - this.tokens = tokens; - } + void onRegister(Cluster cluster); /** - * Returns whether the host is considered up by the driver. - *

- * Please note that this is only the view of the driver and may not reflect - * reality. In particular a node can be down but the driver hasn't detected - * it yet, or it can have been restarted and the driver hasn't detected it - * yet (in particular, for hosts to which the driver does not connect (because - * the {@code LoadBalancingPolicy.distance} method says so), this information - * may be durably inaccurate). This information should thus only be - * considered as best effort and should not be relied upon too strongly. + * Gets invoked when the tracker is unregistered from a cluster, or at cluster shutdown if the + * tracker was not unregistered. * - * @return whether the node is considered up. + * @param cluster the cluster that this tracker was registered with. */ - public boolean isUp() { - return state == State.UP; - } - + void onUnregister(Cluster cluster); + } + + /** + * A {@code StateListener} that tracks when it gets registered or unregistered with a cluster. + * + *

This interface exists only for backward-compatibility reasons: starting with the 3.0 branch + * of the driver, its methods are on the parent interface directly. + */ + public interface LifecycleAwareStateListener extends StateListener { /** - * Returns a description of the host's state, as seen by the driver. - *

- * This is exposed for debugging purposes only; the format of this string might - * change between driver versions, so clients should not make any assumptions - * about it. + * Gets invoked when the listener is registered with a cluster, or at cluster startup if the + * listener was registered at initialization with {@link + * com.datastax.driver.core.Cluster#register(Host.StateListener)}. * - * @return a description of the host's state. + * @param cluster the cluster that this listener is registered with. */ - public String getState() { - return state.name(); - } + @Override + void onRegister(Cluster cluster); /** - * Returns a {@code ListenableFuture} representing the completion of the reconnection - * attempts scheduled after a host is marked {@code DOWN}. - *

- * If the caller cancels this future, the driver will not try to reconnect to - * this host until it receives an UP event for it. Note that this could mean never, if - * the node was marked down because of a driver-side error (e.g. read timeout) but no - * failure was detected by Cassandra. The caller might decide to trigger an explicit - * reconnection attempt at a later point with {@link #tryReconnectOnce()}. + * Gets invoked when the listener is unregistered from a cluster, or at cluster shutdown if the + * listener was not unregistered. * - * @return the future, or {@code null} if no reconnection attempt was in progress. - */ - public ListenableFuture getReconnectionAttemptFuture() { - return reconnectionAttempt.get(); - } - - /** - * Triggers an asynchronous reconnection attempt to this host. - *

- * This method is intended for load balancing policies that mark hosts as {@link HostDistance#IGNORED IGNORED}, - * but still need a way to periodically check these hosts' states (UP / DOWN). - *

- * For a host that is at distance {@code IGNORED}, this method will try to reconnect exactly once: if - * reconnection succeeds, the host is marked {@code UP}; otherwise, no further attempts will be scheduled. - * It has no effect if the node is already {@code UP}, or if a reconnection attempt is already in progress. - *

- * Note that if the host is not a distance {@code IGNORED}, this method will trigger a periodic - * reconnection attempt if the reconnection fails. + * @param cluster the cluster that this listener was registered with. */ - public void tryReconnectOnce() { - this.manager.startSingleReconnectionAttempt(this); - } - - @Override - public boolean equals(Object other) { - if (other instanceof Host) { - Host that = (Host) other; - return this.address.equals(that.address); - } - return false; - } - - @Override - public int hashCode() { - return address.hashCode(); - } - - boolean wasJustAdded() { - return state == State.ADDED; - } - @Override - public String toString() { - return address.toString(); - } - - void setDown() { - state = State.DOWN; - } - - void setUp() { - state = State.UP; - } - - /** - * Interface for listeners that are interested in hosts added, up, down and - * removed events. - *

- * It is possible for the same event to be fired multiple times, - * particularly for up or down events. Therefore, a listener should ignore - * the same event if it has already been notified of a node's state. - */ - public interface StateListener { - - /** - * Called when a new node is added to the cluster. - *

- * The newly added node should be considered up. - * - * @param host the host that has been newly added. - */ - void onAdd(Host host); - - /** - * Called when a node is determined to be up. - * - * @param host the host that has been detected up. - */ - void onUp(Host host); - - /** - * Called when a node is determined to be down. - * - * @param host the host that has been detected down. - */ - void onDown(Host host); - - /** - * Called when a node is removed from the cluster. - * - * @param host the removed host. - */ - void onRemove(Host host); - - /** - * Gets invoked when the tracker is registered with a cluster, or at cluster startup if the - * tracker was registered at initialization with - * {@link com.datastax.driver.core.Cluster.Initializer#register(LatencyTracker)}. - * - * @param cluster the cluster that this tracker is registered with. - */ - void onRegister(Cluster cluster); - - /** - * Gets invoked when the tracker is unregistered from a cluster, or at cluster shutdown if - * the tracker was not unregistered. - * - * @param cluster the cluster that this tracker was registered with. - */ - void onUnregister(Cluster cluster); - } - - /** - * A {@code StateListener} that tracks when it gets registered or unregistered with a cluster. - *

- * This interface exists only for backward-compatibility reasons: starting with the 3.0 branch of the driver, its - * methods are on the parent interface directly. - */ - public interface LifecycleAwareStateListener extends StateListener { - /** - * Gets invoked when the listener is registered with a cluster, or at cluster startup if the - * listener was registered at initialization with - * {@link com.datastax.driver.core.Cluster#register(Host.StateListener)}. - * - * @param cluster the cluster that this listener is registered with. - */ - @Override - void onRegister(Cluster cluster); - - /** - * Gets invoked when the listener is unregistered from a cluster, or at cluster shutdown if - * the listener was not unregistered. - * - * @param cluster the cluster that this listener was registered with. - */ - @Override - void onUnregister(Cluster cluster); - } + void onUnregister(Cluster cluster); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 662305b3ea4..82ba3963a6e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,11 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Connection.State.GONE; +import static com.datastax.driver.core.Connection.State.OPEN; +import static com.datastax.driver.core.Connection.State.RESURRECTING; +import static com.datastax.driver.core.Connection.State.TRASHED; + import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.BusyPoolException; import com.datastax.driver.core.exceptions.ConnectionException; @@ -23,689 +30,735 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Throwables; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.*; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; import io.netty.util.concurrent.EventExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - -import static com.datastax.driver.core.Connection.State.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; class HostConnectionPool implements Connection.Owner { - private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); + private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); - private static final int MAX_SIMULTANEOUS_CREATION = 1; + private static final int MAX_SIMULTANEOUS_CREATION = 1; - final Host host; - volatile HostDistance hostDistance; - protected final SessionManager manager; + final Host host; + volatile HostDistance hostDistance; + protected final SessionManager manager; - final List connections; - private final AtomicInteger open; - /** - * The total number of in-flight requests on all connections of this pool. - */ - final AtomicInteger totalInFlight = new AtomicInteger(); - /** - * The maximum value of {@link #totalInFlight} since the last call to {@link #cleanupIdleConnections(long)} - */ - private final AtomicInteger maxTotalInFlight = new AtomicInteger(); - @VisibleForTesting - final Set trash = new CopyOnWriteArraySet(); + final List connections; + private final AtomicInteger open; + /** The total number of in-flight requests on all connections of this pool. 
*/ + final AtomicInteger totalInFlight = new AtomicInteger(); + /** + * The maximum value of {@link #totalInFlight} since the last call to {@link + * #cleanupIdleConnections(long)} + */ + private final AtomicInteger maxTotalInFlight = new AtomicInteger(); - private final Queue pendingBorrows = new ConcurrentLinkedQueue(); - private final AtomicInteger pendingBorrowCount = new AtomicInteger(); + @VisibleForTesting final Set trash = new CopyOnWriteArraySet(); - private final Runnable newConnectionTask; + private final Queue pendingBorrows = new ConcurrentLinkedQueue(); + final AtomicInteger pendingBorrowCount = new AtomicInteger(); - private final AtomicInteger scheduledForCreation = new AtomicInteger(); + private final Runnable newConnectionTask; - private final EventExecutor timeoutsExecutor; + private final AtomicInteger scheduledForCreation = new AtomicInteger(); - private final AtomicReference closeFuture = new AtomicReference(); + private final EventExecutor timeoutsExecutor; - private enum Phase {INITIALIZING, READY, INIT_FAILED, CLOSING} + private final AtomicReference closeFuture = new AtomicReference(); - protected final AtomicReference phase = new AtomicReference(Phase.INITIALIZING); + private enum Phase { + INITIALIZING, + READY, + INIT_FAILED, + CLOSING + } - // When a request times out, we may never release its stream ID. So over time, a given connection - // may get less an less available streams. When the number of available ones go below the - // following threshold, we just replace the connection by a new one. - private final int minAllowedStreams; + protected final AtomicReference phase = new AtomicReference(Phase.INITIALIZING); - HostConnectionPool(Host host, HostDistance hostDistance, SessionManager manager) { - assert hostDistance != HostDistance.IGNORED; - this.host = host; - this.hostDistance = hostDistance; - this.manager = manager; + // When a request times out, we may never release its stream ID. So over time, a given connection + // may get less an less available streams. When the number of available ones go below the + // following threshold, we just replace the connection by a new one. + private final int minAllowedStreams; - this.newConnectionTask = new Runnable() { - @Override - public void run() { - addConnectionIfUnderMaximum(); - scheduledForCreation.decrementAndGet(); - } + HostConnectionPool(Host host, HostDistance hostDistance, SessionManager manager) { + assert hostDistance != HostDistance.IGNORED; + this.host = host; + this.hostDistance = hostDistance; + this.manager = manager; + + this.newConnectionTask = + new Runnable() { + @Override + public void run() { + addConnectionIfUnderMaximum(); + scheduledForCreation.decrementAndGet(); + } }; - this.connections = new CopyOnWriteArrayList(); - this.open = new AtomicInteger(); + this.connections = new CopyOnWriteArrayList(); + this.open = new AtomicInteger(); - this.minAllowedStreams = options().getMaxRequestsPerConnection(hostDistance) * 3 / 4; + this.minAllowedStreams = options().getMaxRequestsPerConnection(hostDistance) * 3 / 4; - this.timeoutsExecutor = manager.getCluster().manager.connectionFactory.eventLoopGroup.next(); - } + this.timeoutsExecutor = manager.getCluster().manager.connectionFactory.eventLoopGroup.next(); + } - /** - * @param reusedConnection an existing connection (from a reconnection attempt) that we want to - * reuse as part of this pool. Might be null or already used by another - * pool. 
- */ - ListenableFuture initAsync(Connection reusedConnection) { - Executor initExecutor = manager.cluster.manager.configuration.getPoolingOptions().getInitializationExecutor(); - - // Create initial core connections - final int coreSize = options().getCoreConnectionsPerHost(hostDistance); - final List connections = Lists.newArrayListWithCapacity(coreSize); - final List> connectionFutures = Lists.newArrayListWithCapacity(coreSize); - - int toCreate = coreSize; - - if (reusedConnection != null && reusedConnection.setOwner(this)) { - toCreate -= 1; - connections.add(reusedConnection); - connectionFutures.add(MoreFutures.VOID_SUCCESS); - } + /** + * @param reusedConnection an existing connection (from a reconnection attempt) that we want to + * reuse as part of this pool. Might be null or already used by another pool. + */ + ListenableFuture initAsync(Connection reusedConnection) { + Executor initExecutor = + manager.cluster.manager.configuration.getPoolingOptions().getInitializationExecutor(); - List newConnections = manager.connectionFactory().newConnections(this, toCreate); - connections.addAll(newConnections); - for (Connection connection : newConnections) { - ListenableFuture connectionFuture = connection.initAsync(); - connectionFutures.add(handleErrors(connectionFuture, initExecutor)); - } + // Create initial core connections + final int coreSize = options().getCoreConnectionsPerHost(hostDistance); + final List connections = Lists.newArrayListWithCapacity(coreSize); + final List> connectionFutures = Lists.newArrayListWithCapacity(coreSize); - ListenableFuture> allConnectionsFuture = Futures.allAsList(connectionFutures); + int toCreate = coreSize; - final SettableFuture initFuture = SettableFuture.create(); - Futures.addCallback(allConnectionsFuture, new FutureCallback>() { - @Override - public void onSuccess(List l) { - // Some of the connections might have failed, keep only the successful ones - ListIterator it = connections.listIterator(); - while (it.hasNext()) { - if (it.next().isClosed()) - it.remove(); - } + if (reusedConnection != null && toCreate > 0 && reusedConnection.setOwner(this)) { + toCreate -= 1; + connections.add(reusedConnection); + connectionFutures.add(MoreFutures.VOID_SUCCESS); + } - HostConnectionPool.this.connections.addAll(connections); - open.set(connections.size()); - - if (isClosed()) { - initFuture.setException(new ConnectionException(host.getSocketAddress(), "Pool was closed during initialization")); - // we're not sure if closeAsync() saw the connections, so ensure they get closed - forceClose(connections); - } else { - logger.debug("Created connection pool to host {} ({} connections needed, {} successfully opened)", - host, coreSize, connections.size()); - phase.compareAndSet(Phase.INITIALIZING, Phase.READY); - initFuture.set(null); - } - } + List newConnections = manager.connectionFactory().newConnections(this, toCreate); + connections.addAll(newConnections); + for (Connection connection : newConnections) { + ListenableFuture connectionFuture = connection.initAsync(); + connectionFutures.add(handleErrors(connectionFuture, initExecutor)); + } - @Override - public void onFailure(Throwable t) { - phase.compareAndSet(Phase.INITIALIZING, Phase.INIT_FAILED); - forceClose(connections); - initFuture.setException(t); + ListenableFuture> allConnectionsFuture = Futures.allAsList(connectionFutures); + + final SettableFuture initFuture = SettableFuture.create(); + GuavaCompatibility.INSTANCE.addCallback( + allConnectionsFuture, + new FutureCallback>() { + @Override + 
public void onSuccess(List l) { + // Some of the connections might have failed, keep only the successful ones + ListIterator it = connections.listIterator(); + while (it.hasNext()) { + if (it.next().isClosed()) it.remove(); } - }, initExecutor); - return initFuture; - } - private ListenableFuture handleErrors(ListenableFuture connectionInitFuture, Executor executor) { - return GuavaCompatibility.INSTANCE.withFallback(connectionInitFuture, new AsyncFunction() { - @Override - public ListenableFuture apply(Throwable t) throws Exception { - // Propagate these exceptions because they mean no connection will ever succeed. They will be handled - // accordingly in SessionManager#maybeAddPool. - Throwables.propagateIfInstanceOf(t, ClusterNameMismatchException.class); - Throwables.propagateIfInstanceOf(t, UnsupportedProtocolVersionException.class); - Throwables.propagateIfInstanceOf(t, AuthenticationException.class); - - // We don't want to swallow Errors either as they probably indicate a more serious issue (OOME...) - Throwables.propagateIfInstanceOf(t, Error.class); - - // Otherwise, log the exception but return success. - // The pool will simply ignore this connection when it sees that it's been closed. - logger.warn("Error creating connection to " + host, t); - return MoreFutures.VOID_SUCCESS; + HostConnectionPool.this.connections.addAll(connections); + open.set(connections.size()); + + if (isClosed()) { + initFuture.setException( + new ConnectionException( + host.getEndPoint(), "Pool was closed during initialization")); + // we're not sure if closeAsync() saw the connections, so ensure they get closed + forceClose(connections); + } else { + logger.debug( + "Created connection pool to host {} ({} connections needed, {} successfully opened)", + host, + coreSize, + connections.size()); + phase.compareAndSet(Phase.INITIALIZING, Phase.READY); + initFuture.set(null); } - }, executor); + } + + @Override + public void onFailure(Throwable t) { + phase.compareAndSet(Phase.INITIALIZING, Phase.INIT_FAILED); + forceClose(connections); + initFuture.setException(t); + } + }, + initExecutor); + return initFuture; + } + + private ListenableFuture handleErrors( + ListenableFuture connectionInitFuture, Executor executor) { + return GuavaCompatibility.INSTANCE.withFallback( + connectionInitFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(Throwable t) throws Exception { + // Propagate these exceptions because they mean no connection will ever succeed. They + // will be handled + // accordingly in SessionManager#maybeAddPool. + Throwables.propagateIfInstanceOf(t, ClusterNameMismatchException.class); + Throwables.propagateIfInstanceOf(t, UnsupportedProtocolVersionException.class); + Throwables.propagateIfInstanceOf(t, AuthenticationException.class); + + // We don't want to swallow Errors either as they probably indicate a more serious issue + // (OOME...) + Throwables.propagateIfInstanceOf(t, Error.class); + + // Otherwise, log the exception but return success. + // The pool will simply ignore this connection when it sees that it's been closed. 
+ logger.warn("Error creating connection to " + host, t); + return MoreFutures.VOID_SUCCESS; + } + }, + executor); + } + + // Clean up if we got a fatal error at construction time but still created part of the core + // connections + private void forceClose(List connections) { + for (Connection connection : connections) { + connection.closeAsync().force(); } - - // Clean up if we got a fatal error at construction time but still created part of the core connections - private void forceClose(List connections) { - for (Connection connection : connections) { - connection.closeAsync().force(); + } + + private PoolingOptions options() { + return manager.configuration().getPoolingOptions(); + } + + ListenableFuture borrowConnection(long timeout, TimeUnit unit, int maxQueueSize) { + Phase phase = this.phase.get(); + if (phase != Phase.READY) + return Futures.immediateFailedFuture( + new ConnectionException(host.getEndPoint(), "Pool is " + phase)); + + if (connections.isEmpty()) { + if (host.convictionPolicy.canReconnectNow()) { + int coreSize = options().getCoreConnectionsPerHost(hostDistance); + if (coreSize == 0) { + maybeSpawnNewConnection(); + } else if (scheduledForCreation.compareAndSet(0, coreSize)) { + for (int i = 0; i < coreSize; i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connection in excess of core too quickly + manager.blockingExecutor().submit(newConnectionTask); + } } + return enqueue(timeout, unit, maxQueueSize); + } } - private PoolingOptions options() { - return manager.configuration().getPoolingOptions(); + int minInFlight = Integer.MAX_VALUE; + Connection leastBusy = null; + for (Connection connection : connections) { + int inFlight = connection.inFlight.get(); + if (inFlight < minInFlight) { + minInFlight = inFlight; + leastBusy = connection; + } } - ListenableFuture borrowConnection(long timeout, TimeUnit unit, int maxQueueSize) { - Phase phase = this.phase.get(); - if (phase != Phase.READY) - return Futures.immediateFailedFuture(new ConnectionException(host.getSocketAddress(), "Pool is " + phase)); - - if (connections.isEmpty()) { - if (host.convictionPolicy.canReconnectNow()) { - int coreSize = options().getCoreConnectionsPerHost(hostDistance); - if (coreSize == 0) { - maybeSpawnNewConnection(); - } else if (scheduledForCreation.compareAndSet(0, coreSize)) { - for (int i = 0; i < coreSize; i++) { - // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to - // protect against creating connection in excess of core too quickly - manager.blockingExecutor().submit(newConnectionTask); - } - } - return enqueue(timeout, unit, maxQueueSize); - } + if (leastBusy == null) { + // We could have raced with a shutdown since the last check + if (isClosed()) + return Futures.immediateFailedFuture( + new ConnectionException(host.getEndPoint(), "Pool is shutdown")); + // This might maybe happen if the number of core connections per host is 0 and a connection + // was trashed between + // the previous check to connections and now. 
But in that case, the line above will have + // trigger the creation of + // a new connection, so just wait that connection and move on + return enqueue(timeout, unit, maxQueueSize); + } else { + while (true) { + int inFlight = leastBusy.inFlight.get(); + + if (inFlight + >= Math.min( + leastBusy.maxAvailableStreams(), + options().getMaxRequestsPerConnection(hostDistance))) { + return enqueue(timeout, unit, maxQueueSize); } - int minInFlight = Integer.MAX_VALUE; - Connection leastBusy = null; - for (Connection connection : connections) { - int inFlight = connection.inFlight.get(); - if (inFlight < minInFlight) { - minInFlight = inFlight; - leastBusy = connection; - } - } - - if (leastBusy == null) { - // We could have raced with a shutdown since the last check - if (isClosed()) - return Futures.immediateFailedFuture(new ConnectionException(host.getSocketAddress(), "Pool is shutdown")); - // This might maybe happen if the number of core connections per host is 0 and a connection was trashed between - // the previous check to connections and now. But in that case, the line above will have trigger the creation of - // a new connection, so just wait that connection and move on - return enqueue(timeout, unit, maxQueueSize); - } else { - while (true) { - int inFlight = leastBusy.inFlight.get(); + if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) break; + } + } - if (inFlight >= Math.min(leastBusy.maxAvailableStreams(), options().getMaxRequestsPerConnection(hostDistance))) { - return enqueue(timeout, unit, maxQueueSize); - } + int totalInFlightCount = totalInFlight.incrementAndGet(); + // update max atomically: + while (true) { + int oldMax = maxTotalInFlight.get(); + if (totalInFlightCount <= oldMax + || maxTotalInFlight.compareAndSet(oldMax, totalInFlightCount)) break; + } - if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) - break; - } - } + int connectionCount = open.get() + scheduledForCreation.get(); + if (connectionCount < options().getCoreConnectionsPerHost(hostDistance)) { + maybeSpawnNewConnection(); + } else if (connectionCount < options().getMaxConnectionsPerHost(hostDistance)) { + // Add a connection if we fill the first n-1 connections and almost fill the last one + int currentCapacity = + (connectionCount - 1) * options().getMaxRequestsPerConnection(hostDistance) + + options().getNewConnectionThreshold(hostDistance); + if (totalInFlightCount > currentCapacity) maybeSpawnNewConnection(); + } - int totalInFlightCount = totalInFlight.incrementAndGet(); - // update max atomically: - while (true) { - int oldMax = maxTotalInFlight.get(); - if (totalInFlightCount <= oldMax || maxTotalInFlight.compareAndSet(oldMax, totalInFlightCount)) - break; - } + return leastBusy.setKeyspaceAsync(manager.poolsState.keyspace); + } - int connectionCount = open.get() + scheduledForCreation.get(); - if (connectionCount < options().getCoreConnectionsPerHost(hostDistance)) { - maybeSpawnNewConnection(); - } else if (connectionCount < options().getMaxConnectionsPerHost(hostDistance)) { - // Add a connection if we fill the first n-1 connections and almost fill the last one - int currentCapacity = (connectionCount - 1) * options().getMaxRequestsPerConnection(hostDistance) - + options().getNewConnectionThreshold(hostDistance); - if (totalInFlightCount > currentCapacity) - maybeSpawnNewConnection(); - } + private ListenableFuture enqueue(long timeout, TimeUnit unit, int maxQueueSize) { + if (timeout == 0 || maxQueueSize == 0) { + return Futures.immediateFailedFuture(new 
BusyPoolException(host.getEndPoint(), 0)); + } - return leastBusy.setKeyspaceAsync(manager.poolsState.keyspace); + while (true) { + int count = pendingBorrowCount.get(); + if (count >= maxQueueSize) { + return Futures.immediateFailedFuture( + new BusyPoolException(host.getEndPoint(), maxQueueSize)); + } + if (pendingBorrowCount.compareAndSet(count, count + 1)) { + break; + } } - private ListenableFuture enqueue(long timeout, TimeUnit unit, int maxQueueSize) { - if (timeout == 0 || maxQueueSize == 0) { - return Futures.immediateFailedFuture(new BusyPoolException(host.getSocketAddress(), 0)); - } + PendingBorrow pendingBorrow = new PendingBorrow(timeout, unit, timeoutsExecutor); + pendingBorrows.add(pendingBorrow); - while (true) { - int count = pendingBorrowCount.get(); - if (count >= maxQueueSize) { - return Futures.immediateFailedFuture(new BusyPoolException(host.getSocketAddress(), maxQueueSize)); - } - if (pendingBorrowCount.compareAndSet(count, count + 1)) { - break; - } - } + // If we raced with shutdown, make sure the future will be completed. This has no effect if it + // was properly + // handled in closeAsync. + if (phase.get() == Phase.CLOSING) { + pendingBorrow.setException(new ConnectionException(host.getEndPoint(), "Pool is shutdown")); + } - PendingBorrow pendingBorrow = new PendingBorrow(timeout, unit, timeoutsExecutor); - pendingBorrows.add(pendingBorrow); + return pendingBorrow.future; + } - // If we raced with shutdown, make sure the future will be completed. This has no effect if it was properly - // handled in closeAsync. - if (phase.get() == Phase.CLOSING) { - pendingBorrow.setException(new ConnectionException(host.getSocketAddress(), "Pool is shutdown")); - } + void returnConnection(Connection connection, boolean busy) { + connection.inFlight.decrementAndGet(); + totalInFlight.decrementAndGet(); - return pendingBorrow.future; + if (isClosed()) { + close(connection); + return; } - void returnConnection(Connection connection) { - connection.inFlight.decrementAndGet(); - totalInFlight.decrementAndGet(); + if (connection.isDefunct()) { + // As part of making it defunct, we have already replaced it or + // closed the pool. + return; + } - if (isClosed()) { - close(connection); - return; + if (connection.state.get() != TRASHED) { + if (connection.maxAvailableStreams() < minAllowedStreams) { + replaceConnection(connection); + } else if (!busy) { + dequeue(connection); + } + } + } + + // When a connection gets returned to the pool, check if there are pending borrows that can be + // completed with it. + private void dequeue(final Connection connection) { + while (!pendingBorrows.isEmpty()) { + + // We can only reuse the connection if it's under its maximum number of inFlight requests. + // Do this atomically, as we could be competing with other borrowConnection or dequeue calls. + while (true) { + int inFlight = connection.inFlight.get(); + if (inFlight + >= Math.min( + connection.maxAvailableStreams(), + options().getMaxRequestsPerConnection(hostDistance))) { + // Connection is full again, stop dequeuing + return; } - - if (connection.isDefunct()) { - // As part of making it defunct, we have already replaced it or - // closed the pool. 
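The limits consulted in the borrow path above (core/max connections, max requests per connection, the new-connection threshold, the borrow queue size and the idle timeout) all come from `PoolingOptions`. A configuration sketch, assuming the matching setters of the 3.x API; the values are illustrative, not recommendations:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;

public class PoolTuning {
  public static Cluster build() {
    PoolingOptions pooling =
        new PoolingOptions()
            .setCoreConnectionsPerHost(HostDistance.LOCAL, 2)
            .setMaxConnectionsPerHost(HostDistance.LOCAL, 8)
            .setMaxRequestsPerConnection(HostDistance.LOCAL, 1024)
            .setNewConnectionThreshold(HostDistance.LOCAL, 800)
            .setMaxQueueSize(256) // 0 disables queueing: borrows fail fast with BusyPoolException
            .setIdleTimeoutSeconds(120); // how long a trashed connection may linger before closing
    return Cluster.builder().addContactPoint("127.0.0.1").withPoolingOptions(pooling).build();
  }
}
```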
- return; + if (connection.inFlight.compareAndSet(inFlight, inFlight + 1)) { + // We acquired the right to reuse the connection for one request, proceed + break; } + } - if (connection.state.get() != TRASHED) { - if (connection.maxAvailableStreams() < minAllowedStreams) { - replaceConnection(connection); + final PendingBorrow pendingBorrow = pendingBorrows.poll(); + if (pendingBorrow == null) { + // Another thread has emptied the queue since our last check, restore the count + connection.inFlight.decrementAndGet(); + } else { + pendingBorrowCount.decrementAndGet(); + // Ensure that the keyspace set on the connection is the one set on the pool state, in the + // general case it will be. + ListenableFuture setKeyspaceFuture = + connection.setKeyspaceAsync(manager.poolsState.keyspace); + // Slight optimization, if the keyspace was already correct the future will be complete, so + // simply complete it here. + if (setKeyspaceFuture.isDone()) { + try { + if (pendingBorrow.set(Uninterruptibles.getUninterruptibly(setKeyspaceFuture))) { + totalInFlight.incrementAndGet(); } else { - dequeue(connection); + connection.inFlight.decrementAndGet(); } - } - } + } catch (ExecutionException e) { + pendingBorrow.setException(e.getCause()); + connection.inFlight.decrementAndGet(); + } + } else { + // Otherwise the keyspace did need to be set, tie the pendingBorrow future to the set + // keyspace completion. + GuavaCompatibility.INSTANCE.addCallback( + setKeyspaceFuture, + new FutureCallback() { - // When a connection gets returned to the pool, check if there are pending borrows that can be completed with it. - private void dequeue(final Connection connection) { - while (!pendingBorrows.isEmpty()) { - - // We can only reuse the connection if it's under its maximum number of inFlight requests. - // Do this atomically, as we could be competing with other borrowConnection or dequeue calls. - while (true) { - int inFlight = connection.inFlight.get(); - if (inFlight >= Math.min(connection.maxAvailableStreams(), options().getMaxRequestsPerConnection(hostDistance))) { - // Connection is full again, stop dequeuing - return; - } - if (connection.inFlight.compareAndSet(inFlight, inFlight + 1)) { - // We acquired the right to reuse the connection for one request, proceed - break; + @Override + public void onSuccess(Connection c) { + if (pendingBorrow.set(c)) { + totalInFlight.incrementAndGet(); + } else { + connection.inFlight.decrementAndGet(); + } } - } - final PendingBorrow pendingBorrow = pendingBorrows.poll(); - if (pendingBorrow == null) { - // Another thread has emptied the queue since our last check, restore the count - connection.inFlight.decrementAndGet(); - } else { - pendingBorrowCount.decrementAndGet(); - // Ensure that the keyspace set on the connection is the one set on the pool state, in the general case it will be. - ListenableFuture setKeyspaceFuture = connection.setKeyspaceAsync(manager.poolsState.keyspace); - // Slight optimization, if the keyspace was already correct the future will be complete, so simply complete it here. - if (setKeyspaceFuture.isDone()) { - try { - if (pendingBorrow.set(Uninterruptibles.getUninterruptibly(setKeyspaceFuture))) { - totalInFlight.incrementAndGet(); - } else { - connection.inFlight.decrementAndGet(); - } - } catch (ExecutionException e) { - pendingBorrow.setException(e.getCause()); - connection.inFlight.decrementAndGet(); - } - } else { - // Otherwise the keyspace did need to be set, tie the pendingBorrow future to the set keyspace completion. 
- Futures.addCallback(setKeyspaceFuture, new FutureCallback() { - - @Override - public void onSuccess(Connection c) { - if (pendingBorrow.set(c)) { - totalInFlight.incrementAndGet(); - } else { - connection.inFlight.decrementAndGet(); - } - } - - @Override - public void onFailure(Throwable t) { - pendingBorrow.setException(t); - connection.inFlight.decrementAndGet(); - } - }); + @Override + public void onFailure(Throwable t) { + pendingBorrow.setException(t); + connection.inFlight.decrementAndGet(); } - } + }); } + } } - - // Trash the connection and create a new one, but we don't call trashConnection - // directly because we want to make sure the connection is always trashed. - private void replaceConnection(Connection connection) { - if (!connection.state.compareAndSet(OPEN, TRASHED)) - return; - open.decrementAndGet(); - maybeSpawnNewConnection(); - connection.maxIdleTime = Long.MIN_VALUE; - doTrashConnection(connection); + } + + // Trash the connection and create a new one, but we don't call trashConnection + // directly because we want to make sure the connection is always trashed. + private void replaceConnection(Connection connection) { + if (!connection.state.compareAndSet(OPEN, TRASHED)) return; + open.decrementAndGet(); + maybeSpawnNewConnection(); + connection.maxIdleTime = Long.MIN_VALUE; + doTrashConnection(connection); + } + + private boolean trashConnection(Connection connection) { + if (!connection.state.compareAndSet(OPEN, TRASHED)) return true; + + // First, make sure we don't go below core connections + for (; ; ) { + int opened = open.get(); + if (opened <= options().getCoreConnectionsPerHost(hostDistance)) { + connection.state.set(OPEN); + return false; + } + + if (open.compareAndSet(opened, opened - 1)) break; } + logger.trace("Trashing {}", connection); + connection.maxIdleTime = System.currentTimeMillis() + options().getIdleTimeoutSeconds() * 1000; + doTrashConnection(connection); + return true; + } - private boolean trashConnection(Connection connection) { - if (!connection.state.compareAndSet(OPEN, TRASHED)) - return true; + private void doTrashConnection(Connection connection) { + connections.remove(connection); + trash.add(connection); + } - // First, make sure we don't go below core connections - for (; ; ) { - int opened = open.get(); - if (opened <= options().getCoreConnectionsPerHost(hostDistance)) { - connection.state.set(OPEN); - return false; - } + private boolean addConnectionIfUnderMaximum() { - if (open.compareAndSet(opened, opened - 1)) - break; - } - logger.trace("Trashing {}", connection); - connection.maxIdleTime = System.currentTimeMillis() + options().getIdleTimeoutSeconds() * 1000; - doTrashConnection(connection); - return true; - } + // First, make sure we don't cross the allowed limit of open connections + for (; ; ) { + int opened = open.get(); + if (opened >= options().getMaxConnectionsPerHost(hostDistance)) return false; - private void doTrashConnection(Connection connection) { - connections.remove(connection); - trash.add(connection); + if (open.compareAndSet(opened, opened + 1)) break; } - private boolean addConnectionIfUnderMaximum() { - - // First, make sure we don't cross the allowed limit of open connections - for (; ; ) { - int opened = open.get(); - if (opened >= options().getMaxConnectionsPerHost(hostDistance)) - return false; - - if (open.compareAndSet(opened, opened + 1)) - break; - } - - if (phase.get() != Phase.READY) { - open.decrementAndGet(); - return false; - } - - // Now really open the connection - try { - Connection 
newConnection = tryResurrectFromTrash(); - if (newConnection == null) { - if (!host.convictionPolicy.canReconnectNow()) { - open.decrementAndGet(); - return false; - } - logger.debug("Creating new connection on busy pool to {}", host); - newConnection = manager.connectionFactory().open(this); - newConnection.setKeyspace(manager.poolsState.keyspace); - } - connections.add(newConnection); - - newConnection.state.compareAndSet(RESURRECTING, OPEN); // no-op if it was already OPEN - - // We might have raced with pool shutdown since the last check; ensure the connection gets closed in case the pool did not do it. - if (isClosed() && !newConnection.isClosed()) { - close(newConnection); - open.decrementAndGet(); - return false; - } - - dequeue(newConnection); - return true; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // Skip the open but ignore otherwise - open.decrementAndGet(); - return false; - } catch (ConnectionException e) { - open.decrementAndGet(); - logger.debug("Connection error to {} while creating additional connection", host); - return false; - } catch (AuthenticationException e) { - // This shouldn't really happen in theory - open.decrementAndGet(); - logger.error("Authentication error while creating additional connection (error is: {})", e.getMessage()); - return false; - } catch (UnsupportedProtocolVersionException e) { - // This shouldn't happen since we shouldn't have been able to connect in the first place - open.decrementAndGet(); - logger.error("UnsupportedProtocolVersionException error while creating additional connection (error is: {})", e.getMessage()); - return false; - } catch (ClusterNameMismatchException e) { - open.decrementAndGet(); - logger.error("ClusterNameMismatchException error while creating additional connection (error is: {})", e.getMessage()); - return false; - } + if (phase.get() != Phase.READY) { + open.decrementAndGet(); + return false; } - private Connection tryResurrectFromTrash() { - long highestMaxIdleTime = System.currentTimeMillis(); - Connection chosen = null; - - while (true) { - for (Connection connection : trash) - if (connection.maxIdleTime > highestMaxIdleTime && connection.maxAvailableStreams() > minAllowedStreams) { - chosen = connection; - highestMaxIdleTime = connection.maxIdleTime; - } - - if (chosen == null) - return null; - else if (chosen.state.compareAndSet(TRASHED, RESURRECTING)) - break; + // Now really open the connection + try { + Connection newConnection = tryResurrectFromTrash(); + if (newConnection == null) { + if (!host.convictionPolicy.canReconnectNow()) { + open.decrementAndGet(); + return false; } - logger.trace("Resurrecting {}", chosen); - trash.remove(chosen); - return chosen; + logger.debug("Creating new connection on busy pool to {}", host); + newConnection = manager.connectionFactory().open(this); + newConnection.setKeyspace(manager.poolsState.keyspace); + } + connections.add(newConnection); + + newConnection.state.compareAndSet(RESURRECTING, OPEN); // no-op if it was already OPEN + + // We might have raced with pool shutdown since the last check; ensure the connection gets + // closed in case the pool did not do it. 
+ if (isClosed() && !newConnection.isClosed()) { + close(newConnection); + open.decrementAndGet(); + return false; + } + + dequeue(newConnection); + return true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Skip the open but ignore otherwise + open.decrementAndGet(); + return false; + } catch (ConnectionException e) { + open.decrementAndGet(); + logger.debug("Connection error to {} while creating additional connection", host); + return false; + } catch (AuthenticationException e) { + // This shouldn't really happen in theory + open.decrementAndGet(); + logger.error( + "Authentication error while creating additional connection (error is: {})", + e.getMessage()); + return false; + } catch (UnsupportedProtocolVersionException e) { + // This shouldn't happen since we shouldn't have been able to connect in the first place + open.decrementAndGet(); + logger.error( + "UnsupportedProtocolVersionException error while creating additional connection (error is: {})", + e.getMessage()); + return false; + } catch (ClusterNameMismatchException e) { + open.decrementAndGet(); + logger.error( + "ClusterNameMismatchException error while creating additional connection (error is: {})", + e.getMessage()); + return false; } - - private void maybeSpawnNewConnection() { - if (isClosed() || !host.convictionPolicy.canReconnectNow()) - return; - - while (true) { - int inCreation = scheduledForCreation.get(); - if (inCreation >= MAX_SIMULTANEOUS_CREATION) - return; - if (scheduledForCreation.compareAndSet(inCreation, inCreation + 1)) - break; + } + + private Connection tryResurrectFromTrash() { + long highestMaxIdleTime = System.currentTimeMillis(); + Connection chosen = null; + + while (true) { + for (Connection connection : trash) + if (connection.maxIdleTime > highestMaxIdleTime + && connection.maxAvailableStreams() > minAllowedStreams) { + chosen = connection; + highestMaxIdleTime = connection.maxIdleTime; } - manager.blockingExecutor().submit(newConnectionTask); + if (chosen == null) return null; + else if (chosen.state.compareAndSet(TRASHED, RESURRECTING)) break; } - - @Override - public void onConnectionDefunct(final Connection connection) { - if (connection.state.compareAndSet(OPEN, GONE)) - open.decrementAndGet(); - connections.remove(connection); - - // Don't try to replace the connection now. 
Connection.defunct already signaled the failure, - // and either the host will be marked DOWN (which destroys all pools), or we want to prevent - // new connections for some time + logger.trace("Resurrecting {}", chosen); + trash.remove(chosen); + return chosen; + } + + private void maybeSpawnNewConnection() { + if (isClosed() || !host.convictionPolicy.canReconnectNow()) return; + + while (true) { + int inCreation = scheduledForCreation.get(); + if (inCreation >= MAX_SIMULTANEOUS_CREATION) return; + if (scheduledForCreation.compareAndSet(inCreation, inCreation + 1)) break; } - void cleanupIdleConnections(long now) { - if (isClosed()) - return; - - shrinkIfBelowCapacity(); - cleanupTrash(now); - } - - /** - * If we have more active connections than needed, trash some of them - */ - private void shrinkIfBelowCapacity() { - int currentLoad = maxTotalInFlight.getAndSet(totalInFlight.get()); - - int maxRequestsPerConnection = options().getMaxRequestsPerConnection(hostDistance); - int needed = currentLoad / maxRequestsPerConnection + 1; - if (currentLoad % maxRequestsPerConnection > options().getNewConnectionThreshold(hostDistance)) - needed += 1; - needed = Math.max(needed, options().getCoreConnectionsPerHost(hostDistance)); - int actual = open.get(); - int toTrash = Math.max(0, actual - needed); - - logger.trace("Current inFlight = {}, {} connections needed, {} connections available, trashing {}", - currentLoad, needed, actual, toTrash); - - if (toTrash <= 0) - return; - - for (Connection connection : connections) - if (trashConnection(connection)) { - toTrash -= 1; - if (toTrash == 0) - return; - } - } - - /** - * Close connections that have been sitting in the trash for too long - */ - private void cleanupTrash(long now) { - for (Connection connection : trash) { - if (connection.maxIdleTime < now && connection.state.compareAndSet(TRASHED, GONE)) { - if (connection.inFlight.get() == 0) { - logger.trace("Cleaning up {}", connection); - trash.remove(connection); - close(connection); - } else { - // Given that idleTimeout >> request timeout, all outstanding requests should - // have finished by now, so we should not get here. - // Restore the status so that it's retried on the next cleanup. - connection.state.set(TRASHED); - } - } + manager.blockingExecutor().submit(newConnectionTask); + } + + @Override + public void onConnectionDefunct(final Connection connection) { + if (connection.state.compareAndSet(OPEN, GONE)) open.decrementAndGet(); + connections.remove(connection); + + // Don't try to replace the connection now. 
Connection.defunct already signaled the failure, + // and either the host will be marked DOWN (which destroys all pools), or we want to prevent + // new connections for some time + } + + void cleanupIdleConnections(long now) { + if (isClosed()) return; + + shrinkIfBelowCapacity(); + cleanupTrash(now); + } + + /** If we have more active connections than needed, trash some of them */ + private void shrinkIfBelowCapacity() { + int currentLoad = maxTotalInFlight.getAndSet(totalInFlight.get()); + + int maxRequestsPerConnection = options().getMaxRequestsPerConnection(hostDistance); + int needed = currentLoad / maxRequestsPerConnection + 1; + if (currentLoad % maxRequestsPerConnection > options().getNewConnectionThreshold(hostDistance)) + needed += 1; + needed = Math.max(needed, options().getCoreConnectionsPerHost(hostDistance)); + int actual = open.get(); + int toTrash = Math.max(0, actual - needed); + + logger.trace( + "Current inFlight = {}, {} connections needed, {} connections available, trashing {}", + currentLoad, + needed, + actual, + toTrash); + + if (toTrash <= 0) return; + + for (Connection connection : connections) + if (trashConnection(connection)) { + toTrash -= 1; + if (toTrash == 0) return; + } + } + + /** Close connections that have been sitting in the trash for too long */ + private void cleanupTrash(long now) { + for (Connection connection : trash) { + if (connection.maxIdleTime < now && connection.state.compareAndSet(TRASHED, GONE)) { + if (connection.inFlight.get() == 0) { + logger.trace("Cleaning up {}", connection); + trash.remove(connection); + close(connection); + } else { + // Given that idleTimeout >> request timeout, all outstanding requests should + // have finished by now, so we should not get here. + // Restore the status so that it's retried on the next cleanup. + connection.state.set(TRASHED); } + } } + } - private void close(final Connection connection) { - connection.closeAsync(); - } + private void close(final Connection connection) { + connection.closeAsync(); + } - final boolean isClosed() { - return closeFuture.get() != null; - } - - final CloseFuture closeAsync() { + final boolean isClosed() { + return closeFuture.get() != null; + } - CloseFuture future = closeFuture.get(); - if (future != null) - return future; + final CloseFuture closeAsync() { - phase.set(Phase.CLOSING); - - for (PendingBorrow pendingBorrow : pendingBorrows) { - pendingBorrow.setException(new ConnectionException(host.getSocketAddress(), "Pool is shutdown")); - } + CloseFuture future = closeFuture.get(); + if (future != null) return future; - future = new CloseFuture.Forwarding(discardAvailableConnections()); + phase.set(Phase.CLOSING); - return closeFuture.compareAndSet(null, future) - ? future - : closeFuture.get(); // We raced, it's ok, return the future that was actually set + for (PendingBorrow pendingBorrow : pendingBorrows) { + pendingBorrow.setException(new ConnectionException(host.getEndPoint(), "Pool is shutdown")); } - int opened() { - return open.get(); - } + future = new CloseFuture.Forwarding(discardAvailableConnections()); - int trashed() { - return trash.size(); - } + return closeFuture.compareAndSet(null, future) + ? 
future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } - private List discardAvailableConnections() { - // Note: if this gets called before initialization has completed, both connections and trash will be empty, - // so this will return an empty list + int opened() { + return open.get(); + } - List futures = new ArrayList(connections.size() + trash.size()); + int trashed() { + return trash.size(); + } - for (final Connection connection : connections) { - CloseFuture future = connection.closeAsync(); - future.addListener(new Runnable() { - @Override - public void run() { - if (connection.state.compareAndSet(OPEN, GONE)) - open.decrementAndGet(); - } - }, GuavaCompatibility.INSTANCE.sameThreadExecutor()); - futures.add(future); - } + private List discardAvailableConnections() { + // Note: if this gets called before initialization has completed, both connections and trash + // will be empty, + // so this will return an empty list - // Some connections in the trash might still be open if they hadn't reached their idle timeout - for (Connection connection : trash) - futures.add(connection.closeAsync()); + List futures = new ArrayList(connections.size() + trash.size()); - return futures; + for (final Connection connection : connections) { + CloseFuture future = connection.closeAsync(); + future.addListener( + new Runnable() { + @Override + public void run() { + if (connection.state.compareAndSet(OPEN, GONE)) open.decrementAndGet(); + } + }, + GuavaCompatibility.INSTANCE.sameThreadExecutor()); + futures.add(future); } - // This creates connections if we have less than core connections (if we - // have more than core, connection will just get trash when we can). - void ensureCoreConnections() { - if (isClosed()) - return; - - if (!host.convictionPolicy.canReconnectNow()) - return; - - // Note: this process is a bit racy, but it doesn't matter since we're still guaranteed to not create - // more connection than maximum (and if we create more than core connection due to a race but this isn't - // justified by the load, the connection in excess will be quickly trashed anyway) - int opened = open.get(); - for (int i = opened; i < options().getCoreConnectionsPerHost(hostDistance); i++) { - // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to - // protect against creating connection in excess of core too quickly - scheduledForCreation.incrementAndGet(); - manager.blockingExecutor().submit(newConnectionTask); - } + // Some connections in the trash might still be open if they hadn't reached their idle timeout + for (Connection connection : trash) futures.add(connection.closeAsync()); + + return futures; + } + + // This creates connections if we have less than core connections (if we + // have more than core, connection will just get trash when we can). 
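To make the sizing rule in `shrinkIfBelowCapacity` above concrete, here is a standalone arithmetic sketch that mirrors the same computation (illustrative only, not driver code):

```java
public class PoolShrinkMath {
  /** Mirrors shrinkIfBelowCapacity: connections needed for the observed peak load. */
  static int neededConnections(
      int peakInFlight, int maxRequestsPerConnection, int newConnectionThreshold, int core) {
    int needed = peakInFlight / maxRequestsPerConnection + 1;
    if (peakInFlight % maxRequestsPerConnection > newConnectionThreshold) {
      needed += 1;
    }
    return Math.max(needed, core);
  }

  public static void main(String[] args) {
    // Example: peak of 2500 in-flight requests, 1024 requests per connection,
    // threshold 800, core 1 -> 2500/1024 + 1 = 3; remainder 452 <= 800, so 3 connections.
    System.out.println(neededConnections(2500, 1024, 800, 1)); // prints 3
  }
}
```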
+ void ensureCoreConnections() { + if (isClosed()) return; + + if (!host.convictionPolicy.canReconnectNow()) return; + + // Note: this process is a bit racy, but it doesn't matter since we're still guaranteed to not + // create + // more connection than maximum (and if we create more than core connection due to a race but + // this isn't + // justified by the load, the connection in excess will be quickly trashed anyway) + int opened = open.get(); + for (int i = opened; i < options().getCoreConnectionsPerHost(hostDistance); i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connection in excess of core too quickly + scheduledForCreation.incrementAndGet(); + manager.blockingExecutor().submit(newConnectionTask); } + } - static class PoolState { - volatile String keyspace; + static class PoolState { + volatile String keyspace; - void setKeyspace(String keyspace) { - this.keyspace = keyspace; - } + void setKeyspace(String keyspace) { + this.keyspace = keyspace; } + } - private class PendingBorrow { - final SettableFuture future; - final Future timeoutTask; + private class PendingBorrow { + final SettableFuture future; + final Future timeoutTask; - PendingBorrow(final long timeout, final TimeUnit unit, EventExecutor timeoutsExecutor) { - this.future = SettableFuture.create(); - this.timeoutTask = timeoutsExecutor.schedule(new Runnable() { + PendingBorrow(final long timeout, final TimeUnit unit, EventExecutor timeoutsExecutor) { + this.future = SettableFuture.create(); + this.timeoutTask = + timeoutsExecutor.schedule( + new Runnable() { @Override public void run() { - future.setException( - new BusyPoolException(host.getSocketAddress(), timeout, unit)); + future.setException(new BusyPoolException(host.getEndPoint(), timeout, unit)); } - }, timeout, unit); - } + }, + timeout, + unit); + } - boolean set(Connection connection) { - boolean succeeded = this.future.set(connection); - this.timeoutTask.cancel(false); - return succeeded; - } + boolean set(Connection connection) { + boolean succeeded = this.future.set(connection); + this.timeoutTask.cancel(false); + return succeeded; + } - void setException(Throwable exception) { - this.future.setException(exception); - this.timeoutTask.cancel(false); - } + void setException(Throwable exception) { + this.future.setException(exception); + this.timeoutTask.cancel(false); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java index d1913d3408c..7056d429618 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,26 +18,23 @@ package com.datastax.driver.core; /** - * The distance to a Cassandra node as assigned by a - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy} (through its {@code - * distance} method). - *

- * The distance assigned to an host influences how many connections the driver - * maintains towards this host. If for a given host the assigned {@code HostDistance} - * is {@code LOCAL} or {@code REMOTE}, some connections will be maintained by - * the driver to this host. More active connections will be kept to - * {@code LOCAL} host than to a {@code REMOTE} one (and thus well behaving - * {@code LoadBalancingPolicy} should assign a {@code REMOTE} distance only to - * hosts that are the less often queried). - *

- * However, if a host is assigned the distance {@code IGNORED}, no connection - * to that host will maintained active. In other words, {@code IGNORED} should - * be assigned to hosts that should not be used by this driver (because they - * are in a remote data center for instance). + * The distance to a Cassandra node as assigned by a {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy} (through its {@code distance} method). + * + *

The distance assigned to an host influences how many connections the driver maintains towards + * this host. If for a given host the assigned {@code HostDistance} is {@code LOCAL} or {@code + * REMOTE}, some connections will be maintained by the driver to this host. More active connections + * will be kept to {@code LOCAL} host than to a {@code REMOTE} one (and thus well behaving {@code + * LoadBalancingPolicy} should assign a {@code REMOTE} distance only to hosts that are the less + * often queried). + * + *

However, if a host is assigned the distance {@code IGNORED}, no connection to that host will + * maintained active. In other words, {@code IGNORED} should be assigned to hosts that should not be + * used by this driver (because they are in a remote data center for instance). */ public enum HostDistance { - // Note: PoolingOptions rely on the order of the enum. - LOCAL, - REMOTE, - IGNORED + // Note: PoolingOptions rely on the order of the enum. + LOCAL, + REMOTE, + IGNORED } diff --git a/driver-core/src/main/java/com/datastax/driver/core/IgnoreJDK6Requirement.java b/driver-core/src/main/java/com/datastax/driver/core/IgnoreJDK6Requirement.java new file mode 100644 index 00000000000..4a870970f93 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/IgnoreJDK6Requirement.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** + * Annotation used to mark classes in this project as excluded from JDK 6 signature check performed + * by animal-sniffer + * Maven plugin as they require JDK 8 or higher. + */ +public @interface IgnoreJDK6Requirement {} diff --git a/driver-core/src/main/java/com/datastax/driver/core/InboundTrafficMeter.java b/driver-core/src/main/java/com/datastax/driver/core/InboundTrafficMeter.java new file mode 100644 index 00000000000..18e5cf3e562 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/InboundTrafficMeter.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import com.codahale.metrics.Meter; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; + +@Sharable +class InboundTrafficMeter extends ChannelInboundHandlerAdapter { + + private final Meter meter; + + InboundTrafficMeter(Meter meter) { + this.meter = meter; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof ByteBuf) { + meter.mark(((ByteBuf) msg).readableBytes()); + } + super.channelRead(ctx, msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/IndexMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/IndexMetadata.java index d4a4bbd7b87..395b94fba9b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/IndexMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/IndexMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,243 +21,229 @@ import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; - import java.util.Iterator; import java.util.Map; -/** - * An immutable representation of secondary index metadata. - */ +/** An immutable representation of secondary index metadata. */ public class IndexMetadata { - public enum Kind { - KEYS, - CUSTOM, - COMPOSITES - } - - static final String NAME = "index_name"; - - static final String KIND = "kind"; - - static final String OPTIONS = "options"; - - /** - * The name of the option used to specify the index target (Cassandra 3.0 onwards). - */ - public static final String TARGET_OPTION_NAME = "target"; - - /** - * The name of the option used to specify a custom index class name. - */ - public static final String CUSTOM_INDEX_OPTION_NAME = "class_name"; - - /** - * The name of the option used to specify that the index is on the collection (map) keys. - */ - public static final String INDEX_KEYS_OPTION_NAME = "index_keys"; - - /** - * The name of the option used to specify that the index is on the collection (map) entries. 
- */ - public static final String INDEX_ENTRIES_OPTION_NAME = "index_keys_and_values"; - - private final TableMetadata table; - private final String name; - private final Kind kind; - private final String target; - private final Map options; - - private IndexMetadata(TableMetadata table, String name, Kind kind, String target, Map options) { - this.table = table; - this.name = name; - this.kind = kind; - this.target = target; - this.options = options; - } - - /** - * Build an IndexMetadata from a system_schema.indexes row. - */ - static IndexMetadata fromRow(TableMetadata table, Row indexRow) { - String name = indexRow.getString(NAME); - Kind kind = Kind.valueOf(indexRow.getString(KIND)); - Map options = indexRow.getMap(OPTIONS, String.class, String.class); - String target = options.get(TARGET_OPTION_NAME); - return new IndexMetadata(table, name, kind, target, options); - } - - /** - * Build an IndexMetadata from a legacy layout (index information is stored - * along with indexed column). - */ - static IndexMetadata fromLegacy(ColumnMetadata column, ColumnMetadata.Raw raw) { - Map indexColumns = raw.indexColumns; - if (indexColumns.isEmpty()) - return null; - String type = indexColumns.get(ColumnMetadata.INDEX_TYPE); - if (type == null) - return null; - String indexName = indexColumns.get(ColumnMetadata.INDEX_NAME); - String kindStr = indexColumns.get(ColumnMetadata.INDEX_TYPE); - Kind kind = kindStr == null ? null : Kind.valueOf(kindStr); - // Special case check for the value of the index_options column being a string with value 'null' as this - // column appears to be set this way (JAVA-834). - String indexOptionsCol = indexColumns.get(ColumnMetadata.INDEX_OPTIONS); - Map options; - if (indexOptionsCol == null || indexOptionsCol.isEmpty() || indexOptionsCol.equals("null")) { - options = ImmutableMap.of(); - } else { - options = SimpleJSONParser.parseStringMap(indexOptionsCol); - } - String target = targetFromLegacyOptions(column, options); - return new IndexMetadata((TableMetadata) column.getParent(), indexName, kind, target, options); - } - - private static String targetFromLegacyOptions(ColumnMetadata column, Map options) { - String columnName = Metadata.quoteIfNecessary(column.getName()); - if (options.containsKey(INDEX_KEYS_OPTION_NAME)) - return String.format("keys(%s)", columnName); - if (options.containsKey(INDEX_ENTRIES_OPTION_NAME)) - return String.format("entries(%s)", columnName); - if (column.getType() instanceof DataType.CollectionType && column.getType().isFrozen()) - return String.format("full(%s)", columnName); - // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 - return columnName; - } - - /** - * Returns the metadata of the table this index is part of. - * - * @return the table this index is part of. - */ - public TableMetadata getTable() { - return table; - } - - /** - * Returns the index name. - * - * @return the index name. - */ - public String getName() { - return name; - } - - /** - * Returns the index kind. - * - * @return the index kind. - */ - public Kind getKind() { - return kind; - } - - /** - * Returns the index target. - * - * @return the index target. - */ - public String getTarget() { - return target; - } - - /** - * Returns whether this index is a custom one. - *

- * If it is indeed a custom index, {@link #getIndexClassName} will - * return the name of the class used in Cassandra to implement that - * index. - * - * @return {@code true} if this metadata represents a custom index. - */ - public boolean isCustomIndex() { - return getIndexClassName() != null; - } - - /** - * The name of the class used to implement the custom index, if it is one. - * - * @return the name of the class used Cassandra side to implement this - * custom index if {@code isCustomIndex() == true}, {@code null} otherwise. - */ - public String getIndexClassName() { - return getOption(CUSTOM_INDEX_OPTION_NAME); - } - - /** - * Return the value for the given option name. - * - * @param name Option name - * @return Option value - */ - public String getOption(String name) { - return options != null ? options.get(name) : null; - } - - /** - * Returns a CQL query representing this index. - *

- * This method returns a single 'CREATE INDEX' query corresponding to - * this index definition. - * - * @return the 'CREATE INDEX' query corresponding to this index. - */ - public String asCQLQuery() { - String keyspaceName = Metadata.quoteIfNecessary(table.getKeyspace().getName()); - String tableName = Metadata.quoteIfNecessary(table.getName()); - String indexName = Metadata.quoteIfNecessary(this.name); - return isCustomIndex() - ? String.format("CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s' %s;", indexName, keyspaceName, tableName, getTarget(), getIndexClassName(), getOptionsAsCql()) - : String.format("CREATE INDEX %s ON %s.%s (%s);", indexName, keyspaceName, tableName, getTarget()); - } - - /** - * Builds a string representation of the custom index options. - * - * @return String representation of the custom index options, similar to what Cassandra stores in - * the 'index_options' column of the 'schema_columns' table in the 'system' keyspace. - */ - private String getOptionsAsCql() { - Iterable> filtered = Iterables.filter(options.entrySet(), new Predicate>() { - @Override - public boolean apply(Map.Entry input) { - return - !input.getKey().equals(TARGET_OPTION_NAME) && - !input.getKey().equals(CUSTOM_INDEX_OPTION_NAME); - } - }); - if (Iterables.isEmpty(filtered)) return ""; - StringBuilder builder = new StringBuilder(); - builder.append("WITH OPTIONS = {"); - Iterator> it = filtered.iterator(); - while (it.hasNext()) { - Map.Entry option = it.next(); - builder.append(String.format("'%s' : '%s'", option.getKey(), option.getValue())); - if (it.hasNext()) - builder.append(", "); - } - builder.append("}"); - return builder.toString(); - } - - public int hashCode() { - return MoreObjects.hashCode(name, kind, target, options); - } - - public boolean equals(Object obj) { - if (obj == this) - return true; - - if (!(obj instanceof IndexMetadata)) - return false; - - IndexMetadata other = (IndexMetadata) obj; - - return MoreObjects.equal(name, other.name) - && MoreObjects.equal(kind, other.kind) - && MoreObjects.equal(target, other.target) - && MoreObjects.equal(options, other.options); - } - + public enum Kind { + KEYS, + CUSTOM, + COMPOSITES + } + + static final String NAME = "index_name"; + + static final String KIND = "kind"; + + static final String OPTIONS = "options"; + + /** The name of the option used to specify the index target (Cassandra 3.0 onwards). */ + public static final String TARGET_OPTION_NAME = "target"; + + /** The name of the option used to specify a custom index class name. */ + public static final String CUSTOM_INDEX_OPTION_NAME = "class_name"; + + /** The name of the option used to specify that the index is on the collection (map) keys. */ + public static final String INDEX_KEYS_OPTION_NAME = "index_keys"; + + /** The name of the option used to specify that the index is on the collection (map) entries. */ + public static final String INDEX_ENTRIES_OPTION_NAME = "index_keys_and_values"; + + private final TableMetadata table; + private final String name; + private final Kind kind; + private final String target; + private final Map options; + + private IndexMetadata( + TableMetadata table, String name, Kind kind, String target, Map options) { + this.table = table; + this.name = name; + this.kind = kind; + this.target = target; + this.options = options; + } + + /** Build an IndexMetadata from a system_schema.indexes row. 
*/ + static IndexMetadata fromRow(TableMetadata table, Row indexRow) { + String name = indexRow.getString(NAME); + Kind kind = Kind.valueOf(indexRow.getString(KIND)); + Map options = indexRow.getMap(OPTIONS, String.class, String.class); + String target = options.get(TARGET_OPTION_NAME); + return new IndexMetadata(table, name, kind, target, options); + } + + /** + * Build an IndexMetadata from a legacy layout (index information is stored along with indexed + * column). + */ + static IndexMetadata fromLegacy(ColumnMetadata column, ColumnMetadata.Raw raw) { + Map indexColumns = raw.indexColumns; + if (indexColumns.isEmpty()) return null; + String type = indexColumns.get(ColumnMetadata.INDEX_TYPE); + if (type == null) return null; + String indexName = indexColumns.get(ColumnMetadata.INDEX_NAME); + String kindStr = indexColumns.get(ColumnMetadata.INDEX_TYPE); + Kind kind = kindStr == null ? null : Kind.valueOf(kindStr); + // Special case check for the value of the index_options column being a string with value 'null' + // as this + // column appears to be set this way (JAVA-834). + String indexOptionsCol = indexColumns.get(ColumnMetadata.INDEX_OPTIONS); + Map options; + if (indexOptionsCol == null || indexOptionsCol.isEmpty() || indexOptionsCol.equals("null")) { + options = ImmutableMap.of(); + } else { + options = SimpleJSONParser.parseStringMap(indexOptionsCol); + } + String target = targetFromLegacyOptions(column, options); + return new IndexMetadata((TableMetadata) column.getParent(), indexName, kind, target, options); + } + + private static String targetFromLegacyOptions( + ColumnMetadata column, Map options) { + String columnName = Metadata.quoteIfNecessary(column.getName()); + if (options.containsKey(INDEX_KEYS_OPTION_NAME)) return String.format("keys(%s)", columnName); + if (options.containsKey(INDEX_ENTRIES_OPTION_NAME)) + return String.format("entries(%s)", columnName); + if (column.getType() instanceof DataType.CollectionType && column.getType().isFrozen()) + return String.format("full(%s)", columnName); + // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 + return columnName; + } + + /** + * Returns the metadata of the table this index is part of. + * + * @return the table this index is part of. + */ + public TableMetadata getTable() { + return table; + } + + /** + * Returns the index name. + * + * @return the index name. + */ + public String getName() { + return name; + } + + /** + * Returns the index kind. + * + * @return the index kind. + */ + public Kind getKind() { + return kind; + } + + /** + * Returns the index target. + * + * @return the index target. + */ + public String getTarget() { + return target; + } + + /** + * Returns whether this index is a custom one. + * + *

If it is indeed a custom index, {@link #getIndexClassName} will return the name of the class + * used in Cassandra to implement that index. + * + * @return {@code true} if this metadata represents a custom index. + */ + public boolean isCustomIndex() { + return getIndexClassName() != null; + } + + /** + * The name of the class used to implement the custom index, if it is one. + * + * @return the name of the class used Cassandra side to implement this custom index if {@code + * isCustomIndex() == true}, {@code null} otherwise. + */ + public String getIndexClassName() { + return getOption(CUSTOM_INDEX_OPTION_NAME); + } + + /** + * Return the value for the given option name. + * + * @param name Option name + * @return Option value + */ + public String getOption(String name) { + return options != null ? options.get(name) : null; + } + + /** + * Returns a CQL query representing this index. + * + *

This method returns a single 'CREATE INDEX' query corresponding to this index definition. + * + * @return the 'CREATE INDEX' query corresponding to this index. + */ + public String asCQLQuery() { + String keyspaceName = Metadata.quoteIfNecessary(table.getKeyspace().getName()); + String tableName = Metadata.quoteIfNecessary(table.getName()); + String indexName = Metadata.quoteIfNecessary(this.name); + return isCustomIndex() + ? String.format( + "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s' %s;", + indexName, keyspaceName, tableName, getTarget(), getIndexClassName(), getOptionsAsCql()) + : String.format( + "CREATE INDEX %s ON %s.%s (%s);", indexName, keyspaceName, tableName, getTarget()); + } + + /** + * Builds a string representation of the custom index options. + * + * @return String representation of the custom index options, similar to what Cassandra stores in + * the 'index_options' column of the 'schema_columns' table in the 'system' keyspace. + */ + private String getOptionsAsCql() { + Iterable> filtered = + Iterables.filter( + options.entrySet(), + new Predicate>() { + @Override + public boolean apply(Map.Entry input) { + return !input.getKey().equals(TARGET_OPTION_NAME) + && !input.getKey().equals(CUSTOM_INDEX_OPTION_NAME); + } + }); + if (Iterables.isEmpty(filtered)) return ""; + StringBuilder builder = new StringBuilder(); + builder.append("WITH OPTIONS = {"); + Iterator> it = filtered.iterator(); + while (it.hasNext()) { + Map.Entry option = it.next(); + builder.append(String.format("'%s' : '%s'", option.getKey(), option.getValue())); + if (it.hasNext()) builder.append(", "); + } + builder.append("}"); + return builder.toString(); + } + + public int hashCode() { + return MoreObjects.hashCode(name, kind, target, options); + } + + public boolean equals(Object obj) { + if (obj == this) return true; + + if (!(obj instanceof IndexMetadata)) return false; + + IndexMetadata other = (IndexMetadata) obj; + + return MoreObjects.equal(name, other.name) + && MoreObjects.equal(kind, other.kind) + && MoreObjects.equal(target, other.target) + && MoreObjects.equal(options, other.options); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/JdkSSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/JdkSSLOptions.java index ee65aa0dcc5..a4f7cdf5c95 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/JdkSSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/JdkSSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,9 @@ import io.netty.channel.socket.SocketChannel; import io.netty.handler.ssl.SslHandler; - +import java.security.NoSuchAlgorithmException; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; -import java.security.NoSuchAlgorithmException; /** * {@link SSLOptions} implementation based on built-in JDK classes. @@ -31,106 +32,106 @@ @Deprecated public class JdkSSLOptions implements SSLOptions { - /** - * Creates a builder to create a new instance. - * - * @return the builder. - */ - public static Builder builder() { - return new Builder(); + /** + * Creates a builder to create a new instance. + * + * @return the builder. + */ + public static Builder builder() { + return new Builder(); + } + + protected final SSLContext context; + protected final String[] cipherSuites; + + /** + * Creates a new instance. + * + * @param context the SSL context. + * @param cipherSuites the cipher suites to use. + */ + protected JdkSSLOptions(SSLContext context, String[] cipherSuites) { + this.context = (context == null) ? makeDefaultContext() : context; + this.cipherSuites = cipherSuites; + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel) { + SSLEngine engine = newSSLEngine(channel); + return new SslHandler(engine); + } + + /** + * Creates an SSL engine each time a connection is established. + * + *

+ * + *

You might want to override this if you need to fine-tune the engine's configuration (for + * example enabling hostname verification). + * + * @param channel the Netty channel for that connection. + * @return the engine. + */ + protected SSLEngine newSSLEngine(@SuppressWarnings("unused") SocketChannel channel) { + SSLEngine engine = context.createSSLEngine(); + engine.setUseClientMode(true); + if (cipherSuites != null) engine.setEnabledCipherSuites(cipherSuites); + return engine; + } + + private static SSLContext makeDefaultContext() throws IllegalStateException { + try { + return SSLContext.getDefault(); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException("Cannot initialize SSL Context", e); } + } - protected final SSLContext context; - protected final String[] cipherSuites; + /** Helper class to build JDK-based SSL options. */ + public static class Builder { + protected SSLContext context; + protected String[] cipherSuites; /** - * Creates a new instance. + * Set the SSL context to use. * - * @param context the SSL context. - * @param cipherSuites the cipher suites to use. + *

If this method isn't called, a context with the default options will be used, and you can + * use the default JSSE + * System properties to customize its behavior. This may in particular involve creating + * a simple keyStore and trustStore. + * + * @param context the SSL context. + * @return this builder. */ - protected JdkSSLOptions(SSLContext context, String[] cipherSuites) { - this.context = (context == null) ? makeDefaultContext() : context; - this.cipherSuites = cipherSuites; - } - - @Override - public SslHandler newSSLHandler(SocketChannel channel) { - SSLEngine engine = newSSLEngine(channel); - return new SslHandler(engine); + public Builder withSSLContext(SSLContext context) { + this.context = context; + return this; } /** - * Creates an SSL engine each time a connection is established. - *

- *

- * You might want to override this if you need to fine-tune the engine's configuration - * (for example enabling hostname verification). + * Set the cipher suites to use. + * + *

If this method isn't called, the default is to present all the eligible client ciphers to + * the server. * - * @param channel the Netty channel for that connection. - * @return the engine. + * @param cipherSuites the cipher suites to use. + * @return this builder. */ - protected SSLEngine newSSLEngine(@SuppressWarnings("unused") SocketChannel channel) { - SSLEngine engine = context.createSSLEngine(); - engine.setUseClientMode(true); - if (cipherSuites != null) - engine.setEnabledCipherSuites(cipherSuites); - return engine; - } - - private static SSLContext makeDefaultContext() throws IllegalStateException { - try { - return SSLContext.getDefault(); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException("Cannot initialize SSL Context", e); - } + public Builder withCipherSuites(String[] cipherSuites) { + this.cipherSuites = cipherSuites; + return this; } /** - * Helper class to build JDK-based SSL options. + * Builds a new instance based on the parameters provided to this builder. + * + * @return the new instance. */ - public static class Builder { - protected SSLContext context; - protected String[] cipherSuites; - - /** - * Set the SSL context to use. - *

- * If this method isn't called, a context with the default options will be used, - * and you can use the default - * JSSE System properties - * to customize its behavior. This may in particular involve - * creating a simple keyStore and trustStore. - * - * @param context the SSL context. - * @return this builder. - */ - public Builder withSSLContext(SSLContext context) { - this.context = context; - return this; - } - - /** - * Set the cipher suites to use. - *

- * If this method isn't called, the default is to present all the eligible client ciphers to the server. - * - * @param cipherSuites the cipher suites to use. - * @return this builder. - */ - public Builder withCipherSuites(String[] cipherSuites) { - this.cipherSuites = cipherSuites; - return this; - } - - /** - * Builds a new instance based on the parameters provided to this builder. - * - * @return the new instance. - */ - @SuppressWarnings("deprecation") - public JdkSSLOptions build() { - return new JdkSSLOptions(context, cipherSuites); - } + @SuppressWarnings("deprecation") + public JdkSSLOptions build() { + return new JdkSSLOptions(context, cipherSuites); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index 45ce2977dbf..b4b8b8eec5f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,351 +18,446 @@ package com.datastax.driver.core; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Lists; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -/** - * Describes a keyspace defined in this cluster. - */ +/** Describes a keyspace defined in this cluster. 
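 * (Illustrative usage, not part of this patch: an instance is normally obtained from the cluster
 * metadata rather than constructed directly, e.g. {@code KeyspaceMetadata ks =
 * cluster.getMetadata().getKeyspace("my_ks");} where {@code cluster} is an already-built {@link
 * Cluster} and "my_ks" is a placeholder keyspace name.)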
*/ public class KeyspaceMetadata { - public static final String KS_NAME = "keyspace_name"; - private static final String DURABLE_WRITES = "durable_writes"; - private static final String STRATEGY_CLASS = "strategy_class"; - private static final String STRATEGY_OPTIONS = "strategy_options"; - private static final String REPLICATION = "replication"; - - private final String name; - private final boolean durableWrites; - - private final ReplicationStrategy strategy; - private final Map replication; - - final Map tables = new ConcurrentHashMap(); - final Map views = new ConcurrentHashMap(); - final Map userTypes = Collections.synchronizedMap(new LinkedHashMap()); - final Map functions = new ConcurrentHashMap(); - final Map aggregates = new ConcurrentHashMap(); - - @VisibleForTesting - KeyspaceMetadata(String name, boolean durableWrites, Map replication) { - this.name = name; - this.durableWrites = durableWrites; - this.replication = replication; - this.strategy = ReplicationStrategy.create(replication); - } - - static KeyspaceMetadata build(Row row, VersionNumber cassandraVersion) { - if (cassandraVersion.getMajor() <= 2) { - String name = row.getString(KS_NAME); - boolean durableWrites = row.getBool(DURABLE_WRITES); - Map replicationOptions; - replicationOptions = new HashMap(); - replicationOptions.put("class", row.getString(STRATEGY_CLASS)); - replicationOptions.putAll(SimpleJSONParser.parseStringMap(row.getString(STRATEGY_OPTIONS))); - return new KeyspaceMetadata(name, durableWrites, replicationOptions); - } else { - String name = row.getString(KS_NAME); - boolean durableWrites = row.getBool(DURABLE_WRITES); - return new KeyspaceMetadata(name, durableWrites, row.getMap(REPLICATION, String.class, String.class)); + public static final String KS_NAME = "keyspace_name"; + private static final String DURABLE_WRITES = "durable_writes"; + private static final String STRATEGY_CLASS = "strategy_class"; + private static final String STRATEGY_OPTIONS = "strategy_options"; + private static final String REPLICATION = "replication"; + + private final String name; + private final boolean durableWrites; + private final boolean virtual; + + private final ReplicationStrategy strategy; + private final Map replication; + + final Map tables = new ConcurrentHashMap(); + final Map views = + new ConcurrentHashMap(); + final Map userTypes = + Collections.synchronizedMap(new LinkedHashMap()); + final Map functions = new ConcurrentHashMap(); + final Map aggregates = + new ConcurrentHashMap(); + + @VisibleForTesting + @Deprecated + KeyspaceMetadata(String name, boolean durableWrites, Map replication) { + this(name, durableWrites, replication, false); + } + + @VisibleForTesting + KeyspaceMetadata( + String name, boolean durableWrites, Map replication, boolean virtual) { + this.name = name; + this.durableWrites = durableWrites; + this.replication = replication; + this.strategy = ReplicationStrategy.create(replication); + this.virtual = virtual; + } + + static KeyspaceMetadata build(Row row, VersionNumber cassandraVersion) { + if (cassandraVersion.getMajor() <= 2) { + String name = row.getString(KS_NAME); + boolean durableWrites = row.getBool(DURABLE_WRITES); + Map replicationOptions; + replicationOptions = new HashMap(); + replicationOptions.put("class", row.getString(STRATEGY_CLASS)); + replicationOptions.putAll(SimpleJSONParser.parseStringMap(row.getString(STRATEGY_OPTIONS))); + return new KeyspaceMetadata(name, durableWrites, replicationOptions, false); + } else { + String name = row.getString(KS_NAME); + boolean 
durableWrites = row.getBool(DURABLE_WRITES); + return new KeyspaceMetadata( + name, durableWrites, row.getMap(REPLICATION, String.class, String.class), false); + } + } + + static KeyspaceMetadata buildVirtual(Row row, VersionNumber cassandraVersion) { + String name = row.getString(KS_NAME); + return new KeyspaceMetadata(name, false, Collections.emptyMap(), true); + } + + /** + * Returns the name of this keyspace. + * + * @return the name of this CQL keyspace. + */ + public String getName() { + return name; + } + + /** + * Returns whether durable writes are set on this keyspace. + * + * @return {@code true} if durable writes are set on this keyspace (the default), {@code false} + * otherwise. + */ + public boolean isDurableWrites() { + return durableWrites; + } + + /** + * Returns whether or not this keyspace is a virtual keyspace + * + * @return {@code true} if virtual keyspace default), {@code false} otherwise. + */ + public boolean isVirtual() { + return virtual; + } + + /** + * Returns the replication options for this keyspace. + * + * @return a map containing the replication options for this keyspace. + */ + public Map getReplication() { + return Collections.unmodifiableMap(replication); + } + + /** + * Returns the metadata for a table contained in this keyspace. + * + * @param name the name of table to retrieve + * @return the metadata for table {@code name} if it exists in this keyspace, {@code null} + * otherwise. + */ + public TableMetadata getTable(String name) { + return tables.get(Metadata.handleId(name)); + } + + TableMetadata removeTable(String table) { + return tables.remove(table); + } + + /** + * Returns the tables defined in this keyspace. + * + * @return a collection of the metadata for the tables defined in this keyspace. + */ + public Collection getTables() { + return Collections.unmodifiableCollection(tables.values()); + } + + /** + * Returns the metadata for a materialized view contained in this keyspace. + * + * @param name the name of materialized view to retrieve + * @return the metadata for materialized view {@code name} if it exists in this keyspace, {@code + * null} otherwise. + */ + public MaterializedViewMetadata getMaterializedView(String name) { + return views.get(Metadata.handleId(name)); + } + + MaterializedViewMetadata removeMaterializedView(String materializedView) { + return views.remove(materializedView); + } + + /** + * Returns the materialized views defined in this keyspace. + * + * @return a collection of the metadata for the materialized views defined in this keyspace. + */ + public Collection getMaterializedViews() { + return Collections.unmodifiableCollection(views.values()); + } + + /** + * Returns the definition for a user defined type (UDT) in this keyspace. + * + * @param name the name of UDT definition to retrieve + * @return the definition for {@code name} if it exists in this keyspace, {@code null} otherwise. + */ + public UserType getUserType(String name) { + return userTypes.get(Metadata.handleId(name)); + } + + /** + * Returns the user types defined in this keyspace. + * + * @return a collection of the definition for the user types defined in this keyspace. + */ + public Collection getUserTypes() { + return Collections.unmodifiableCollection(userTypes.values()); + } + + UserType removeUserType(String userType) { + return userTypes.remove(userType); + } + + /** + * Returns the definition of a function in this keyspace. + * + * @param name the name of the function. + * @param argumentTypes the types of the function's arguments. 
+ * @return the function definition if it exists in this keyspace, {@code null} otherwise. + */ + public FunctionMetadata getFunction(String name, Collection argumentTypes) { + return functions.get(Metadata.fullFunctionName(Metadata.handleId(name), argumentTypes)); + } + + /** + * Returns the definition of a function in this keyspace. + * + * @param name the name of the function. + * @param argumentTypes the types of the function's arguments. + * @return the function definition if it exists in this keyspace, {@code null} otherwise. + */ + public FunctionMetadata getFunction(String name, DataType... argumentTypes) { + return getFunction(name, Lists.newArrayList(argumentTypes)); + } + + /** + * Returns the functions defined in this keyspace. + * + * @return a collection of the definition for the functions defined in this keyspace. + */ + public Collection getFunctions() { + return Collections.unmodifiableCollection(functions.values()); + } + + FunctionMetadata removeFunction(String fullName) { + return functions.remove(fullName); + } + + /** + * Returns the definition of an aggregate in this keyspace. + * + * @param name the name of the aggregate. + * @param argumentTypes the types of the aggregate's arguments. + * @return the aggregate definition if it exists in this keyspace, {@code null} otherwise. + */ + public AggregateMetadata getAggregate(String name, Collection argumentTypes) { + return aggregates.get(Metadata.fullFunctionName(Metadata.handleId(name), argumentTypes)); + } + + /** + * Returns the definition of an aggregate in this keyspace. + * + * @param name the name of the aggregate. + * @param argumentTypes the types of the aggregate's arguments. + * @return the aggregate definition if it exists in this keyspace, {@code null} otherwise. + */ + public AggregateMetadata getAggregate(String name, DataType... argumentTypes) { + return getAggregate(name, Lists.newArrayList(argumentTypes)); + } + + /** + * Returns the aggregates defined in this keyspace. + * + * @return a collection of the definition for the aggregates defined in this keyspace. + */ + public Collection getAggregates() { + return Collections.unmodifiableCollection(aggregates.values()); + } + + AggregateMetadata removeAggregate(String fullName) { + return aggregates.remove(fullName); + } + + // comparators for ordering types in cqlsh output. + + private static final Comparator typeByName = + new Comparator() { + @Override + public int compare(UserType o1, UserType o2) { + return o1.getTypeName().compareTo(o2.getTypeName()); } - } - - /** - * Returns the name of this keyspace. - * - * @return the name of this CQL keyspace. - */ - public String getName() { - return name; - } - - /** - * Returns whether durable writes are set on this keyspace. - * - * @return {@code true} if durable writes are set on this keyspace (the - * default), {@code false} otherwise. - */ - public boolean isDurableWrites() { - return durableWrites; - } - - /** - * Returns the replication options for this keyspace. - * - * @return a map containing the replication options for this keyspace. - */ - public Map getReplication() { - return Collections.unmodifiableMap(replication); - } - - /** - * Returns the metadata for a table contained in this keyspace. - * - * @param name the name of table to retrieve - * @return the metadata for table {@code name} if it exists in this keyspace, - * {@code null} otherwise. 
- */ - public TableMetadata getTable(String name) { - return tables.get(Metadata.handleId(name)); - } - - TableMetadata removeTable(String table) { - return tables.remove(table); - } - - /** - * Returns the tables defined in this keyspace. - * - * @return a collection of the metadata for the tables defined in this - * keyspace. - */ - public Collection getTables() { - return Collections.unmodifiableCollection(tables.values()); - } + }; - /** - * Returns the metadata for a materialized view contained in this keyspace. - * - * @param name the name of materialized view to retrieve - * @return the metadata for materialized view {@code name} if it exists in this keyspace, - * {@code null} otherwise. - */ - public MaterializedViewMetadata getMaterializedView(String name) { - return views.get(Metadata.handleId(name)); - } - - MaterializedViewMetadata removeMaterializedView(String materializedView) { - return views.remove(materializedView); - } - - /** - * Returns the materialized views defined in this keyspace. - * - * @return a collection of the metadata for the materialized views defined in this - * keyspace. - */ - public Collection getMaterializedViews() { - return Collections.unmodifiableCollection(views.values()); - } - - /** - * Returns the definition for a user defined type (UDT) in this keyspace. - * - * @param name the name of UDT definition to retrieve - * @return the definition for {@code name} if it exists in this keyspace, - * {@code null} otherwise. - */ - public UserType getUserType(String name) { - return userTypes.get(Metadata.handleId(name)); - } - - /** - * Returns the user types defined in this keyspace. - * - * @return a collection of the definition for the user types defined in this - * keyspace. - */ - public Collection getUserTypes() { - return Collections.unmodifiableCollection(userTypes.values()); - } - - UserType removeUserType(String userType) { - return userTypes.remove(userType); - } - - /** - * Returns the definition of a function in this keyspace. - * - * @param name the name of the function. - * @param argumentTypes the types of the function's arguments. - * @return the function definition if it exists in this keyspace, {@code null} otherwise. - */ - public FunctionMetadata getFunction(String name, Collection argumentTypes) { - return functions.get(Metadata.fullFunctionName(Metadata.handleId(name), argumentTypes)); - } - - /** - * Returns the definition of a function in this keyspace. - * - * @param name the name of the function. - * @param argumentTypes the types of the function's arguments. - * @return the function definition if it exists in this keyspace, {@code null} otherwise. - */ - public FunctionMetadata getFunction(String name, DataType... argumentTypes) { - return getFunction(name, Lists.newArrayList(argumentTypes)); - } - - /** - * Returns the functions defined in this keyspace. - * - * @return a collection of the definition for the functions defined in this - * keyspace. - */ - public Collection getFunctions() { - return Collections.unmodifiableCollection(functions.values()); - } - - FunctionMetadata removeFunction(String fullName) { - return functions.remove(fullName); - } - - /** - * Returns the definition of an aggregate in this keyspace. - * - * @param name the name of the aggregate. - * @param argumentTypes the types of the aggregate's arguments. - * @return the aggregate definition if it exists in this keyspace, {@code null} otherwise. 
- */ - public AggregateMetadata getAggregate(String name, Collection argumentTypes) { - return aggregates.get(Metadata.fullFunctionName(Metadata.handleId(name), argumentTypes)); - } - - /** - * Returns the definition of an aggregate in this keyspace. - * - * @param name the name of the aggregate. - * @param argumentTypes the types of the aggregate's arguments. - * @return the aggregate definition if it exists in this keyspace, {@code null} otherwise. - */ - public AggregateMetadata getAggregate(String name, DataType... argumentTypes) { - return getAggregate(name, Lists.newArrayList(argumentTypes)); - } - - /** - * Returns the aggregates defined in this keyspace. - * - * @return a collection of the definition for the aggregates defined in this - * keyspace. - */ - public Collection getAggregates() { - return Collections.unmodifiableCollection(aggregates.values()); - } - - AggregateMetadata removeAggregate(String fullName) { - return aggregates.remove(fullName); - } - - /** - * Returns a {@code String} containing CQL queries representing this - * keyspace and the user types and tables it contains. - *

- * In other words, this method returns the queries that would allow to - * recreate the schema of this keyspace, along with all its user - * types/tables. - *

- * Note that the returned String is formatted to be human readable (for - * some definition of human readable at least). - * - * @return the CQL queries representing this keyspace schema as a {code - * String}. - */ - public String exportAsString() { - StringBuilder sb = new StringBuilder(); - - sb.append(asCQLQuery()).append('\n'); - - for (UserType udt : userTypes.values()) - sb.append('\n').append(udt.exportAsString()).append('\n'); - - for (TableMetadata tm : tables.values()) - sb.append('\n').append(tm.exportAsString()).append('\n'); - - for (FunctionMetadata fm : functions.values()) - sb.append('\n').append(fm.exportAsString()).append('\n'); - - for (AggregateMetadata am : aggregates.values()) - sb.append('\n').append(am.exportAsString()).append('\n'); - - return sb.toString(); - } - - /** - * Returns a CQL query representing this keyspace. - *

- * This method returns a single 'CREATE KEYSPACE' query with the options - * corresponding to this keyspace definition. - * - * @return the 'CREATE KEYSPACE' query corresponding to this keyspace. - * @see #exportAsString - */ - public String asCQLQuery() { - StringBuilder sb = new StringBuilder(); - - sb.append("CREATE KEYSPACE ").append(Metadata.quoteIfNecessary(name)).append(" WITH "); - sb.append("REPLICATION = { 'class' : '").append(replication.get("class")).append('\''); - for (Map.Entry entry : replication.entrySet()) { - if (entry.getKey().equals("class")) - continue; - sb.append(", '").append(entry.getKey()).append("': '").append(entry.getValue()).append('\''); + private static final Comparator functionByName = + new Comparator() { + @Override + public int compare(FunctionMetadata o1, FunctionMetadata o2) { + return o1.getSimpleName().compareTo(o2.getSimpleName()); } - sb.append(" } AND DURABLE_WRITES = ").append(durableWrites); - sb.append(';'); - return sb.toString(); - } - - @Override - public String toString() { - return asCQLQuery(); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - KeyspaceMetadata that = (KeyspaceMetadata) o; - - if (durableWrites != that.durableWrites) - return false; - if (!name.equals(that.name)) - return false; - if (strategy != null ? !strategy.equals(that.strategy) : that.strategy != null) - return false; - if (!replication.equals(that.replication)) - return false; - return tables.equals(that.tables); - - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + (durableWrites ? 1 : 0); - result = 31 * result + (strategy != null ? strategy.hashCode() : 0); - result = 31 * result + replication.hashCode(); - result = 31 * result + tables.hashCode(); - return result; - } - - void add(TableMetadata tm) { - tables.put(tm.getName(), tm); - } - - void add(MaterializedViewMetadata view) { - views.put(view.getName(), view); - } - - void add(FunctionMetadata function) { - String functionName = Metadata.fullFunctionName(function.getSimpleName(), function.getArguments().values()); - functions.put(functionName, function); - } - - void add(AggregateMetadata aggregate) { - String aggregateName = Metadata.fullFunctionName(aggregate.getSimpleName(), aggregate.getArgumentTypes()); - aggregates.put(aggregateName, aggregate); - } - - void add(UserType type) { - userTypes.put(type.getTypeName(), type); - } - - ReplicationStrategy replicationStrategy() { - return strategy; - } + }; + private static final Comparator aggregateByName = + new Comparator() { + @Override + public int compare(AggregateMetadata o1, AggregateMetadata o2) { + return o1.getSimpleName().compareTo(o2.getSimpleName()); + } + }; + + /** + * Returns a {@code String} containing CQL queries representing this keyspace and the user types + * and tables it contains. + * + *

In other words, this method returns the queries that would allow to recreate the schema of + * this keyspace, along with all its user types/tables. + * + *

Note that the returned String is formatted to be human readable (for some definition of + * human readable at least). + * + * @return the CQL queries representing this keyspace schema as a {code String}. + */ + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(asCQLQuery()).append('\n'); + + // include types, tables, views, functions and aggregates, each ordered by name, with one small + // exception + // being that user types are ordered topologically and then by name within same level. + for (UserType udt : getSortedUserTypes()) + sb.append('\n').append(udt.exportAsString()).append('\n'); + + for (AbstractTableMetadata tm : + ImmutableSortedSet.orderedBy(AbstractTableMetadata.byNameComparator) + .addAll(tables.values()) + .build()) sb.append('\n').append(tm.exportAsString()).append('\n'); + + for (FunctionMetadata fm : + ImmutableSortedSet.orderedBy(functionByName).addAll(functions.values()).build()) + sb.append('\n').append(fm.exportAsString()).append('\n'); + + for (AggregateMetadata am : + ImmutableSortedSet.orderedBy(aggregateByName).addAll(aggregates.values()).build()) + sb.append('\n').append(am.exportAsString()).append('\n'); + + return sb.toString(); + } + + private List getSortedUserTypes() { + // rebuilds dependency tree of user types so they may be sorted within each dependency level. + List unsortedTypes = new ArrayList(userTypes.values()); + DirectedGraph graph = new DirectedGraph(typeByName, unsortedTypes); + for (UserType from : unsortedTypes) { + for (UserType to : unsortedTypes) { + if (from != to && dependsOn(to, from)) graph.addEdge(from, to); + } + } + return graph.topologicalSort(); + } + + private boolean dependsOn(UserType udt1, UserType udt2) { + for (UserType.Field field : udt1) { + if (references(field.getType(), udt2)) { + return true; + } + } + return false; + } + + private boolean references(DataType dataType, DataType udtType) { + if (dataType.equals(udtType)) return true; + for (DataType arg : dataType.getTypeArguments()) { + if (references(arg, udtType)) return true; + } + if (dataType instanceof TupleType) { + for (DataType arg : ((TupleType) dataType).getComponentTypes()) { + if (references(arg, udtType)) return true; + } + } + return false; + } + + /** + * Returns a CQL query representing this keyspace. + * + *

This method returns a single 'CREATE KEYSPACE' query with the options corresponding to this + * keyspace definition. + * + * @return the 'CREATE KEYSPACE' query corresponding to this keyspace. + * @see #exportAsString + */ + public String asCQLQuery() { + StringBuilder sb = new StringBuilder(); + if (virtual) { + sb.append("/* VIRTUAL "); + } else { + sb.append("CREATE "); + } + + sb.append("KEYSPACE ").append(Metadata.quoteIfNecessary(name)).append(" WITH "); + sb.append("REPLICATION = { 'class' : '").append(replication.get("class")).append('\''); + for (Map.Entry entry : replication.entrySet()) { + if (entry.getKey().equals("class")) continue; + sb.append(", '").append(entry.getKey()).append("': '").append(entry.getValue()).append('\''); + } + sb.append(" } AND DURABLE_WRITES = ").append(durableWrites); + sb.append(';'); + if (virtual) { + sb.append("*/"); + } + return sb.toString(); + } + + @Override + public String toString() { + if (virtual) { + return name; + } + return asCQLQuery(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + KeyspaceMetadata that = (KeyspaceMetadata) o; + + if (durableWrites != that.durableWrites) return false; + if (!name.equals(that.name)) return false; + if (strategy != null ? !strategy.equals(that.strategy) : that.strategy != null) return false; + if (!replication.equals(that.replication)) return false; + return tables.equals(that.tables); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + (durableWrites ? 1 : 0); + result = 31 * result + (strategy != null ? strategy.hashCode() : 0); + result = 31 * result + replication.hashCode(); + result = 31 * result + tables.hashCode(); + return result; + } + + void add(TableMetadata tm) { + tables.put(tm.getName(), tm); + } + + void add(MaterializedViewMetadata view) { + views.put(view.getName(), view); + } + + void add(FunctionMetadata function) { + String functionName = + Metadata.fullFunctionName(function.getSimpleName(), function.getArguments().values()); + functions.put(functionName, function); + } + + void add(AggregateMetadata aggregate) { + String aggregateName = + Metadata.fullFunctionName(aggregate.getSimpleName(), aggregate.getArgumentTypes()); + aggregates.put(aggregateName, aggregate); + } + + void add(UserType type) { + userTypes.put(type.getTypeName(), type); + } + + ReplicationStrategy replicationStrategy() { + return strategy; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/LZ4Compressor.java b/driver-core/src/main/java/com/datastax/driver/core/LZ4Compressor.java index 19633c116df..d639b8272fc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LZ4Compressor.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LZ4Compressor.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,167 +18,200 @@ package com.datastax.driver.core; import io.netty.buffer.ByteBuf; +import java.io.IOException; +import java.nio.ByteBuffer; import net.jpountz.lz4.LZ4Factory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.nio.ByteBuffer; - class LZ4Compressor extends FrameCompressor { - private static final Logger logger = LoggerFactory.getLogger(LZ4Compressor.class); - - static final LZ4Compressor instance; - - static { - LZ4Compressor i; - try { - i = new LZ4Compressor(); - } catch (NoClassDefFoundError e) { - i = null; - logger.warn("Cannot find LZ4 class, you should make sure the LZ4 library is in the classpath if you intend to use it. LZ4 compression will not be available for the protocol."); - } catch (Throwable e) { - i = null; - logger.warn("Error loading LZ4 library ({}). LZ4 compression will not be available for the protocol.", e.toString()); - } - instance = i; + private static final Logger logger = LoggerFactory.getLogger(LZ4Compressor.class); + + static final LZ4Compressor instance; + + static { + LZ4Compressor i; + try { + i = new LZ4Compressor(); + } catch (NoClassDefFoundError e) { + i = null; + logger.warn( + "Cannot find LZ4 class, you should make sure the LZ4 library is in the classpath if you intend to use it. LZ4 compression will not be available for the protocol."); + } catch (Throwable e) { + i = null; + logger.warn( + "Error loading LZ4 library ({}). LZ4 compression will not be available for the protocol.", + e.toString()); } - - private static final int INTEGER_BYTES = 4; - private final net.jpountz.lz4.LZ4Compressor compressor; - private final net.jpountz.lz4.LZ4FastDecompressor decompressor; - - private LZ4Compressor() { - final LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); - logger.info("Using {}", lz4Factory.toString()); - compressor = lz4Factory.fastCompressor(); - decompressor = lz4Factory.fastDecompressor(); + instance = i; + } + + private static final int INTEGER_BYTES = 4; + private final net.jpountz.lz4.LZ4Compressor compressor; + private final net.jpountz.lz4.LZ4FastDecompressor decompressor; + + private LZ4Compressor() { + final LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); + logger.info("Using {}", lz4Factory.toString()); + compressor = lz4Factory.fastCompressor(); + decompressor = lz4Factory.fastDecompressor(); + } + + @Override + Frame compress(Frame frame) throws IOException { + ByteBuf input = frame.body; + ByteBuf frameBody = compress(input, true); + return frame.with(frameBody); + } + + @Override + ByteBuf compress(ByteBuf buffer) throws IOException { + return compress(buffer, false); + } + + private ByteBuf compress(ByteBuf buffer, boolean prependWithUncompressedLength) + throws IOException { + return buffer.isDirect() + ? compressDirect(buffer, prependWithUncompressedLength) + : compressHeap(buffer, prependWithUncompressedLength); + } + + private ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength) + throws IOException { + int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); + // If the input is direct we will allocate a direct output buffer as well as this will allow us + // to use + // LZ4Compressor.compress and so eliminate memory copies. 
+ ByteBuf output = + input + .alloc() + .directBuffer( + (prependWithUncompressedLength ? INTEGER_BYTES : 0) + maxCompressedLength); + try { + ByteBuffer in = inputNioBuffer(input); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + if (prependWithUncompressedLength) { + output.writeInt(in.remaining()); + } + + ByteBuffer out = outputNioBuffer(output); + int written = + compressor.compress( + in, in.position(), in.remaining(), out, out.position(), out.remaining()); + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + written); + } catch (Exception e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw new IOException(e); } - - @Override - Frame compress(Frame frame) throws IOException { - ByteBuf input = frame.body; - - // TODO: JAVA-1306: Use the same API calls for direct and heap buffers when LZ4 updated. - ByteBuf frameBody = input.isDirect() ? compressDirect(input) : compressHeap(input); - return frame.with(frameBody); + return output; + } + + private ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength) + throws IOException { + int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); + + // Not a direct buffer so use byte arrays... + int inOffset = input.arrayOffset() + input.readerIndex(); + byte[] in = input.array(); + int len = input.readableBytes(); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and + // so + // can eliminate the overhead of allocate a new byte[]. + ByteBuf output = + input + .alloc() + .heapBuffer((prependWithUncompressedLength ? INTEGER_BYTES : 0) + maxCompressedLength); + try { + if (prependWithUncompressedLength) { + output.writeInt(len); + } + // calculate the correct offset. + int offset = output.arrayOffset() + output.writerIndex(); + byte[] out = output.array(); + int written = compressor.compress(in, inOffset, len, out, offset); + + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + written); + } catch (Exception e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw new IOException(e); } - - private ByteBuf compressDirect(ByteBuf input) throws IOException { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us to use - // LZ4Compressor.compress and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(INTEGER_BYTES + maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - output.writeInt(in.remaining()); - - ByteBuffer out = outputNioBuffer(output); - int written = compressor.compress(in, in.position(), in.remaining(), out, out.position(), out.remaining()); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new IOException(e); - } - return output; - } - - private ByteBuf compressHeap(ByteBuf input) throws IOException { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - - // Not a direct buffer so use byte arrays... 
- int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so - // can eliminate the overhead of allocate a new byte[]. - ByteBuf output = input.alloc().heapBuffer(INTEGER_BYTES + maxCompressedLength); - try { - output.writeInt(len); - // calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = compressor.compress(in, inOffset, len, out, offset); - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new IOException(e); - } - return output; + return output; + } + + @Override + Frame decompress(Frame frame) throws IOException { + ByteBuf input = frame.body; + int uncompressedLength = input.readInt(); + ByteBuf frameBody = decompress(input, uncompressedLength); + return frame.with(frameBody); + } + + @Override + ByteBuf decompress(ByteBuf buffer, int uncompressedLength) throws IOException { + return buffer.isDirect() + ? decompressDirect(buffer, uncompressedLength) + : decompressHeap(buffer, uncompressedLength); + } + + private ByteBuf decompressDirect(ByteBuf input, int uncompressedLength) throws IOException { + // If the input is direct we will allocate a direct output buffer as well as this will allow us + // to use + // LZ4Compressor.decompress and so eliminate memory copies. + int readable = input.readableBytes(); + ByteBuffer in = inputNioBuffer(input); + // Increase reader index. + input.readerIndex(input.writerIndex()); + ByteBuf output = input.alloc().directBuffer(uncompressedLength); + try { + ByteBuffer out = outputNioBuffer(output); + int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining()); + if (read != readable) throw new IOException("Compressed lengths mismatch"); + + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + uncompressedLength); + } catch (Exception e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw new IOException(e); } - - @Override - Frame decompress(Frame frame) throws IOException { - ByteBuf input = frame.body; - - // TODO: JAVA-1306: Use the same API calls for direct and heap buffers when LZ4 updated. - ByteBuf frameBody = input.isDirect() ? decompressDirect(input) : decompressHeap(input); - return frame.with(frameBody); - } - - private ByteBuf decompressDirect(ByteBuf input) throws IOException { - // If the input is direct we will allocate a direct output buffer as well as this will allow us to use - // LZ4Compressor.decompress and so eliminate memory copies. - int readable = input.readableBytes(); - int uncompressedLength = input.readInt(); - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. 
- input.readerIndex(input.writerIndex()); - ByteBuf output = input.alloc().directBuffer(uncompressedLength); - try { - ByteBuffer out = outputNioBuffer(output); - int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining()); - if (read != readable - INTEGER_BYTES) - throw new IOException("Compressed lengths mismatch"); - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new IOException(e); - } - return output; - } - - private ByteBuf decompressHeap(ByteBuf input) throws IOException { - // Not a direct buffer so use byte arrays... - byte[] in = input.array(); - int len = input.readableBytes(); - int uncompressedLength = input.readInt(); - int inOffset = input.arrayOffset() + input.readerIndex(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so - // can eliminate the overhead of allocate a new byte[]. - ByteBuf output = input.alloc().heapBuffer(uncompressedLength); - try { - int offset = output.arrayOffset() + output.writerIndex(); - byte out[] = output.array(); - int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength); - if (read != len - INTEGER_BYTES) - throw new IOException("Compressed lengths mismatch"); - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new IOException(e); - } - return output; + return output; + } + + private ByteBuf decompressHeap(ByteBuf input, int uncompressedLength) throws IOException { + // Not a direct buffer so use byte arrays... + byte[] in = input.array(); + int len = input.readableBytes(); + int inOffset = input.arrayOffset() + input.readerIndex(); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and + // so + // can eliminate the overhead of allocate a new byte[]. + ByteBuf output = input.alloc().heapBuffer(uncompressedLength); + try { + int offset = output.arrayOffset() + output.writerIndex(); + byte out[] = output.array(); + int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength); + if (read != len) throw new IOException("Compressed lengths mismatch"); + + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + uncompressedLength); + } catch (Exception e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw new IOException(e); } + return output; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java b/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java index b8b76670017..6dec2f9db96 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LatencyTracker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
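The frame compressor above optionally prepends the uncompressed length (a 4-byte integer) so the receiving side can size its output buffer before inflating, and it chooses a direct or heap code path depending on the input buffer. A standalone round-trip sketch of the same idea using plain byte arrays and the lz4-java API; the payload string is arbitrary:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;

public class Lz4RoundTripExample {
  public static void main(String[] args) {
    LZ4Factory factory = LZ4Factory.fastestInstance();
    LZ4Compressor compressor = factory.fastCompressor();
    LZ4FastDecompressor decompressor = factory.fastDecompressor();

    byte[] payload = "some frame body to compress".getBytes(StandardCharsets.UTF_8);

    // Compress into a buffer prefixed with the uncompressed length (4 bytes),
    // mirroring what the frame compressor does before writing to the wire.
    int maxCompressed = compressor.maxCompressedLength(payload.length);
    ByteBuffer framed = ByteBuffer.allocate(4 + maxCompressed);
    framed.putInt(payload.length);
    int written =
        compressor.compress(payload, 0, payload.length, framed.array(), 4, maxCompressed);
    framed.limit(4 + written);

    // Decompress: read the length prefix, then inflate the remaining bytes.
    framed.rewind();
    int uncompressedLength = framed.getInt();
    byte[] restored = new byte[uncompressedLength];
    decompressor.decompress(framed.array(), 4, restored, 0, uncompressedLength);

    System.out.println(new String(restored, StandardCharsets.UTF_8));
  }
}
```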
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,53 +18,50 @@ package com.datastax.driver.core; /** - * Interface for objects that are interested in tracking the latencies - * of the driver queries to each Cassandra nodes. - *
- * An implementation of this interface can be registered against a Cluster - * object trough the {@link Cluster#register} method, after which the - * {@link #update(Host, Statement, Exception, long)} method will be called after each query of the driver to a Cassandra - * host with the latency/duration (in nanoseconds) of this operation. + * Interface for objects that are interested in tracking the latencies of the driver queries to each + * Cassandra nodes. + * + *
An implementation of this interface can be registered against a Cluster object trough the + * {@link Cluster#register} method, after which the {@link #update(Host, Statement, Exception, + * long)} method will be called after each query of the driver to a Cassandra host with the + * latency/duration (in nanoseconds) of this operation. */ public interface LatencyTracker { - /** - * A method that is called after each request to a Cassandra node with - * the duration of that operation. - *
- * Note that there is no guarantee that this method won't be called - * concurrently by multiple threads, so implementations should synchronize - * internally if need be. - * - * @param host The Cassandra host on which a request has been performed. - * This parameter is never {@code null}. - * @param statement The {@link com.datastax.driver.core.Statement} that has been executed. - * This parameter is never {@code null}. - * @param exception An {@link Exception} thrown when receiving the response, or {@code null} - * if the response was successful. - * @param newLatencyNanos the latency in nanoseconds of the operation. - * This latency corresponds to the time elapsed between - * when the query was sent to {@code host} and - * when the response was received by the driver - * (or the operation timed out, in which {@code newLatencyNanos} - * will approximately be the timeout value). - */ - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos); + /** + * A method that is called after each request to a Cassandra node with the duration of that + * operation. + * + *
Note that there is no guarantee that this method won't be called concurrently by multiple + * threads, so implementations should synchronize internally if need be. + * + * @param host The Cassandra host on which a request has been performed. This parameter is never + * {@code null}. + * @param statement The {@link com.datastax.driver.core.Statement} that has been executed. This + * parameter is never {@code null}. + * @param exception An {@link Exception} thrown when receiving the response, or {@code null} if + * the response was successful. + * @param newLatencyNanos the latency in nanoseconds of the operation. This latency corresponds to + * the time elapsed between when the query was sent to {@code host} and when the response was + * received by the driver (or the operation timed out, in which {@code newLatencyNanos} will + * approximately be the timeout value). + */ + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos); - /** - * Gets invoked when the tracker is registered with a cluster, or at cluster startup if the - * tracker was registered at initialization with - * {@link com.datastax.driver.core.Cluster.Initializer#register(LatencyTracker)}. - * - * @param cluster the cluster that this tracker is registered with. - */ - void onRegister(Cluster cluster); + /** + * Gets invoked when the tracker is registered with a cluster, or at cluster startup if the + * tracker was registered at initialization with {@link + * com.datastax.driver.core.Cluster.Initializer#register(LatencyTracker)}. + * + * @param cluster the cluster that this tracker is registered with. + */ + void onRegister(Cluster cluster); - /** - * Gets invoked when the tracker is unregistered from a cluster, or at cluster shutdown if - * the tracker was not unregistered. - * - * @param cluster the cluster that this tracker was registered with. - */ - void onUnregister(Cluster cluster); + /** + * Gets invoked when the tracker is unregistered from a cluster, or at cluster shutdown if the + * tracker was not unregistered. + * + * @param cluster the cluster that this tracker was registered with. + */ + void onUnregister(Cluster cluster); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/LocalDate.java b/driver-core/src/main/java/com/datastax/driver/core/LocalDate.java index 424c87c3abb..271b53f581d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LocalDate.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LocalDate.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
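The LatencyTracker javadoc above stresses that update() may be invoked concurrently and receives the per-query latency in nanoseconds. A minimal, stateless implementation that logs slow queries; the 500 ms threshold is an arbitrary illustration, not a driver default:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.LatencyTracker;
import com.datastax.driver.core.Statement;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SlowQueryLogger implements LatencyTracker {
  private static final Logger LOG = LoggerFactory.getLogger(SlowQueryLogger.class);
  // Illustrative threshold: 500 ms expressed in nanoseconds.
  private static final long THRESHOLD_NANOS = TimeUnit.MILLISECONDS.toNanos(500);

  @Override
  public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) {
    // May be called concurrently; this implementation is stateless, so no synchronization needed.
    if (exception == null && newLatencyNanos > THRESHOLD_NANOS) {
      LOG.warn("Slow query on {}: {} ns", host, newLatencyNanos);
    }
  }

  @Override
  public void onRegister(Cluster cluster) {
    LOG.info("Registered with cluster {}", cluster.getClusterName());
  }

  @Override
  public void onUnregister(Cluster cluster) {
    LOG.info("Unregistered from cluster {}", cluster.getClusterName());
  }
}
```

A tracker like this is attached with cluster.register(new SlowQueryLogger()), the registration path referenced in the javadoc above.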
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,220 +17,221 @@ */ package com.datastax.driver.core; +import static com.google.common.base.Preconditions.checkArgument; + import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.TimeZone; import java.util.concurrent.TimeUnit; -import static com.google.common.base.Preconditions.checkArgument; - /** * A date with no time components, no time zone, in the ISO 8601 calendar. - *
- * Note that ISO 8601 has a number of differences with the default gregorian calendar used in Java: + * + *
Note that ISO 8601 has a number of differences with the default gregorian calendar used in + * Java: + * *
 * <ul>
- * <li>it uses a proleptic gregorian calendar, meaning that it's gregorian indefinitely back in the past (there is no gregorian change);
- * <li>there is a year 0.
+ *   <li>it uses a proleptic gregorian calendar, meaning that it's gregorian indefinitely back in
+ *       the past (there is no gregorian change);
+ *   <li>there is a year 0.
 * </ul>
- *
- * This class implements these differences, so that year/month/day fields match exactly the ones in
- * CQL string literals.
+ *
+ * <p>
This class implements these differences, so that year/month/day fields match exactly the ones + * in CQL string literals. * * @since 2.2 */ public final class LocalDate { - private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - - private final long millisSinceEpoch; - private final int daysSinceEpoch; - - // This gets initialized lazily if we ever need it. Once set, it is effectively immutable. - private volatile GregorianCalendar calendar; - - private LocalDate(int daysSinceEpoch) { - this.daysSinceEpoch = daysSinceEpoch; - this.millisSinceEpoch = TimeUnit.DAYS.toMillis(daysSinceEpoch); - } - - /** - * Builds a new instance from a number of days since January 1st, 1970 GMT. - * - * @param daysSinceEpoch the number of days. - * @return the new instance. - */ - public static LocalDate fromDaysSinceEpoch(int daysSinceEpoch) { - return new LocalDate(daysSinceEpoch); - } - - /** - * Builds a new instance from a number of milliseconds since January 1st, 1970 GMT. - * Note that if the given number does not correspond to a whole number of days, - * it will be rounded towards 0. - * - * @param millisSinceEpoch the number of milliseconds since January 1st, 1970 GMT. - * @return the new instance. - * @throws IllegalArgumentException if the date is not in the range [-5877641-06-23; 5881580-07-11]. - */ - public static LocalDate fromMillisSinceEpoch(long millisSinceEpoch) throws IllegalArgumentException { - long daysSinceEpoch = TimeUnit.MILLISECONDS.toDays(millisSinceEpoch); - checkArgument(daysSinceEpoch >= Integer.MIN_VALUE && daysSinceEpoch <= Integer.MAX_VALUE, - "Date should be in the range [-5877641-06-23; 5881580-07-11]"); - - return new LocalDate((int) daysSinceEpoch); - } - - /** - * Builds a new instance from a year/month/day specification. - *
- * This method is not lenient, i.e. '2014-12-32' will not be treated as '2015-01-01', but - * instead throw an {@code IllegalArgumentException}. - * - * @param year the year in ISO format (see {@link LocalDate this class's Javadoc}). - * @param month the month. It is 1-based (e.g. 1 for January). - * @param dayOfMonth the day of the month. - * @return the new instance. - * @throws IllegalArgumentException if the corresponding date does not exist in the ISO8601 - * calendar. - */ - public static LocalDate fromYearMonthDay(int year, int month, int dayOfMonth) { - int calendarYear = (year <= 0) ? -year + 1 : year; - int calendarEra = (year <= 0) ? GregorianCalendar.BC : GregorianCalendar.AD; - - GregorianCalendar calendar = isoCalendar(); - // We can't allow leniency because that could mess with our year shift above (for example if the arguments were 0, 12, 32) - calendar.setLenient(false); - calendar.clear(); - calendar.set(calendarYear, month - 1, dayOfMonth, 0, 0, 0); - calendar.set(Calendar.ERA, calendarEra); - - LocalDate date = fromMillisSinceEpoch(calendar.getTimeInMillis()); - date.calendar = calendar; - return date; - } - - /** - * Returns the number of days since January 1st, 1970 GMT. - * - * @return the number of days. - */ - public int getDaysSinceEpoch() { - return daysSinceEpoch; - } - - /** - * Returns the number of milliseconds since January 1st, 1970 GMT. - * - * @return the number of milliseconds. - */ - public long getMillisSinceEpoch() { - return millisSinceEpoch; - } - - /** - * Returns the year. - * - * @return the year. - */ - public int getYear() { - GregorianCalendar c = getCalendar(); - int year = c.get(Calendar.YEAR); - if (c.get(Calendar.ERA) == GregorianCalendar.BC) - year = -year + 1; - return year; - } - - /** - * Returns the month. - * - * @return the month. It is 1-based, e.g. 1 for January. - */ - public int getMonth() { - return getCalendar().get(Calendar.MONTH) + 1; - } - - /** - * Returns the day in the month. - * - * @return the day in the month. - */ - public int getDay() { - return getCalendar().get(Calendar.DAY_OF_MONTH); - } - - /** - * Return a new {@link LocalDate} with the specified (signed) amount - * of time added to (or subtracted from) the given {@link Calendar} field, - * based on the calendar's rules. - *
- * Note that adding any amount to a field smaller than - * {@link Calendar#DAY_OF_MONTH} will remain without effect, - * as this class does not keep time components. - *
- * See {@link Calendar} javadocs for more information. - * - * @param field a {@link Calendar} field to modify. - * @param amount the amount of date or time to be added to the field. - * @return a new {@link LocalDate} with the specified (signed) amount - * of time added to (or subtracted from) the given {@link Calendar} field. - * @throws IllegalArgumentException if the new date is not in the range [-5877641-06-23; 5881580-07-11]. - */ - public LocalDate add(int field, int amount) { - GregorianCalendar newCalendar = isoCalendar(); - newCalendar.setTimeInMillis(millisSinceEpoch); - newCalendar.add(field, amount); - LocalDate newDate = fromMillisSinceEpoch(newCalendar.getTimeInMillis()); - newDate.calendar = newCalendar; - return newDate; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - - if (o instanceof LocalDate) { - LocalDate that = (LocalDate) o; - return this.daysSinceEpoch == that.daysSinceEpoch; - } - return false; - } - - @Override - public int hashCode() { - return daysSinceEpoch; - } - - @Override - public String toString() { - return String.format("%d-%s-%s", getYear(), - pad2(getMonth()), - pad2(getDay())); - } - - private static String pad2(int i) { - String s = Integer.toString(i); - return s.length() == 2 ? s : "0" + s; - } - - private GregorianCalendar getCalendar() { - // Two threads can race and both create a calendar. This is not a problem. - if (calendar == null) { - - // Use a local variable to only expose after we're done mutating it. - GregorianCalendar tmp = isoCalendar(); - tmp.setTimeInMillis(millisSinceEpoch); - - calendar = tmp; - } - return calendar; - } - - // This matches what Cassandra uses server side (from Joda Time's LocalDate) - private static GregorianCalendar isoCalendar() { - GregorianCalendar calendar = new GregorianCalendar(UTC); - calendar.setGregorianChange(new Date(Long.MIN_VALUE)); - return calendar; - } + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + private final long millisSinceEpoch; + private final int daysSinceEpoch; + + // This gets initialized lazily if we ever need it. Once set, it is effectively immutable. + private volatile GregorianCalendar calendar; + + private LocalDate(int daysSinceEpoch) { + this.daysSinceEpoch = daysSinceEpoch; + this.millisSinceEpoch = TimeUnit.DAYS.toMillis(daysSinceEpoch); + } + + /** + * Builds a new instance from a number of days since January 1st, 1970 GMT. + * + * @param daysSinceEpoch the number of days. + * @return the new instance. + */ + public static LocalDate fromDaysSinceEpoch(int daysSinceEpoch) { + return new LocalDate(daysSinceEpoch); + } + + /** + * Builds a new instance from a number of milliseconds since January 1st, 1970 GMT. Note that if + * the given number does not correspond to a whole number of days, it will be rounded towards 0. + * + * @param millisSinceEpoch the number of milliseconds since January 1st, 1970 GMT. + * @return the new instance. + * @throws IllegalArgumentException if the date is not in the range [-5877641-06-23; + * 5881580-07-11]. + */ + public static LocalDate fromMillisSinceEpoch(long millisSinceEpoch) + throws IllegalArgumentException { + long daysSinceEpoch = TimeUnit.MILLISECONDS.toDays(millisSinceEpoch); + checkArgument( + daysSinceEpoch >= Integer.MIN_VALUE && daysSinceEpoch <= Integer.MAX_VALUE, + "Date should be in the range [-5877641-06-23; 5881580-07-11]"); + + return new LocalDate((int) daysSinceEpoch); + } + + /** + * Builds a new instance from a year/month/day specification. + * + *
This method is not lenient, i.e. '2014-12-32' will not be treated as '2015-01-01', but + * instead throw an {@code IllegalArgumentException}. + * + * @param year the year in ISO format (see {@link LocalDate this class's Javadoc}). + * @param month the month. It is 1-based (e.g. 1 for January). + * @param dayOfMonth the day of the month. + * @return the new instance. + * @throws IllegalArgumentException if the corresponding date does not exist in the ISO8601 + * calendar. + */ + public static LocalDate fromYearMonthDay(int year, int month, int dayOfMonth) { + int calendarYear = (year <= 0) ? -year + 1 : year; + int calendarEra = (year <= 0) ? GregorianCalendar.BC : GregorianCalendar.AD; + + GregorianCalendar calendar = isoCalendar(); + // We can't allow leniency because that could mess with our year shift above (for example if the + // arguments were 0, 12, 32) + calendar.setLenient(false); + calendar.clear(); + calendar.set(calendarYear, month - 1, dayOfMonth, 0, 0, 0); + calendar.set(Calendar.ERA, calendarEra); + + LocalDate date = fromMillisSinceEpoch(calendar.getTimeInMillis()); + date.calendar = calendar; + return date; + } + + /** + * Returns the number of days since January 1st, 1970 GMT. + * + * @return the number of days. + */ + public int getDaysSinceEpoch() { + return daysSinceEpoch; + } + + /** + * Returns the number of milliseconds since January 1st, 1970 GMT. + * + * @return the number of milliseconds. + */ + public long getMillisSinceEpoch() { + return millisSinceEpoch; + } + + /** + * Returns the year. + * + * @return the year. + */ + public int getYear() { + GregorianCalendar c = getCalendar(); + int year = c.get(Calendar.YEAR); + if (c.get(Calendar.ERA) == GregorianCalendar.BC) year = -year + 1; + return year; + } + + /** + * Returns the month. + * + * @return the month. It is 1-based, e.g. 1 for January. + */ + public int getMonth() { + return getCalendar().get(Calendar.MONTH) + 1; + } + + /** + * Returns the day in the month. + * + * @return the day in the month. + */ + public int getDay() { + return getCalendar().get(Calendar.DAY_OF_MONTH); + } + + /** + * Return a new {@link LocalDate} with the specified (signed) amount of time added to (or + * subtracted from) the given {@link Calendar} field, based on the calendar's rules. + * + *
Note that adding any amount to a field smaller than {@link Calendar#DAY_OF_MONTH} will + * remain without effect, as this class does not keep time components. + * + *
See {@link Calendar} javadocs for more information. + * + * @param field a {@link Calendar} field to modify. + * @param amount the amount of date or time to be added to the field. + * @return a new {@link LocalDate} with the specified (signed) amount of time added to (or + * subtracted from) the given {@link Calendar} field. + * @throws IllegalArgumentException if the new date is not in the range [-5877641-06-23; + * 5881580-07-11]. + */ + public LocalDate add(int field, int amount) { + GregorianCalendar newCalendar = isoCalendar(); + newCalendar.setTimeInMillis(millisSinceEpoch); + newCalendar.add(field, amount); + LocalDate newDate = fromMillisSinceEpoch(newCalendar.getTimeInMillis()); + newDate.calendar = newCalendar; + return newDate; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + + if (o instanceof LocalDate) { + LocalDate that = (LocalDate) o; + return this.daysSinceEpoch == that.daysSinceEpoch; + } + return false; + } + + @Override + public int hashCode() { + return daysSinceEpoch; + } + + @Override + public String toString() { + return String.format("%d-%s-%s", getYear(), pad2(getMonth()), pad2(getDay())); + } + + private static String pad2(int i) { + String s = Integer.toString(i); + return s.length() == 2 ? s : "0" + s; + } + + private GregorianCalendar getCalendar() { + // Two threads can race and both create a calendar. This is not a problem. + if (calendar == null) { + + // Use a local variable to only expose after we're done mutating it. + GregorianCalendar tmp = isoCalendar(); + tmp.setTimeInMillis(millisSinceEpoch); + + calendar = tmp; + } + return calendar; + } + + // This matches what Cassandra uses server side (from Joda Time's LocalDate) + private static GregorianCalendar isoCalendar() { + GregorianCalendar calendar = new GregorianCalendar(UTC); + calendar.setGregorianChange(new Date(Long.MIN_VALUE)); + return calendar; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoggingMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/LoggingMonotonicTimestampGenerator.java index cae816324df..676e01da47a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoggingMonotonicTimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LoggingMonotonicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
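Taken together, the reformatted LocalDate API above gives a time-zone-free date with 1-based months, strict validation in fromYearMonthDay, and calendar-field arithmetic via add. A short usage sketch with arbitrary example values:

```java
import com.datastax.driver.core.LocalDate;
import java.util.Calendar;

public class LocalDateExample {
  public static void main(String[] args) {
    // 1970-01-01 is day 0 of the epoch.
    LocalDate epoch = LocalDate.fromDaysSinceEpoch(0);
    System.out.println(epoch); // 1970-01-01

    // Months are 1-based: 2 means February. Invalid dates throw IllegalArgumentException.
    LocalDate date = LocalDate.fromYearMonthDay(2016, 2, 29);
    System.out.println(date.getYear() + " " + date.getMonth() + " " + date.getDay());

    // Adding to a field smaller than DAY_OF_MONTH has no effect; adding days or months does.
    LocalDate nextMonth = date.add(Calendar.MONTH, 1);
    System.out.println(nextMonth); // 2016-03-29
  }
}
```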
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,62 +17,71 @@ */ package com.datastax.driver.core; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - -import static java.util.concurrent.TimeUnit.MICROSECONDS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A monotonic timestamp generator that logs warnings when timestamps drift in the future - * (see this class's constructors and {@link #onDrift(long, long)} for more information). + * A monotonic timestamp generator that logs warnings when timestamps drift in the future (see this + * class's constructors and {@link #onDrift(long, long)} for more information). */ -public abstract class LoggingMonotonicTimestampGenerator extends AbstractMonotonicTimestampGenerator { - private static final Logger LOGGER = LoggerFactory.getLogger(TimestampGenerator.class); +public abstract class LoggingMonotonicTimestampGenerator + extends AbstractMonotonicTimestampGenerator { + private static final Logger LOGGER = LoggerFactory.getLogger(TimestampGenerator.class); - private final long warningThresholdMicros; - private final long warningIntervalMillis; + private final long warningThresholdMicros; + private final long warningIntervalMillis; - private final AtomicLong lastDriftWarning = new AtomicLong(Long.MIN_VALUE); + private final AtomicLong lastDriftWarning = new AtomicLong(Long.MIN_VALUE); - /** - * Creates a new instance. - * - * @param warningThreshold how far in the future timestamps are allowed to drift before a warning is logged. - * @param warningThresholdUnit the unit for {@code warningThreshold}. - * @param warningInterval how often the warning will be logged if timestamps keep drifting above the threshold. - * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. - */ - protected LoggingMonotonicTimestampGenerator( - long warningThreshold, TimeUnit warningThresholdUnit, - long warningInterval, TimeUnit warningIntervalUnit) { - this.warningThresholdMicros = MICROSECONDS.convert(warningThreshold, warningThresholdUnit); - this.warningIntervalMillis = MILLISECONDS.convert(warningInterval, warningIntervalUnit); - } + /** + * Creates a new instance. + * + * @param warningThreshold how far in the future timestamps are allowed to drift before a warning + * is logged. + * @param warningThresholdUnit the unit for {@code warningThreshold}. + * @param warningInterval how often the warning will be logged if timestamps keep drifting above + * the threshold. + * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. + */ + protected LoggingMonotonicTimestampGenerator( + long warningThreshold, + TimeUnit warningThresholdUnit, + long warningInterval, + TimeUnit warningIntervalUnit) { + this.warningThresholdMicros = MICROSECONDS.convert(warningThreshold, warningThresholdUnit); + this.warningIntervalMillis = MILLISECONDS.convert(warningInterval, warningIntervalUnit); + } - /** - * {@inheritDoc} - *
- * This implementation logs a warning at regular intervals when timestamps drift more than a specified threshold in - * the future. These messages are emitted at {@code WARN} level in the category - * {@code com.datastax.driver.core.TimestampGenerator}. - * - * @param currentTick the current clock tick. - * @param lastTimestamp the last timestamp that was generated. - */ - protected void onDrift(long currentTick, long lastTimestamp) { - if (LOGGER.isWarnEnabled() && warningThresholdMicros >= 0 && lastTimestamp > currentTick + warningThresholdMicros) { - long now = System.currentTimeMillis(); - long lastWarning = lastDriftWarning.get(); - if (now > lastWarning + warningIntervalMillis && lastDriftWarning.compareAndSet(lastWarning, now)) { - LOGGER.warn( - "Clock skew detected: current tick ({}) was {} microseconds behind the last generated timestamp ({}), " + - "returned timestamps will be artificially incremented to guarantee monotonicity.", - currentTick, lastTimestamp - currentTick, lastTimestamp); - } - } + /** + * {@inheritDoc} + * + *
This implementation logs a warning at regular intervals when timestamps drift more than a + * specified threshold in the future. These messages are emitted at {@code WARN} level in the + * category {@code com.datastax.driver.core.TimestampGenerator}. + * + * @param currentTick the current clock tick. + * @param lastTimestamp the last timestamp that was generated. + */ + protected void onDrift(long currentTick, long lastTimestamp) { + if (LOGGER.isWarnEnabled() + && warningThresholdMicros >= 0 + && lastTimestamp > currentTick + warningThresholdMicros) { + long now = System.currentTimeMillis(); + long lastWarning = lastDriftWarning.get(); + if (now > lastWarning + warningIntervalMillis + && lastDriftWarning.compareAndSet(lastWarning, now)) { + LOGGER.warn( + "Clock skew detected: current tick ({}) was {} microseconds behind the last generated timestamp ({}), " + + "returned timestamps will be artificially incremented to guarantee monotonicity.", + currentTick, + lastTimestamp - currentTick, + lastTimestamp); + } } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java b/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java index 12f50bc13ad..8665e11d794 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java +++ b/driver-core/src/main/java/com/datastax/driver/core/MD5Digest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,46 +18,42 @@ package com.datastax.driver.core; import com.datastax.driver.core.utils.Bytes; - import java.util.Arrays; /** * The result of the computation of an MD5 digest. - *
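The onDrift hook above only reports the problem: it logs, rate limited by the warning interval, when the last generated timestamp has run more than the configured threshold ahead of the current clock tick, which happens when monotonicity forces the generator to keep incrementing past the clock. A self-contained sketch of that underlying idea, not the driver's actual generator; the one-second warning threshold is arbitrary:

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class MonotonicMicrosExample {
  private final AtomicLong last = new AtomicLong();
  // Illustrative threshold: warn if we run more than 1 second ahead of the clock.
  private static final long DRIFT_WARN_MICROS = TimeUnit.SECONDS.toMicros(1);

  public long next() {
    while (true) {
      long tick = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis());
      long previous = last.get();
      // Strictly increasing: never go backwards, always advance by at least one microsecond.
      long candidate = Math.max(tick, previous + 1);
      if (last.compareAndSet(previous, candidate)) {
        if (candidate > tick + DRIFT_WARN_MICROS) {
          System.err.println(
              "Timestamps are drifting ahead of the clock by " + (candidate - tick) + " microseconds");
        }
        return candidate;
      }
      // Lost the race with another thread; retry.
    }
  }

  public static void main(String[] args) {
    MonotonicMicrosExample generator = new MonotonicMicrosExample();
    long a = generator.next();
    long b = generator.next();
    System.out.println(a < b); // always true
  }
}
```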
- * A MD5 is really just a byte[] but arrays are a no go as map keys. We could - * wrap it in a ByteBuffer but: - * 1. MD5Digest is a more explicit name than ByteBuffer to represent a md5. - * 2. Using our own class allows to use our FastByteComparison for equals. + * + *
A MD5 is really just a byte[] but arrays are a no go as map keys. We could wrap it in a + * ByteBuffer but: 1. MD5Digest is a more explicit name than ByteBuffer to represent a md5. 2. Using + * our own class allows to use our FastByteComparison for equals. */ class MD5Digest { - public final byte[] bytes; - - private MD5Digest(byte[] bytes) { - this.bytes = bytes; - } - - public static MD5Digest wrap(byte[] digest) { - return new MD5Digest(digest); - } - - @Override - public final int hashCode() { - return Arrays.hashCode(bytes); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof MD5Digest)) - return false; - MD5Digest that = (MD5Digest) o; - // handles nulls properly - return Arrays.equals(this.bytes, that.bytes); - } - - @Override - public String toString() { - return Bytes.toHexString(bytes); - } + public final byte[] bytes; + + private MD5Digest(byte[] bytes) { + this.bytes = bytes; + } + + public static MD5Digest wrap(byte[] digest) { + return new MD5Digest(digest); + } + + @Override + public final int hashCode() { + return Arrays.hashCode(bytes); + } + + @Override + public final boolean equals(Object o) { + if (!(o instanceof MD5Digest)) return false; + MD5Digest that = (MD5Digest) o; + // handles nulls properly + return Arrays.equals(this.bytes, that.bytes); + } + + @Override + public String toString() { + return Bytes.toHexString(bytes); + } } - diff --git a/driver-core/src/main/java/com/datastax/driver/core/MaterializedViewMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/MaterializedViewMetadata.java index e232e248083..ca87a6c126b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/MaterializedViewMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/MaterializedViewMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,225 +17,294 @@ */ package com.datastax.driver.core; - import com.datastax.driver.core.utils.MoreObjects; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - /** - * An immutable representation of a materialized view. - * Materialized views are available starting from Cassandra 3.0. + * An immutable representation of a materialized view. Materialized views are available starting + * from Cassandra 3.0. 
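The MD5Digest comment above calls raw byte[] keys "a no go" because arrays inherit identity-based equals and hashCode from Object, so two digests with identical content would not match in a map. A tiny illustration of that pitfall and of the Arrays-based comparison the wrapper delegates to:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ArrayKeyExample {
  public static void main(String[] args) {
    byte[] a = {1, 2, 3};
    byte[] b = {1, 2, 3};

    Map<byte[], String> byArray = new HashMap<byte[], String>();
    byArray.put(a, "value");
    // Identity semantics: an equal-but-distinct array does not find the entry.
    System.out.println(byArray.get(b)); // null

    // Content-based comparison requires Arrays.equals/hashCode, which is what a
    // wrapper class such as MD5Digest above delegates to.
    System.out.println(Arrays.equals(a, b)); // true
  }
}
```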
*/ public class MaterializedViewMetadata extends AbstractTableMetadata { - private static final Logger logger = LoggerFactory.getLogger(MaterializedViewMetadata.class); - - private final TableMetadata baseTable; - - private final boolean includeAllColumns; - - private final String whereClause; - - private MaterializedViewMetadata( - KeyspaceMetadata keyspace, - TableMetadata baseTable, - String name, - UUID id, - List partitionKey, - List clusteringColumns, - Map columns, - boolean includeAllColumns, - String whereClause, - TableOptionsMetadata options, - List clusteringOrder, - VersionNumber cassandraVersion) { - super(keyspace, name, id, partitionKey, clusteringColumns, columns, options, clusteringOrder, cassandraVersion); - this.baseTable = baseTable; - this.includeAllColumns = includeAllColumns; - this.whereClause = whereClause; - } - - static MaterializedViewMetadata build(KeyspaceMetadata keyspace, Row row, Map rawCols, VersionNumber cassandraVersion, Cluster cluster) { - - String name = row.getString("view_name"); - String tableName = row.getString("base_table_name"); - TableMetadata baseTable = keyspace.tables.get(tableName); - if (baseTable == null) { - logger.trace(String.format("Cannot find base table %s for materialized view %s.%s: " - + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\") will return null", - tableName, keyspace.getName(), name, keyspace.getName(), name)); - return null; - } - - UUID id = row.getUUID("id"); - boolean includeAllColumns = row.getBool("include_all_columns"); - String whereClause = row.getString("where_clause"); - - int partitionKeySize = findCollectionSize(rawCols.values(), ColumnMetadata.Raw.Kind.PARTITION_KEY); - int clusteringSize = findCollectionSize(rawCols.values(), ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN); - - List partitionKey = new ArrayList(Collections.nCopies(partitionKeySize, null)); - List clusteringColumns = new ArrayList(Collections.nCopies(clusteringSize, null)); - List clusteringOrder = new ArrayList(Collections.nCopies(clusteringSize, null)); - - // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. - LinkedHashMap columns = new LinkedHashMap(); - - TableOptionsMetadata options = null; - try { - options = new TableOptionsMetadata(row, false, cassandraVersion); - } catch (RuntimeException e) { - // See ControlConnection#refreshSchema for why we'd rather not probably this further. Since table options is one thing - // that tends to change often in Cassandra, it's worth special casing this. - logger.error(String.format("Error parsing schema options for view %s.%s: " - + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\").getOptions() will return null", - keyspace.getName(), name, keyspace.getName(), name), e); - } - - MaterializedViewMetadata view = new MaterializedViewMetadata( - keyspace, baseTable, name, id, partitionKey, clusteringColumns, columns, - includeAllColumns, whereClause, options, clusteringOrder, cassandraVersion); - - // We use this temporary set just so non PK columns are added in lexicographical order, which is the one of a - // 'SELECT * FROM ...' 
- Set otherColumns = new TreeSet(columnMetadataComparator); - for (ColumnMetadata.Raw rawCol : rawCols.values()) { - DataType dataType; - if (cassandraVersion.getMajor() >= 3) { - dataType = DataTypeCqlNameParser.parse(rawCol.dataType, cluster, keyspace.getName(), keyspace.userTypes, keyspace.userTypes, false, false); - } else { - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - dataType = DataTypeClassNameParser.parseOne(rawCol.dataType, protocolVersion, codecRegistry); - } - ColumnMetadata col = ColumnMetadata.fromRaw(view, rawCol, dataType); - switch (rawCol.kind) { - case PARTITION_KEY: - partitionKey.set(rawCol.position, col); - break; - case CLUSTERING_COLUMN: - clusteringColumns.set(rawCol.position, col); - clusteringOrder.set(rawCol.position, rawCol.isReversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - otherColumns.add(col); - break; - } - } - for (ColumnMetadata c : partitionKey) - columns.put(c.getName(), c); - for (ColumnMetadata c : clusteringColumns) - columns.put(c.getName(), c); - for (ColumnMetadata c : otherColumns) - columns.put(c.getName(), c); - - baseTable.add(view); - - return view; - + private static final Logger logger = LoggerFactory.getLogger(MaterializedViewMetadata.class); + + private volatile TableMetadata baseTable; + + private final boolean includeAllColumns; + + private final String whereClause; + + private MaterializedViewMetadata( + KeyspaceMetadata keyspace, + TableMetadata baseTable, + String name, + UUID id, + List partitionKey, + List clusteringColumns, + Map columns, + boolean includeAllColumns, + String whereClause, + TableOptionsMetadata options, + List clusteringOrder, + VersionNumber cassandraVersion) { + super( + keyspace, + name, + id, + partitionKey, + clusteringColumns, + columns, + options, + clusteringOrder, + cassandraVersion); + this.baseTable = baseTable; + this.includeAllColumns = includeAllColumns; + this.whereClause = whereClause; + } + + static MaterializedViewMetadata build( + KeyspaceMetadata keyspace, + Row row, + Map rawCols, + VersionNumber cassandraVersion, + Cluster cluster) { + + String name = row.getString("view_name"); + String tableName = row.getString("base_table_name"); + TableMetadata baseTable = keyspace.tables.get(tableName); + if (baseTable == null) { + logger.trace( + String.format( + "Cannot find base table %s for materialized view %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\") will return null", + tableName, keyspace.getName(), name, keyspace.getName(), name)); + return null; } - private static int findCollectionSize(Collection cols, ColumnMetadata.Raw.Kind kind) { - int maxId = -1; - for (ColumnMetadata.Raw col : cols) - if (col.kind == kind) - maxId = Math.max(maxId, col.position); - return maxId + 1; + UUID id = row.getUUID("id"); + boolean includeAllColumns = row.getBool("include_all_columns"); + String whereClause = row.getString("where_clause"); + + int partitionKeySize = + findCollectionSize(rawCols.values(), ColumnMetadata.Raw.Kind.PARTITION_KEY); + int clusteringSize = + findCollectionSize(rawCols.values(), ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN); + + List partitionKey = + new ArrayList(Collections.nCopies(partitionKeySize, null)); + List clusteringColumns = + new ArrayList(Collections.nCopies(clusteringSize, null)); + List clusteringOrder = + new ArrayList(Collections.nCopies(clusteringSize, null)); + + // We use a 
linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. + LinkedHashMap columns = new LinkedHashMap(); + + TableOptionsMetadata options = null; + try { + options = new TableOptionsMetadata(row, false, cassandraVersion); + } catch (RuntimeException e) { + // See ControlConnection#refreshSchema for why we'd rather not probably this further. Since + // table options is one thing + // that tends to change often in Cassandra, it's worth special casing this. + logger.error( + String.format( + "Error parsing schema options for view %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\").getOptions() will return null", + keyspace.getName(), name, keyspace.getName(), name), + e); } - /** - * Return this materialized view's base table. - * - * @return this materialized view's base table. - */ - public TableMetadata getBaseTable() { - return baseTable; + MaterializedViewMetadata view = + new MaterializedViewMetadata( + keyspace, + baseTable, + name, + id, + partitionKey, + clusteringColumns, + columns, + includeAllColumns, + whereClause, + options, + clusteringOrder, + cassandraVersion); + + // We use this temporary set just so non PK columns are added in lexicographical order, which is + // the one of a + // 'SELECT * FROM ...' + Set otherColumns = new TreeSet(columnMetadataComparator); + for (ColumnMetadata.Raw rawCol : rawCols.values()) { + DataType dataType; + if (cassandraVersion.getMajor() >= 3) { + dataType = + DataTypeCqlNameParser.parse( + rawCol.dataType, + cluster, + keyspace.getName(), + keyspace.userTypes, + keyspace.userTypes, + false, + false); + } else { + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + dataType = + DataTypeClassNameParser.parseOne(rawCol.dataType, protocolVersion, codecRegistry); + } + ColumnMetadata col = ColumnMetadata.fromRaw(view, rawCol, dataType); + switch (rawCol.kind) { + case PARTITION_KEY: + partitionKey.set(rawCol.position, col); + break; + case CLUSTERING_COLUMN: + clusteringColumns.set(rawCol.position, col); + clusteringOrder.set( + rawCol.position, rawCol.isReversed ? 
ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + otherColumns.add(col); + break; + } } - - @Override - protected String asCQLQuery(boolean formatted) { - - String keyspaceName = Metadata.quoteIfNecessary(keyspace.getName()); - String baseTableName = Metadata.quoteIfNecessary(baseTable.getName()); - String viewName = Metadata.quoteIfNecessary(name); - - StringBuilder sb = new StringBuilder(); - sb.append("CREATE MATERIALIZED VIEW ") - .append(keyspaceName).append('.').append(viewName) - .append(" AS "); - newLine(sb, formatted); - - // SELECT - sb.append("SELECT "); - if (includeAllColumns) { - sb.append(" * "); - } else { - Iterator it = columns.values().iterator(); - while (it.hasNext()) { - ColumnMetadata column = it.next(); - sb.append(spaces(4, formatted)).append(Metadata.quoteIfNecessary(column.getName())); - if (it.hasNext()) sb.append(","); - sb.append(" "); - newLine(sb, formatted); - } - } - - // FROM - newLine(sb.append("FROM ").append(keyspaceName).append('.').append(baseTableName).append(" "), formatted); - - // WHERE - // the CQL grammar allows missing WHERE clauses, although C* currently disallows it - if (whereClause != null && !whereClause.isEmpty()) - newLine(sb.append("WHERE ").append(whereClause).append(' '), formatted); - - // PK - sb.append("PRIMARY KEY ("); - if (partitionKey.size() == 1) { - sb.append(Metadata.quoteIfNecessary(partitionKey.get(0).getName())); - } else { - sb.append('('); - boolean first = true; - for (ColumnMetadata cm : partitionKey) { - if (first) - first = false; - else - sb.append(", "); - sb.append(Metadata.quoteIfNecessary(cm.getName())); - } - sb.append(')'); - } - for (ColumnMetadata cm : clusteringColumns) - sb.append(", ").append(Metadata.quoteIfNecessary(cm.getName())); - sb.append(')'); - - appendOptions(sb, formatted); - return sb.toString(); - + for (ColumnMetadata c : partitionKey) columns.put(c.getName(), c); + for (ColumnMetadata c : clusteringColumns) columns.put(c.getName(), c); + for (ColumnMetadata c : otherColumns) columns.put(c.getName(), c); + + baseTable.add(view); + + return view; + } + + private static int findCollectionSize( + Collection cols, ColumnMetadata.Raw.Kind kind) { + int maxId = -1; + for (ColumnMetadata.Raw col : cols) if (col.kind == kind) maxId = Math.max(maxId, col.position); + return maxId + 1; + } + + /** + * Return this materialized view's base table. + * + * @return this materialized view's base table. 
+ */ + public TableMetadata getBaseTable() { + return baseTable; + } + + @Override + protected String asCQLQuery(boolean formatted) { + + String keyspaceName = Metadata.quoteIfNecessary(keyspace.getName()); + String baseTableName = Metadata.quoteIfNecessary(baseTable.getName()); + String viewName = Metadata.quoteIfNecessary(name); + + StringBuilder sb = new StringBuilder(); + sb.append("CREATE MATERIALIZED VIEW ") + .append(keyspaceName) + .append('.') + .append(viewName) + .append(" AS"); + + // SELECT + spaceOrNewLine(sb, formatted).append("SELECT "); + if (includeAllColumns) { + sb.append("*"); + } else { + Iterator it = columns.values().iterator(); + while (it.hasNext()) { + ColumnMetadata column = it.next(); + sb.append(Metadata.quoteIfNecessary(column.getName())); + if (it.hasNext()) sb.append(", "); + } } - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (!(other instanceof MaterializedViewMetadata)) - return false; - - MaterializedViewMetadata that = (MaterializedViewMetadata) other; - return MoreObjects.equal(this.name, that.name) && - MoreObjects.equal(this.id, that.id) && - MoreObjects.equal(this.partitionKey, that.partitionKey) && - MoreObjects.equal(this.clusteringColumns, that.clusteringColumns) && - MoreObjects.equal(this.columns, that.columns) && - MoreObjects.equal(this.options, that.options) && - MoreObjects.equal(this.clusteringOrder, that.clusteringOrder) && - MoreObjects.equal(this.baseTable.getName(), that.baseTable.getName()) && - this.includeAllColumns == that.includeAllColumns; + // FROM + spaceOrNewLine(sb, formatted) + .append("FROM ") + .append(keyspaceName) + .append('.') + .append(baseTableName); + + // WHERE + // the CQL grammar allows missing WHERE clauses, although C* currently disallows it + if (whereClause != null && !whereClause.isEmpty()) { + spaceOrNewLine(sb, formatted).append("WHERE ").append(whereClause); } - @Override - public int hashCode() { - return MoreObjects.hashCode(name, id, partitionKey, clusteringColumns, columns, options, clusteringOrder, baseTable.getName(), includeAllColumns); + // PK + spaceOrNewLine(sb, formatted).append("PRIMARY KEY ("); + if (partitionKey.size() == 1) { + sb.append(Metadata.quoteIfNecessary(partitionKey.get(0).getName())); + } else { + sb.append('('); + boolean first = true; + for (ColumnMetadata cm : partitionKey) { + if (first) first = false; + else sb.append(", "); + sb.append(Metadata.quoteIfNecessary(cm.getName())); + } + sb.append(')'); } + for (ColumnMetadata cm : clusteringColumns) + sb.append(", ").append(Metadata.quoteIfNecessary(cm.getName())); + sb.append(')'); + + // append 3 extra spaces if formatted to align WITH. + spaceOrNewLine(sb, formatted); + appendOptions(sb, formatted); + return sb.toString(); + } + + /** + * Updates the base table for this view and adds it to that table. This is used when a table + * update is processed and the views need to be carried over. 
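The asCQLQuery(boolean) implementation above rebuilds the CREATE MATERIALIZED VIEW statement (SELECT list, FROM, optional WHERE, PRIMARY KEY and options) from the view metadata. A hedged usage sketch: the keyspace and view names are placeholders, and the accessor is assumed to be getMaterializedView(String) on KeyspaceMetadata (the driver's own log messages above spell it getView), so the exact method name may differ by version:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.MaterializedViewMetadata;

public class ViewMetadataExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      KeyspaceMetadata ks = cluster.getMetadata().getKeyspace("ks");
      // Assumed accessor name; see the note above about getView vs getMaterializedView.
      MaterializedViewMetadata view = ks.getMaterializedView("user_by_email");
      if (view != null) {
        System.out.println(view.getBaseTable().getName());
        System.out.println(view.asCQLQuery());
      }
    } finally {
      cluster.close();
    }
  }
}
```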
+ */ + void setBaseTable(TableMetadata table) { + this.baseTable = table; + table.add(this); + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof MaterializedViewMetadata)) return false; + + MaterializedViewMetadata that = (MaterializedViewMetadata) other; + return MoreObjects.equal(this.name, that.name) + && MoreObjects.equal(this.id, that.id) + && MoreObjects.equal(this.partitionKey, that.partitionKey) + && MoreObjects.equal(this.clusteringColumns, that.clusteringColumns) + && MoreObjects.equal(this.columns, that.columns) + && MoreObjects.equal(this.options, that.options) + && MoreObjects.equal(this.clusteringOrder, that.clusteringOrder) + && MoreObjects.equal(this.baseTable.getName(), that.baseTable.getName()) + && this.includeAllColumns == that.includeAllColumns; + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + name, + id, + partitionKey, + clusteringColumns, + columns, + options, + clusteringOrder, + baseTable.getName(), + includeAllColumns); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Message.java b/driver-core/src/main/java/com/datastax/driver/core/Message.java index d2ddba63248..05e9ef3500c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Message.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Message.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,365 +25,395 @@ import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.handler.codec.MessageToMessageEncoder; import io.netty.util.AttributeKey; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.nio.ByteBuffer; -import java.util.*; - -/** - * A message from the CQL binary protocol. - */ +/** A message from the CQL binary protocol. 
*/ abstract class Message { - protected static final Logger logger = LoggerFactory.getLogger(Message.class); + protected static final Logger logger = LoggerFactory.getLogger(Message.class); - static AttributeKey CODEC_REGISTRY_ATTRIBUTE_KEY = AttributeKey.valueOf("com.datastax.driver.core.CodecRegistry"); + static AttributeKey CODEC_REGISTRY_ATTRIBUTE_KEY = + AttributeKey.valueOf("com.datastax.driver.core.CodecRegistry"); - interface Coder { - void encode(R request, ByteBuf dest, ProtocolVersion version); + interface Coder { + void encode(R request, ByteBuf dest, ProtocolVersion version); - int encodedSize(R request, ProtocolVersion version); - } + int encodedSize(R request, ProtocolVersion version); + } - interface Decoder { - R decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry); - } + interface Decoder { + R decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry); + } - private volatile int streamId = -1; + private volatile int streamId = -1; - /** - * A generic key-value custom payload. Custom payloads are simply - * ignored by the default QueryHandler implementation server-side. - * - * @since Protocol V4 - */ - private volatile Map customPayload; + /** + * A generic key-value custom payload. Custom payloads are simply ignored by the default + * QueryHandler implementation server-side. + * + * @since Protocol V4 + */ + private volatile Map customPayload; - protected Message() { - } + protected Message() {} - Message setStreamId(int streamId) { - this.streamId = streamId; - return this; - } + Message setStreamId(int streamId) { + this.streamId = streamId; + return this; + } - int getStreamId() { - return streamId; - } + int getStreamId() { + return streamId; + } - Map getCustomPayload() { - return customPayload; - } + Map getCustomPayload() { + return customPayload; + } - Message setCustomPayload(Map customPayload) { - this.customPayload = customPayload; - return this; - } + Message setCustomPayload(Map customPayload) { + this.customPayload = customPayload; + return this; + } - static abstract class Request extends Message { - - enum Type { - STARTUP(1, Requests.Startup.coder), - CREDENTIALS(4, Requests.Credentials.coder), - OPTIONS(5, Requests.Options.coder), - QUERY(7, Requests.Query.coder), - PREPARE(9, Requests.Prepare.coder), - EXECUTE(10, Requests.Execute.coder), - REGISTER(11, Requests.Register.coder), - BATCH(13, Requests.Batch.coder), - AUTH_RESPONSE(15, Requests.AuthResponse.coder); - - final int opcode; - final Coder coder; - - Type(int opcode, Coder coder) { - this.opcode = opcode; - this.coder = coder; - } - } + abstract static class Request extends Message { - final Type type; - private final boolean tracingRequested; + enum Type { + STARTUP(1, Requests.Startup.coder), + CREDENTIALS(4, Requests.Credentials.coder), + OPTIONS(5, Requests.Options.coder), + QUERY(7, Requests.Query.coder), + PREPARE(9, Requests.Prepare.coder), + EXECUTE(10, Requests.Execute.coder), + REGISTER(11, Requests.Register.coder), + BATCH(13, Requests.Batch.coder), + AUTH_RESPONSE(15, Requests.AuthResponse.coder); - protected Request(Type type) { - this(type, false); - } + final int opcode; + final Coder coder; - protected Request(Type type, boolean tracingRequested) { - this.type = type; - this.tracingRequested = tracingRequested; - } + Type(int opcode, Coder coder) { + this.opcode = opcode; + this.coder = coder; + } + } - @Override - Request setStreamId(int streamId) { - // JAVA-1179: defensively guard against reusing the same Request object twice. 
- // If no streamId was ever set we can use this object directly, otherwise make a copy. - if (getStreamId() < 0) - return (Request) super.setStreamId(streamId); - else { - Request copy = this.copy(); - copy.setStreamId(streamId); - return copy; - } - } + final Type type; + private final boolean tracingRequested; - boolean isTracingRequested() { - return tracingRequested; - } + protected Request(Type type) { + this(type, false); + } - ConsistencyLevel consistency() { - switch (this.type) { - case QUERY: - return ((Requests.Query) this).options.consistency; - case EXECUTE: - return ((Requests.Execute) this).options.consistency; - case BATCH: - return ((Requests.Batch) this).options.consistency; - default: - return null; - } - } + protected Request(Type type, boolean tracingRequested) { + this.type = type; + this.tracingRequested = tracingRequested; + } - ConsistencyLevel serialConsistency() { - switch (this.type) { - case QUERY: - return ((Requests.Query) this).options.serialConsistency; - case EXECUTE: - return ((Requests.Execute) this).options.serialConsistency; - case BATCH: - return ((Requests.Batch) this).options.serialConsistency; - default: - return null; - } - } + @Override + Request setStreamId(int streamId) { + // JAVA-1179: defensively guard against reusing the same Request object twice. + // If no streamId was ever set we can use this object directly, otherwise make a copy. + if (getStreamId() < 0) return (Request) super.setStreamId(streamId); + else { + Request copy = this.copy(); + copy.setStreamId(streamId); + return copy; + } + } - long defaultTimestamp() { - switch (this.type) { - case QUERY: - return ((Requests.Query) this).options.defaultTimestamp; - case EXECUTE: - return ((Requests.Execute) this).options.defaultTimestamp; - case BATCH: - return ((Requests.Batch) this).options.defaultTimestamp; - default: - return 0; - } - } + boolean isTracingRequested() { + return tracingRequested; + } - ByteBuffer pagingState() { - switch (this.type) { - case QUERY: - return ((Requests.Query) this).options.pagingState; - case EXECUTE: - return ((Requests.Execute) this).options.pagingState; - default: - return null; - } - } + ConsistencyLevel consistency() { + switch (this.type) { + case QUERY: + return ((Requests.Query) this).options.consistency; + case EXECUTE: + return ((Requests.Execute) this).options.consistency; + case BATCH: + return ((Requests.Batch) this).options.consistency; + default: + return null; + } + } - Request copy() { - Request request = copyInternal(); - request.setCustomPayload(this.getCustomPayload()); - return request; - } + ConsistencyLevel serialConsistency() { + switch (this.type) { + case QUERY: + return ((Requests.Query) this).options.serialConsistency; + case EXECUTE: + return ((Requests.Execute) this).options.serialConsistency; + case BATCH: + return ((Requests.Batch) this).options.serialConsistency; + default: + return null; + } + } - protected abstract Request copyInternal(); + long defaultTimestamp() { + switch (this.type) { + case QUERY: + return ((Requests.Query) this).options.defaultTimestamp; + case EXECUTE: + return ((Requests.Execute) this).options.defaultTimestamp; + case BATCH: + return ((Requests.Batch) this).options.defaultTimestamp; + default: + return 0; + } + } - Request copy(ConsistencyLevel newConsistencyLevel) { - Request request = copyInternal(newConsistencyLevel); - request.setCustomPayload(this.getCustomPayload()); - return request; - } + ByteBuffer pagingState() { + switch (this.type) { + case QUERY: + return ((Requests.Query) 
this).options.pagingState; + case EXECUTE: + return ((Requests.Execute) this).options.pagingState; + default: + return null; + } + } - protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { - throw new UnsupportedOperationException(); - } + Request copy() { + Request request = copyInternal(); + request.setCustomPayload(this.getCustomPayload()); + return request; } - static abstract class Response extends Message { - - enum Type { - ERROR(0, Responses.Error.decoder), - READY(2, Responses.Ready.decoder), - AUTHENTICATE(3, Responses.Authenticate.decoder), - SUPPORTED(6, Responses.Supported.decoder), - RESULT(8, Responses.Result.decoder), - EVENT(12, Responses.Event.decoder), - AUTH_CHALLENGE(14, Responses.AuthChallenge.decoder), - AUTH_SUCCESS(16, Responses.AuthSuccess.decoder); - - final int opcode; - final Decoder decoder; - - private static final Type[] opcodeIdx; - - static { - int maxOpcode = -1; - for (Type type : Type.values()) - maxOpcode = Math.max(maxOpcode, type.opcode); - opcodeIdx = new Type[maxOpcode + 1]; - for (Type type : Type.values()) { - if (opcodeIdx[type.opcode] != null) - throw new IllegalStateException("Duplicate opcode"); - opcodeIdx[type.opcode] = type; - } - } - - Type(int opcode, Decoder decoder) { - this.opcode = opcode; - this.decoder = decoder; - } - - static Type fromOpcode(int opcode) { - if (opcode < 0 || opcode >= opcodeIdx.length) - throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); - Type t = opcodeIdx[opcode]; - if (t == null) - throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); - return t; - } - } + protected abstract Request copyInternal(); - final Type type; - protected volatile UUID tracingId; - protected volatile List warnings; + Request copy(ConsistencyLevel newConsistencyLevel) { + Request request = copyInternal(newConsistencyLevel); + request.setCustomPayload(this.getCustomPayload()); + return request; + } - protected Response(Type type) { - this.type = type; + protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { + throw new UnsupportedOperationException(); + } + } + + abstract static class Response extends Message { + + enum Type { + ERROR(0, Responses.Error.decoder), + READY(2, Responses.Ready.decoder), + AUTHENTICATE(3, Responses.Authenticate.decoder), + SUPPORTED(6, Responses.Supported.decoder), + RESULT(8, Responses.Result.decoder), + EVENT(12, Responses.Event.decoder), + AUTH_CHALLENGE(14, Responses.AuthChallenge.decoder), + AUTH_SUCCESS(16, Responses.AuthSuccess.decoder); + + final int opcode; + final Decoder decoder; + + private static final Type[] opcodeIdx; + + static { + int maxOpcode = -1; + for (Type type : Type.values()) maxOpcode = Math.max(maxOpcode, type.opcode); + opcodeIdx = new Type[maxOpcode + 1]; + for (Type type : Type.values()) { + if (opcodeIdx[type.opcode] != null) throw new IllegalStateException("Duplicate opcode"); + opcodeIdx[type.opcode] = type; } + } + + Type(int opcode, Decoder decoder) { + this.opcode = opcode; + this.decoder = decoder; + } + + static Type fromOpcode(int opcode) { + if (opcode < 0 || opcode >= opcodeIdx.length) + throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); + Type t = opcodeIdx[opcode]; + if (t == null) + throw new DriverInternalError(String.format("Unknown response opcode %d", opcode)); + return t; + } + } - Response setTracingId(UUID tracingId) { - this.tracingId = tracingId; - return this; - } + final Type type; + protected volatile UUID tracingId; + 
protected volatile List warnings; - UUID getTracingId() { - return tracingId; - } + protected Response(Type type) { + this.type = type; + } - Response setWarnings(List warnings) { - this.warnings = warnings; - return this; - } + Response setTracingId(UUID tracingId) { + this.tracingId = tracingId; + return this; } - @ChannelHandler.Sharable - static class ProtocolDecoder extends MessageToMessageDecoder { - - @Override - protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { - boolean isTracing = frame.header.flags.contains(Frame.Header.Flag.TRACING); - boolean isCustomPayload = frame.header.flags.contains(Frame.Header.Flag.CUSTOM_PAYLOAD); - UUID tracingId = isTracing ? CBUtil.readUUID(frame.body) : null; - Map customPayload = isCustomPayload ? CBUtil.readBytesMap(frame.body) : null; - - if (customPayload != null && logger.isTraceEnabled()) { - logger.trace("Received payload: {} ({} bytes total)", printPayload(customPayload), CBUtil.sizeOfBytesMap(customPayload)); - } - - boolean hasWarnings = frame.header.flags.contains(Frame.Header.Flag.WARNING); - List warnings = hasWarnings ? CBUtil.readStringList(frame.body) : Collections.emptyList(); - - try { - CodecRegistry codecRegistry = ctx.channel().attr(CODEC_REGISTRY_ATTRIBUTE_KEY).get(); - assert codecRegistry != null; - Response response = Response.Type.fromOpcode(frame.header.opcode).decoder.decode(frame.body, frame.header.version, codecRegistry); - response - .setTracingId(tracingId) - .setWarnings(warnings) - .setCustomPayload(customPayload) - .setStreamId(frame.header.streamId); - out.add(response); - } finally { - frame.body.release(); - } - } + UUID getTracingId() { + return tracingId; + } + Response setWarnings(List warnings) { + this.warnings = warnings; + return this; + } + } + + @ChannelHandler.Sharable + static class ProtocolDecoder extends MessageToMessageDecoder { + + @Override + protected void decode(ChannelHandlerContext ctx, Frame frame, List out) + throws Exception { + boolean isTracing = frame.header.flags.contains(Frame.Header.Flag.TRACING); + boolean hasWarnings = frame.header.flags.contains(Frame.Header.Flag.WARNING); + boolean isCustomPayload = frame.header.flags.contains(Frame.Header.Flag.CUSTOM_PAYLOAD); + UUID tracingId = isTracing ? CBUtil.readUUID(frame.body) : null; + + List warnings = + hasWarnings ? CBUtil.readStringList(frame.body) : Collections.emptyList(); + + Map customPayload = + isCustomPayload ? 
CBUtil.readBytesMap(frame.body) : null; + + if (customPayload != null && logger.isTraceEnabled()) { + logger.trace( + "Received payload: {} ({} bytes total)", + printPayload(customPayload), + CBUtil.sizeOfBytesMap(customPayload)); + } + + try { + CodecRegistry codecRegistry = ctx.channel().attr(CODEC_REGISTRY_ATTRIBUTE_KEY).get(); + assert codecRegistry != null; + Response response = + Response.Type.fromOpcode(frame.header.opcode) + .decoder + .decode(frame.body, frame.header.version, codecRegistry); + response + .setTracingId(tracingId) + .setWarnings(warnings) + .setCustomPayload(customPayload) + .setStreamId(frame.header.streamId); + out.add(response); + } finally { + frame.body.release(); + } } + } - @ChannelHandler.Sharable - static class ProtocolEncoder extends MessageToMessageEncoder { + @ChannelHandler.Sharable + static class ProtocolEncoder extends MessageToMessageEncoder { - private final ProtocolVersion protocolVersion; + final ProtocolVersion protocolVersion; - ProtocolEncoder(ProtocolVersion version) { - this.protocolVersion = version; - } + ProtocolEncoder(ProtocolVersion version) { + this.protocolVersion = version; + } - @Override - protected void encode(ChannelHandlerContext ctx, Request request, List out) throws Exception { - EnumSet flags = EnumSet.noneOf(Frame.Header.Flag.class); - if (request.isTracingRequested()) - flags.add(Frame.Header.Flag.TRACING); - if (protocolVersion == ProtocolVersion.NEWEST_BETA) - flags.add(Frame.Header.Flag.USE_BETA); - Map customPayload = request.getCustomPayload(); - if (customPayload != null) { - if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) - throw new UnsupportedFeatureException( - protocolVersion, - "Custom payloads are only supported since native protocol V4"); - flags.add(Frame.Header.Flag.CUSTOM_PAYLOAD); - } - - @SuppressWarnings("unchecked") - Coder coder = (Coder) request.type.coder; - int messageSize = coder.encodedSize(request, protocolVersion); - int payloadLength = -1; - if (customPayload != null) { - payloadLength = CBUtil.sizeOfBytesMap(customPayload); - messageSize += payloadLength; - } - ByteBuf body = ctx.alloc().buffer(messageSize); - if (customPayload != null) { - CBUtil.writeBytesMap(customPayload, body); - if (logger.isTraceEnabled()) { - logger.trace("Sending payload: {} ({} bytes total)", printPayload(customPayload), payloadLength); - } - } - - coder.encode(request, body, protocolVersion); - out.add(Frame.create(protocolVersion, request.type.opcode, request.getStreamId(), flags, body)); - } + @Override + protected void encode(ChannelHandlerContext ctx, Request request, List out) { + EnumSet flags = computeFlags(request); + int messageSize = encodedSize(request); + ByteBuf body = ctx.alloc().buffer(messageSize); + encode(request, body); + + if (body.capacity() != messageSize) { + logger.debug( + "Detected buffer resizing while encoding {} message ({} => {}), " + + "this is a driver bug " + + "(ultimately it does not affect the query, but leads to a small inefficiency)", + request.type, + messageSize, + body.capacity()); + } + out.add( + Frame.create(protocolVersion, request.type.opcode, request.getStreamId(), flags, body)); } - // private stuff to debug custom payloads - - private static final char[] hexArray = "0123456789ABCDEF".toCharArray(); - - static String printPayload(Map customPayload) { - if (customPayload == null) - return "null"; - if (customPayload.isEmpty()) - return "{}"; - StringBuilder sb = new StringBuilder("{"); - Iterator> iterator = customPayload.entrySet().iterator(); - while 
(iterator.hasNext()) { - Map.Entry entry = iterator.next(); - sb.append(entry.getKey()); - sb.append(":"); - if (entry.getValue() == null) - sb.append("null"); - else - bytesToHex(entry.getValue(), sb); - if (iterator.hasNext()) - sb.append(", "); - } - sb.append("}"); - return sb.toString(); + EnumSet computeFlags(Request request) { + EnumSet flags = EnumSet.noneOf(Frame.Header.Flag.class); + if (request.isTracingRequested()) flags.add(Frame.Header.Flag.TRACING); + if (protocolVersion == ProtocolVersion.NEWEST_BETA) flags.add(Frame.Header.Flag.USE_BETA); + Map customPayload = request.getCustomPayload(); + if (customPayload != null) { + if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) + throw new UnsupportedFeatureException( + protocolVersion, "Custom payloads are only supported since native protocol V4"); + flags.add(Frame.Header.Flag.CUSTOM_PAYLOAD); + } + return flags; + } + + int encodedSize(Request request) { + @SuppressWarnings("unchecked") + Coder coder = (Coder) request.type.coder; + int messageSize = coder.encodedSize(request, protocolVersion); + int payloadLength = -1; + if (request.getCustomPayload() != null) { + payloadLength = CBUtil.sizeOfBytesMap(request.getCustomPayload()); + messageSize += payloadLength; + } + return messageSize; } - // this method doesn't modify the given ByteBuffer - static void bytesToHex(ByteBuffer bytes, StringBuilder sb) { - int length = Math.min(bytes.remaining(), 50); - sb.append("0x"); - for (int i = 0; i < length; i++) { - int v = bytes.get(i) & 0xFF; - sb.append(hexArray[v >>> 4]); - sb.append(hexArray[v & 0x0F]); + void encode(Request request, ByteBuf destination) { + @SuppressWarnings("unchecked") + Coder coder = (Coder) request.type.coder; + + Map customPayload = request.getCustomPayload(); + if (customPayload != null) { + CBUtil.writeBytesMap(customPayload, destination); + if (logger.isTraceEnabled()) { + logger.trace( + "Sending payload: {} ({} bytes total)", + printPayload(customPayload), + CBUtil.sizeOfBytesMap(customPayload)); } - if (bytes.remaining() > 50) - sb.append("... [TRUNCATED]"); + } + + coder.encode(request, destination, protocolVersion); + } + } + + // private stuff to debug custom payloads + + private static final char[] hexArray = "0123456789ABCDEF".toCharArray(); + + static String printPayload(Map customPayload) { + if (customPayload == null) return "null"; + if (customPayload.isEmpty()) return "{}"; + StringBuilder sb = new StringBuilder("{"); + Iterator> iterator = customPayload.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + sb.append(entry.getKey()); + sb.append(":"); + if (entry.getValue() == null) sb.append("null"); + else bytesToHex(entry.getValue(), sb); + if (iterator.hasNext()) sb.append(", "); + } + sb.append("}"); + return sb.toString(); + } + + // this method doesn't modify the given ByteBuffer + static void bytesToHex(ByteBuffer bytes, StringBuilder sb) { + int length = Math.min(bytes.remaining(), 50); + sb.append("0x"); + for (int i = 0; i < length; i++) { + int v = bytes.get(i) & 0xFF; + sb.append(hexArray[v >>> 4]); + sb.append(hexArray[v & 0x0F]); } + if (bytes.remaining() > 50) sb.append("... 
[TRUNCATED]"); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/MessageToSegmentEncoder.java b/driver-core/src/main/java/com/datastax/driver/core/MessageToSegmentEncoder.java new file mode 100644 index 00000000000..bad9e8fefb7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/MessageToSegmentEncoder.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; + +class MessageToSegmentEncoder extends ChannelOutboundHandlerAdapter { + + private final ByteBufAllocator allocator; + private final Message.ProtocolEncoder requestEncoder; + + private SegmentBuilder segmentBuilder; + + MessageToSegmentEncoder(ByteBufAllocator allocator, Message.ProtocolEncoder requestEncoder) { + this.allocator = allocator; + this.requestEncoder = requestEncoder; + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + super.handlerAdded(ctx); + this.segmentBuilder = new SegmentBuilder(ctx, allocator, requestEncoder); + } + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + throws Exception { + if (msg instanceof Message.Request) { + segmentBuilder.addRequest(((Message.Request) msg), promise); + } else { + super.write(ctx, msg, promise); + } + } + + @Override + public void flush(ChannelHandlerContext ctx) throws Exception { + segmentBuilder.flush(); + super.flush(ctx); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metadata.java b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java index 15e82f2e5b9..cb9a8821962 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Metadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,795 +20,968 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import io.netty.util.collection.IntObjectHashMap; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Keeps metadata on the connected cluster, including known nodes and schema definitions. - */ +/** Keeps metadata on the connected cluster, including known nodes and schema definitions. */ public class Metadata { - private static final Logger logger = LoggerFactory.getLogger(Metadata.class); - - final Cluster.Manager cluster; - volatile String clusterName; - volatile String partitioner; - private final ConcurrentMap hosts = new ConcurrentHashMap(); - final ConcurrentMap keyspaces = new ConcurrentHashMap(); - private volatile TokenMap tokenMap; - - final ReentrantLock lock = new ReentrantLock(); - - // See https://github.com/apache/cassandra/blob/trunk/doc/cql3/CQL.textile#appendixA - private static final Set RESERVED_KEYWORDS = ImmutableSet.of( - "add", "allow", "alter", "and", "any", "apply", "asc", "authorize", "batch", "begin", "by", - "columnfamily", "create", "delete", "desc", "drop", "each_quorum", "from", "grant", "in", - "index", "inet", "infinity", "insert", "into", "keyspace", "keyspaces", "limit", "local_one", - "local_quorum", "modify", "nan", "norecursive", "of", "on", "one", "order", "password", - "primary", "quorum", "rename", "revoke", "schema", "select", "set", "table", "to", - "token", "three", "truncate", "two", "unlogged", "update", "use", "using", "where", "with" - ); - - Metadata(Cluster.Manager cluster) { - this.cluster = cluster; - } - - // rebuilds the token map with the current hosts, typically when refreshing schema metadata - void rebuildTokenMap() { - lock.lock(); - try { - if (tokenMap == null) - return; - this.tokenMap = TokenMap.build( - tokenMap.factory, - tokenMap.primaryToTokens, - keyspaces.values(), - tokenMap.ring, - tokenMap.tokenRanges, - tokenMap.tokenToPrimary); - } finally { - lock.unlock(); - } - } - - // rebuilds the token map for a new set of hosts, typically when refreshing nodes list - void rebuildTokenMap(Token.Factory factory, Map> allTokens) { - lock.lock(); - try { - this.tokenMap = TokenMap.build(factory, allTokens, keyspaces.values()); - } finally { - lock.unlock(); - } - } - - Host newHost(InetSocketAddress address) { - return new Host(address, cluster.convictionPolicyFactory, cluster); - } - - Host addIfAbsent(Host host) { - Host previous = hosts.putIfAbsent(host.getSocketAddress(), host); - return previous == null ? 
host : null; - } - - Host add(InetSocketAddress address) { - return addIfAbsent(newHost(address)); - } - - boolean remove(Host host) { - return hosts.remove(host.getSocketAddress()) != null; - } - - Host getHost(InetSocketAddress address) { - return hosts.get(address); - } - - // For internal use only - Collection allHosts() { - return hosts.values(); - } - - /* - * Deal with case sensitivity for a given element id (keyspace, table, column, etc.) - * - * This method is used to convert identifiers provided by the client (through methods such as getKeyspace(String)), - * to the format used internally by the driver. - * - * We expect client-facing APIs to behave like cqlsh, that is: - * - identifiers that are mixed-case or contain special characters should be quoted. - * - unquoted identifiers will be lowercased: getKeyspace("Foo") will look for a keyspace named "foo" - */ - static String handleId(String id) { - // Shouldn't really happen for this method, but no reason to fail here - if (id == null) - return null; - - if (isAlphanumeric(id)) - return id.toLowerCase(); - - // Check if it's enclosed in quotes. If it is, remove them and unescape internal double quotes - return ParseUtils.unDoubleQuote(id); - } - - private static boolean isAlphanumeric(String s) { - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - if (!( - (c >= 48 && c <= 57) // 0-9 - || (c >= 65 && c <= 90) // A-Z - || (c == 95) // _ (underscore) - || (c >= 97 && c <= 122) // a-z - )) - return false; - } + private static final Logger logger = LoggerFactory.getLogger(Metadata.class); + + final Cluster.Manager cluster; + volatile String clusterName; + volatile String partitioner; + // Holds the contact points until we have a connection to the cluster + private final List contactPoints = new CopyOnWriteArrayList(); + // The hosts, keyed by their host_id + private final ConcurrentMap hosts = new ConcurrentHashMap(); + final ConcurrentMap keyspaces = + new ConcurrentHashMap(); + private volatile TokenMap tokenMap; + + final ReentrantLock lock = new ReentrantLock(); + + // See https://github.com/apache/cassandra/blob/trunk/doc/cql3/CQL.textile#appendixA + private static final IntObjectHashMap> RESERVED_KEYWORDS = + indexByCaseInsensitiveHash( + "add", + "allow", + "alter", + "and", + "any", + "apply", + "asc", + "authorize", + "batch", + "begin", + "by", + "columnfamily", + "create", + "delete", + "desc", + "drop", + "each_quorum", + "from", + "grant", + "in", + "index", + "inet", + "infinity", + "insert", + "into", + "keyspace", + "keyspaces", + "limit", + "local_one", + "local_quorum", + "modify", + "nan", + "norecursive", + "of", + "on", + "one", + "order", + "password", + "primary", + "quorum", + "rename", + "revoke", + "schema", + "select", + "set", + "table", + "to", + "token", + "three", + "truncate", + "two", + "unlogged", + "update", + "use", + "using", + "where", + "with"); + + Metadata(Cluster.Manager cluster) { + this.cluster = cluster; + } + + // rebuilds the token map with the current hosts, typically when refreshing schema metadata + void rebuildTokenMap() { + lock.lock(); + try { + if (tokenMap == null) return; + this.tokenMap = + TokenMap.build( + tokenMap.factory, + tokenMap.primaryToTokens, + keyspaces.values(), + tokenMap.ring, + tokenMap.tokenRanges, + tokenMap.tokenToPrimary); + } finally { + lock.unlock(); + } + } + + // rebuilds the token map for a new set of hosts, typically when refreshing nodes list + void rebuildTokenMap(Token.Factory factory, Map> allTokens) { + lock.lock(); + try { + 
this.tokenMap = TokenMap.build(factory, allTokens, keyspaces.values()); + } finally { + lock.unlock(); + } + } + + Host newHost(EndPoint endPoint) { + return new Host(endPoint, cluster.convictionPolicyFactory, cluster); + } + + void addContactPoint(EndPoint contactPoint) { + contactPoints.add(newHost(contactPoint)); + } + + List getContactPoints() { + return contactPoints; + } + + Host getContactPoint(EndPoint endPoint) { + for (Host host : contactPoints) { + if (host.getEndPoint().equals(endPoint)) { + return host; + } + } + return null; + } + + /** + * @return the previous host associated with this id, or {@code null} if there was no such host. + */ + Host addIfAbsent(Host host) { + return hosts.putIfAbsent(host.getHostId(), host); + } + + boolean remove(Host host) { + return hosts.remove(host.getHostId()) != null; + } + + Host getHost(UUID hostId) { + return hosts.get(hostId); + } + + /** + * @param broadcastRpcAddress the untranslated broadcast RPC address, as indicated in + * server events. + */ + Host getHost(InetSocketAddress broadcastRpcAddress) { + for (Host host : hosts.values()) { + if (broadcastRpcAddress.equals(host.getBroadcastRpcAddress())) { + return host; + } + } + return null; + } + + Host getHost(EndPoint endPoint) { + for (Host host : hosts.values()) { + if (host.getEndPoint().equals(endPoint)) { + return host; + } + } + return null; + } + + // For internal use only + Collection allHosts() { + return hosts.values(); + } + + /* + * Deal with case sensitivity for a given element id (keyspace, table, column, etc.) + * + * This method is used to convert identifiers provided by the client (through methods such as getKeyspace(String)), + * to the format used internally by the driver. + * + * We expect client-facing APIs to behave like cqlsh, that is: + * - identifiers that are mixed-case or contain special characters should be quoted. + * - unquoted identifiers will be lowercased: getKeyspace("Foo") will look for a keyspace named "foo" + */ + static String handleId(String id) { + // Shouldn't really happen for this method, but no reason to fail here + if (id == null) return null; + + boolean isAlphanumericLowCase = true; + boolean isAlphanumeric = true; + for (int i = 0; i < id.length(); i++) { + char c = id.charAt(i); + if (c >= 65 && c <= 90) { // A-Z + isAlphanumericLowCase = false; + } else if (!((c >= 48 && c <= 57) // 0-9 + || (c == 95) // _ (underscore) + || (c >= 97 && c <= 122) // a-z + )) { + isAlphanumeric = false; + isAlphanumericLowCase = false; + break; + } + } + + if (isAlphanumericLowCase) { + return id; + } + if (isAlphanumeric) { + return id.toLowerCase(); + } + + // Check if it's enclosed in quotes. If it is, remove them and unescape internal double quotes + return ParseUtils.unDoubleQuote(id); + } + + /** + * Quotes a CQL identifier if necessary. + * + *

+ * <p>This is similar to {@link #quote(String)}, except that it won't quote the input string if it
+ * can safely be used as-is. For example:
+ *
+ * <ul>
+ *   <li>{@code quoteIfNecessary("foo").equals("foo")} (no need to quote).
+ *   <li>{@code quoteIfNecessary("Foo").equals("\"Foo\"")} (identifier is mixed case so case
+ *       sensitivity is required)
+ *   <li>{@code quoteIfNecessary("foo bar").equals("\"foo bar\"")} (identifier contains special
+ *       characters)
+ *   <li>{@code quoteIfNecessary("table").equals("\"table\"")} (identifier is a reserved CQL
+ *       keyword)
+ * </ul>
+ * + * @param id the "internal" form of the identifier. That is, the identifier as it would appear in + * Cassandra system tables (such as {@code system_schema.tables}, {@code + * system_schema.columns}, etc.) + * @return the identifier as it would appear in a CQL query string. This is also how you need to + * pass it to public driver methods, such as {@link #getKeyspace(String)}. + */ + public static String quoteIfNecessary(String id) { + return needsQuote(id) ? quote(id) : id; + } + + /** + * We don't need to escape an identifier if it matches non-quoted CQL3 ids ([a-z][a-z0-9_]*), and + * if it's not a CQL reserved keyword. + * + *

When 'Migrating from compact storage' after DROP COMPACT STORAGE on the table, it can have a + * column with an empty name. (See JAVA-2174 for the reference) For that case, we need to escape + * empty column name. + */ + private static boolean needsQuote(String s) { + // this method should only be called for C*-provided identifiers, + // so we expect it to be non-null + assert s != null; + if (s.isEmpty()) return true; + char c = s.charAt(0); + if (!(c >= 97 && c <= 122)) // a-z + return true; + for (int i = 1; i < s.length(); i++) { + c = s.charAt(i); + if (!((c >= 48 && c <= 57) // 0-9 + || (c == 95) // _ + || (c >= 97 && c <= 122) // a-z + )) { return true; - } - - /** - * Quotes a CQL identifier if necessary. - *

- * This is similar to {@link #quote(String)}, except that it won't quote the input string - * if it can safely be used as-is. For example: - *

- * <ul>
- *   <li>{@code quoteIfNecessary("foo").equals("foo")} (no need to quote).</li>
- *   <li>{@code quoteIfNecessary("Foo").equals("\"Foo\"")} (identifier is mixed case so case
- *   sensitivity is required)</li>
- *   <li>{@code quoteIfNecessary("foo bar").equals("\"foo bar\"")} (identifier contains
- *   special characters)</li>
- *   <li>{@code quoteIfNecessary("table").equals("\"table\"")} (identifier is a reserved CQL
- *   keyword)</li>
- * </ul>
- * - * @param id the "internal" form of the identifier. That is, the identifier as it would - * appear in Cassandra system tables (such as {@code system_schema.tables}, - * {@code system_schema.columns}, etc.) - * @return the identifier as it would appear in a CQL query string. This is also how you need - * to pass it to public driver methods, such as {@link #getKeyspace(String)}. - */ - public static String quoteIfNecessary(String id) { - return needsQuote(id) - ? quote(id) - : id; - } - - /** - * We don't need to escape an identifier if it - * matches non-quoted CQL3 ids ([a-z][a-z0-9_]*), - * and if it's not a CQL reserved keyword. - */ - private static boolean needsQuote(String s) { - // this method should only be called for C*-provided identifiers, - // so we expect it to be non-null and non-empty. - assert s != null && !s.isEmpty(); - char c = s.charAt(0); - if (!(c >= 97 && c <= 122)) // a-z - return true; - for (int i = 1; i < s.length(); i++) { - c = s.charAt(i); - if (!( - (c >= 48 && c <= 57) // 0-9 - || (c == 95) // _ - || (c >= 97 && c <= 122) // a-z - )) { - return true; - } - } - return isReservedCqlKeyword(s); - } - - /** - * Builds the internal name of a function/aggregate, which is similar, but not identical, - * to the function/aggregate signature. - * This is only used to generate keys for internal metadata maps (KeyspaceMetadata.functions and. - * KeyspaceMetadata.aggregates). - * Note that if simpleName comes from the user, the caller must call handleId on it before passing it to this method. - * Note that this method does not necessarily generates a valid CQL function signature. - * Note that argumentTypes can be either a list of strings (schema change events) - * or a list of DataTypes (function lookup from client code). - * This method must ensure that both cases produce the same identifier. - */ - static String fullFunctionName(String simpleName, Collection argumentTypes) { - StringBuilder sb = new StringBuilder(simpleName); - sb.append('('); - boolean first = true; - for (Object argumentType : argumentTypes) { - if (first) - first = false; - else - sb.append(','); - // user types must be represented by their names only, - // without keyspace prefix, because that's how - // they appear in a schema change event (in targetSignature) - if (argumentType instanceof UserType) { - UserType userType = (UserType) argumentType; - String typeName = Metadata.quoteIfNecessary(userType.getTypeName()); - sb.append(typeName); - } else { - sb.append(argumentType); - } - } - sb.append(')'); - return sb.toString(); - } - - /** - * Quote a keyspace, table or column identifier to make it case sensitive. - *

- * CQL identifiers, including keyspace, table and column ones, are case insensitive - * by default. Case sensitive identifiers can however be provided by enclosing - * the identifier in double quotes (see the - * CQL documentation - * for details). If you are using case sensitive identifiers, this method - * can be used to enclose such identifiers in double quotes, making them case - * sensitive. - *

- * Note that - * reserved CQL keywords - * should also be quoted. You can check if a given identifier is a reserved keyword - * by calling {@link #isReservedCqlKeyword(String)}. - * - * @param id the keyspace or table identifier. - * @return {@code id} enclosed in double-quotes, for use in methods like - * {@link #getReplicas}, {@link #getKeyspace}, {@link KeyspaceMetadata#getTable} - * or even {@link Cluster#connect(String)}. - */ - public static String quote(String id) { - return ParseUtils.doubleQuote(id); - } - - /** - * Checks whether an identifier is a known reserved CQL keyword or not. - *

- * The check is case-insensitive, i.e., the word "{@code KeYsPaCe}" - * would be considered as a reserved CQL keyword just as "{@code keyspace}". - *

- * Note: The list of reserved CQL keywords is subject to change in future - * versions of Cassandra. As a consequence, this method is provided solely as a - * convenience utility and should not be considered as an authoritative - * source of truth for checking reserved CQL keywords. - * - * @param id the identifier to check; should not be {@code null}. - * @return {@code true} if the given identifier is a known reserved - * CQL keyword, {@code false} otherwise. - */ - public static boolean isReservedCqlKeyword(String id) { - return id != null && RESERVED_KEYWORDS.contains(id.toLowerCase()); - } - - /** - * Returns the token ranges that define data distribution in the ring. - *

- * Note that this information is refreshed asynchronously by the control - * connection, when schema or ring topology changes. It might occasionally - * be stale. - * - * @return the token ranges. Note that the result might be stale or empty if - * metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public Set getTokenRanges() { - TokenMap current = tokenMap; - return (current == null) ? Collections.emptySet() : current.tokenRanges; - } - - /** - * Returns the token ranges that are replicated on the given host, for the given - * keyspace. - *

- * Note that this information is refreshed asynchronously by the control - * connection, when schema or ring topology changes. It might occasionally - * be stale (or even empty). - * - * @param keyspace the name of the keyspace to get token ranges for. - * @param host the host. - * @return the (immutable) set of token ranges for {@code host} as known - * by the driver. Note that the result might be stale or empty if metadata - * was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public Set getTokenRanges(String keyspace, Host host) { - keyspace = handleId(keyspace); - TokenMap current = tokenMap; - if (current == null) { - return Collections.emptySet(); - } else { - Map> dcRanges = current.hostsToRangesByKeyspace.get(keyspace); - if (dcRanges == null) { - return Collections.emptySet(); - } else { - Set ranges = dcRanges.get(host); - return (ranges == null) ? Collections.emptySet() : ranges; - } - } - } - - /** - * Returns the set of hosts that are replica for a given partition key. - *

- * Note that this information is refreshed asynchronously by the control - * connection, when schema or ring topology changes. It might occasionally - * be stale (or even empty). - * - * @param keyspace the name of the keyspace to get replicas for. - * @param partitionKey the partition key for which to find the set of - * replica. - * @return the (immutable) set of replicas for {@code partitionKey} as known - * by the driver. Note that the result might be stale or empty if metadata was - * explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public Set getReplicas(String keyspace, ByteBuffer partitionKey) { - keyspace = handleId(keyspace); - TokenMap current = tokenMap; - if (current == null) { - return Collections.emptySet(); + } + } + return isReservedCqlKeyword(s); + } + + /** + * Builds the internal name of a function/aggregate, which is similar, but not identical, to the + * function/aggregate signature. This is only used to generate keys for internal metadata maps + * (KeyspaceMetadata.functions and. KeyspaceMetadata.aggregates). Note that if simpleName comes + * from the user, the caller must call handleId on it before passing it to this method. Note that + * this method does not necessarily generates a valid CQL function signature. Note that + * argumentTypes can be either a list of strings (schema change events) or a list of DataTypes + * (function lookup from client code). This method must ensure that both cases produce the same + * identifier. + */ + static String fullFunctionName(String simpleName, Collection argumentTypes) { + StringBuilder sb = new StringBuilder(simpleName); + sb.append('('); + boolean first = true; + for (Object argumentType : argumentTypes) { + if (first) first = false; + else sb.append(','); + // user types must be represented by their names only, + // without keyspace prefix, because that's how + // they appear in a schema change event (in targetSignature) + if (argumentType instanceof UserType) { + UserType userType = (UserType) argumentType; + String typeName = Metadata.quoteIfNecessary(userType.getTypeName()); + sb.append(typeName); + } else { + sb.append(argumentType); + } + } + sb.append(')'); + return sb.toString(); + } + + /** + * Quote a keyspace, table or column identifier to make it case sensitive. + * + *

+ * <p>CQL identifiers, including keyspace, table and column ones, are case insensitive by default.
+ * Case sensitive identifiers can however be provided by enclosing the identifier in double quotes
+ * (see the CQL documentation for details). If you are using case sensitive identifiers, this
+ * method can be used to enclose such identifiers in double quotes, making them case sensitive.
+ *

Note that reserved CQL + * keywords should also be quoted. You can check if a given identifier is a reserved keyword + * by calling {@link #isReservedCqlKeyword(String)}. + * + * @param id the keyspace or table identifier. + * @return {@code id} enclosed in double-quotes, for use in methods like {@link #getReplicas}, + * {@link #getKeyspace}, {@link KeyspaceMetadata#getTable} or even {@link + * Cluster#connect(String)}. + */ + public static String quote(String id) { + return ParseUtils.doubleQuote(id); + } + + /** + * Checks whether an identifier is a known reserved CQL keyword or not. + * + *

+ * <p>The check is case-insensitive, i.e., the word "{@code KeYsPaCe}" would be considered as a
+ * reserved CQL keyword just as "{@code keyspace}".
+ *

Note: The list of reserved CQL keywords is subject to change in future versions of + * Cassandra. As a consequence, this method is provided solely as a convenience utility and should + * not be considered as an authoritative source of truth for checking reserved CQL keywords. + * + * @param id the identifier to check; should not be {@code null}. + * @return {@code true} if the given identifier is a known reserved CQL keyword, {@code false} + * otherwise. + */ + public static boolean isReservedCqlKeyword(String id) { + if (id == null) { + return false; + } + int hash = caseInsensitiveHash(id); + List keywords = RESERVED_KEYWORDS.get(hash); + if (keywords == null) { + return false; + } else { + for (char[] keyword : keywords) { + if (equalsIgnoreCaseAscii(id, keyword)) { + return true; + } + } + return false; + } + } + + private static int caseInsensitiveHash(String str) { + int hashCode = 17; + for (int i = 0; i < str.length(); i++) { + char c = toLowerCaseAscii(str.charAt(i)); + hashCode = 31 * hashCode + c; + } + return hashCode; + } + + // keyword is expected as a second argument always in low case + private static boolean equalsIgnoreCaseAscii(String str1, char[] str2LowCase) { + if (str1.length() != str2LowCase.length) return false; + + for (int i = 0; i < str1.length(); i++) { + char c1 = str1.charAt(i); + char c2Low = str2LowCase[i]; + if (c1 == c2Low) { + continue; + } + char low1 = toLowerCaseAscii(c1); + if (low1 == c2Low) { + continue; + } + return false; + } + return true; + } + + private static char toLowerCaseAscii(char c) { + if (c >= 65 && c <= 90) { // A-Z + c ^= 0x20; // convert to low case + } + return c; + } + + private static IntObjectHashMap> indexByCaseInsensitiveHash(String... words) { + IntObjectHashMap> result = new IntObjectHashMap>(); + for (String word : words) { + char[] wordAsCharArray = word.toLowerCase().toCharArray(); + int hash = caseInsensitiveHash(word); + List list = result.get(hash); + if (list == null) { + list = new ArrayList(); + result.put(hash, list); + } + list.add(wordAsCharArray); + } + return result; + } + + /** + * Returns the token ranges that define data distribution in the ring. + * + *

Note that this information is refreshed asynchronously by the control connection, when + * schema or ring topology changes. It might occasionally be stale. + * + * @return the token ranges. Note that the result might be stale or empty if metadata was + * explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. + */ + public Set getTokenRanges() { + TokenMap current = tokenMap; + return (current == null) ? Collections.emptySet() : current.tokenRanges; + } + + /** + * Returns the token ranges that are replicated on the given host, for the given keyspace. + * + *

Note that this information is refreshed asynchronously by the control connection, when + * schema or ring topology changes. It might occasionally be stale (or even empty). + * + * @param keyspace the name of the keyspace to get token ranges for. + * @param host the host. + * @return the (immutable) set of token ranges for {@code host} as known by the driver. Note that + * the result might be stale or empty if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)}. + */ + public Set getTokenRanges(String keyspace, Host host) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Map> dcRanges = current.hostsToRangesByKeyspace.get(keyspace); + if (dcRanges == null) { + return Collections.emptySet(); + } else { + Set ranges = dcRanges.get(host); + return (ranges == null) ? Collections.emptySet() : ranges; + } + } + } + + /** + * Returns the set of hosts that are replica for a given partition key. + * + *

Note that this information is refreshed asynchronously by the control connection, when + * schema or ring topology changes. It might occasionally be stale (or even empty). + * + * @param keyspace the name of the keyspace to get replicas for. + * @param partitionKey the partition key for which to find the set of replica. + * @return the (immutable) set of replicas for {@code partitionKey} as known by the driver. Note + * that the result might be stale or empty if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)}. + */ + public Set getReplicas(String keyspace, ByteBuffer partitionKey) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Set hosts = current.getReplicas(keyspace, current.factory.hash(partitionKey)); + return hosts == null ? Collections.emptySet() : hosts; + } + } + + /** + * Returns the set of hosts that are replica for a given token range. + * + *

+ * <p>Note that it is assumed that the input range does not overlap across multiple host ranges.
+ * If the range extends over multiple hosts, it only returns the replicas for those hosts that are
+ * replicas for the last token of the range. This behavior may change in a future release, see
+ * JAVA-1355.
+ *

Also note that this information is refreshed asynchronously by the control connection, when + * schema or ring topology changes. It might occasionally be stale (or even empty). + * + * @param keyspace the name of the keyspace to get replicas for. + * @param range the token range. + * @return the (immutable) set of replicas for {@code range} as known by the driver. Note that the + * result might be stale or empty if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)}. + */ + public Set getReplicas(String keyspace, TokenRange range) { + keyspace = handleId(keyspace); + TokenMap current = tokenMap; + if (current == null) { + return Collections.emptySet(); + } else { + Set hosts = current.getReplicas(keyspace, range.getEnd()); + return hosts == null ? Collections.emptySet() : hosts; + } + } + + /** + * The Cassandra name for the cluster connect to. + * + * @return the Cassandra name for the cluster connect to. + */ + public String getClusterName() { + return clusterName; + } + + /** + * The partitioner in use as reported by the Cassandra nodes. + * + * @return the partitioner in use as reported by the Cassandra nodes. + */ + public String getPartitioner() { + return partitioner; + } + + /** + * Returns the known hosts of this cluster. + * + * @return A set will all the know host of this cluster. + */ + public Set getAllHosts() { + return new HashSet(allHosts()); + } + + /** + * Checks whether hosts that are currently up agree on the schema definition. + * + *

This method performs a one-time check only, without any form of retry; therefore {@link + * Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)} does not apply in this case. + * + * @return {@code true} if all hosts agree on the schema; {@code false} if they don't agree, or if + * the check could not be performed (for example, if the control connection is down). + */ + public boolean checkSchemaAgreement() { + try { + return cluster.controlConnection.checkSchemaAgreement(); + } catch (Exception e) { + logger.warn("Error while checking schema agreement", e); + return false; + } + } + + /** + * Returns the metadata of a keyspace given its name. + * + * @param keyspace the name of the keyspace for which metadata should be returned. + * @return the metadata of the requested keyspace or {@code null} if {@code keyspace} is not a + * known keyspace. Note that the result might be stale or null if metadata was explicitly + * disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. + */ + public KeyspaceMetadata getKeyspace(String keyspace) { + return keyspaces.get(handleId(keyspace)); + } + + KeyspaceMetadata removeKeyspace(String keyspace) { + KeyspaceMetadata removed = keyspaces.remove(keyspace); + if (tokenMap != null) tokenMap.tokenToHostsByKeyspace.remove(keyspace); + return removed; + } + + /** + * Returns a list of all the defined keyspaces. + * + * @return a list of all the defined keyspaces. Note that the result might be stale or empty if + * metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. + */ + public List getKeyspaces() { + return new ArrayList(keyspaces.values()); + } + + /** + * Returns a {@code String} containing CQL queries representing the schema of this cluster. + * + *

+ * <p>In other words, this method returns the queries that would allow you to recreate the schema
+ * of this cluster.
+ *

+ * <p>Note that the returned String is formatted to be human readable (for some definition of
+ * human readable at least).
+ *

It might be stale or empty if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)}. + * + * @return the CQL queries representing this cluster schema as a {code String}. + */ + public String exportSchemaAsString() { + StringBuilder sb = new StringBuilder(); + + for (KeyspaceMetadata ksm : keyspaces.values()) sb.append(ksm.exportAsString()).append('\n'); + + return sb.toString(); + } + + /** + * Creates a tuple type given a list of types. + * + * @param types the types for the tuple type. + * @return the newly created tuple type. + */ + public TupleType newTupleType(DataType... types) { + return newTupleType(Arrays.asList(types)); + } + + /** + * Creates a tuple type given a list of types. + * + * @param types the types for the tuple type. + * @return the newly created tuple type. + */ + public TupleType newTupleType(List types) { + return new TupleType( + types, cluster.protocolVersion(), cluster.configuration.getCodecRegistry()); + } + + /** + * Builds a new {@link Token} from its string representation, according to the partitioner + * reported by the Cassandra nodes. + * + * @param tokenStr the string representation. + * @return the token. + * @throws IllegalStateException if the token factory was not initialized. This would typically + * happen if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)} before startup. + */ + public Token newToken(String tokenStr) { + TokenMap current = tokenMap; + if (current == null) + throw new IllegalStateException( + "Token factory not set. This should only happen if metadata was explicitly disabled"); + return current.factory.fromString(tokenStr); + } + + /** + * Builds a new {@link Token} from a partition key. + * + * @param components the components of the partition key, in their serialized form (obtained with + * {@link TypeCodec#serialize(Object, ProtocolVersion)}). + * @return the token. + * @throws IllegalStateException if the token factory was not initialized. This would typically + * happen if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)} before startup. + */ + public Token newToken(ByteBuffer... components) { + TokenMap current = tokenMap; + if (current == null) + throw new IllegalStateException( + "Token factory not set. This should only happen if metadata was explicitly disabled"); + return current.factory.hash(SimpleStatement.compose(components)); + } + + /** + * Builds a new {@link TokenRange}. + * + * @param start the start token. + * @param end the end token. + * @return the range. + * @throws IllegalStateException if the token factory was not initialized. This would typically + * happen if metadata was explicitly disabled with {@link + * QueryOptions#setMetadataEnabled(boolean)} before startup. + */ + public TokenRange newTokenRange(Token start, Token end) { + TokenMap current = tokenMap; + if (current == null) + throw new IllegalStateException( + "Token factory not set. This should only happen if metadata was explicitly disabled"); + + return new TokenRange(start, end, current.factory); + } + + Token.Factory tokenFactory() { + TokenMap current = tokenMap; + return (current == null) ? 
null : current.factory; + } + + void triggerOnKeyspaceAdded(KeyspaceMetadata keyspace) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onKeyspaceAdded(keyspace); + } + } + + void triggerOnKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onKeyspaceChanged(current, previous); + } + } + + void triggerOnKeyspaceRemoved(KeyspaceMetadata keyspace) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onKeyspaceRemoved(keyspace); + } + } + + void triggerOnTableAdded(TableMetadata table) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onTableAdded(table); + } + } + + void triggerOnTableChanged(TableMetadata current, TableMetadata previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onTableChanged(current, previous); + } + } + + void triggerOnTableRemoved(TableMetadata table) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onTableRemoved(table); + } + } + + void triggerOnUserTypeAdded(UserType type) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onUserTypeAdded(type); + } + } + + void triggerOnUserTypeChanged(UserType current, UserType previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onUserTypeChanged(current, previous); + } + } + + void triggerOnUserTypeRemoved(UserType type) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onUserTypeRemoved(type); + } + } + + void triggerOnFunctionAdded(FunctionMetadata function) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onFunctionAdded(function); + } + } + + void triggerOnFunctionChanged(FunctionMetadata current, FunctionMetadata previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onFunctionChanged(current, previous); + } + } + + void triggerOnFunctionRemoved(FunctionMetadata function) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onFunctionRemoved(function); + } + } + + void triggerOnAggregateAdded(AggregateMetadata aggregate) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onAggregateAdded(aggregate); + } + } + + void triggerOnAggregateChanged(AggregateMetadata current, AggregateMetadata previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onAggregateChanged(current, previous); + } + } + + void triggerOnAggregateRemoved(AggregateMetadata aggregate) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onAggregateRemoved(aggregate); + } + } + + void triggerOnMaterializedViewAdded(MaterializedViewMetadata view) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onMaterializedViewAdded(view); + } + } + + void triggerOnMaterializedViewChanged( + MaterializedViewMetadata current, MaterializedViewMetadata previous) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onMaterializedViewChanged(current, previous); + } + } + + void triggerOnMaterializedViewRemoved(MaterializedViewMetadata view) { + for (SchemaChangeListener listener : cluster.schemaChangeListeners) { + listener.onMaterializedViewRemoved(view); + } + } + + private static class TokenMap { + + private final 
Token.Factory factory; + private final Map> primaryToTokens; + private final Map>> tokenToHostsByKeyspace; + private final Map>> hostsToRangesByKeyspace; + private final List ring; + private final Set tokenRanges; + private final Map tokenToPrimary; + + private TokenMap( + Token.Factory factory, + List ring, + Set tokenRanges, + Map tokenToPrimary, + Map> primaryToTokens, + Map>> tokenToHostsByKeyspace, + Map>> hostsToRangesByKeyspace) { + this.factory = factory; + this.ring = ring; + this.tokenRanges = tokenRanges; + this.tokenToPrimary = tokenToPrimary; + this.primaryToTokens = primaryToTokens; + this.tokenToHostsByKeyspace = tokenToHostsByKeyspace; + this.hostsToRangesByKeyspace = hostsToRangesByKeyspace; + for (Map.Entry> entry : primaryToTokens.entrySet()) { + Host host = entry.getKey(); + host.setTokens(ImmutableSet.copyOf(entry.getValue())); + } + } + + private static TokenMap build( + Token.Factory factory, + Map> allTokens, + Collection keyspaces) { + Map tokenToPrimary = new HashMap(); + Set allSorted = new TreeSet(); + for (Map.Entry> entry : allTokens.entrySet()) { + Host host = entry.getKey(); + for (Token t : entry.getValue()) { + try { + allSorted.add(t); + tokenToPrimary.put(t, host); + } catch (IllegalArgumentException e) { + // If we failed parsing that token, skip it + } + } + } + List ring = new ArrayList(allSorted); + Set tokenRanges = makeTokenRanges(ring, factory); + return build(factory, allTokens, keyspaces, ring, tokenRanges, tokenToPrimary); + } + + private static TokenMap build( + Token.Factory factory, + Map> allTokens, + Collection keyspaces, + List ring, + Set tokenRanges, + Map tokenToPrimary) { + Set hosts = allTokens.keySet(); + Map>> tokenToHosts = + new HashMap>>(); + Map>> replStrategyToHosts = + new HashMap>>(); + Map>> hostsToRanges = + new HashMap>>(); + for (KeyspaceMetadata keyspace : keyspaces) { + ReplicationStrategy strategy = keyspace.replicationStrategy(); + Map> ksTokens = replStrategyToHosts.get(strategy); + if (ksTokens == null) { + ksTokens = + (strategy == null) + ? makeNonReplicatedMap(tokenToPrimary) + : strategy.computeTokenToReplicaMap(keyspace.getName(), tokenToPrimary, ring); + replStrategyToHosts.put(strategy, ksTokens); + } + + tokenToHosts.put(keyspace.getName(), ksTokens); + + Map> ksRanges; + if (ring.size() == 1) { + // We forced the single range to ]minToken,minToken], make sure to use that instead of + // relying on the host's token + ImmutableMap.Builder> builder = ImmutableMap.builder(); + for (Host host : allTokens.keySet()) builder.put(host, tokenRanges); + ksRanges = builder.build(); } else { - Set hosts = current.getReplicas(keyspace, current.factory.hash(partitionKey)); - return hosts == null ? Collections.emptySet() : hosts; - } - } - - /** - * Returns the set of hosts that are replica for a given token range. - *

- * Note that it is assumed that the input range does not overlap across multiple host ranges. - * If the range extends over multiple hosts, it only returns the replicas for those hosts - * that are replicas for the last token of the range. This behavior may change in a future - * release, see JAVA-1355. - *
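A short, hedged sketch of the token and replica lookups described here, using newToken, newTokenRange and getReplicas. The Metadata instance is assumed to come from a connected Cluster, and the keyspace name and token strings are placeholders for a cluster using the Murmur3 partitioner:

```java
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Token;
import com.datastax.driver.core.TokenRange;
import java.util.Set;

public class ReplicaLookup {
  // Prints the replicas that own a (hypothetical) token range in "my_keyspace".
  static void printReplicas(Metadata metadata) {
    // These calls throw IllegalStateException if token metadata was disabled.
    Token start = metadata.newToken("-9223372036854775808");
    Token end = metadata.newToken("0");
    TokenRange range = metadata.newTokenRange(start, end);

    // Replicas are resolved against the end token of the range (see the JAVA-1355 note above).
    Set<Host> replicas = metadata.getReplicas("my_keyspace", range);
    for (Host host : replicas) {
      System.out.println(host.getAddress());
    }
  }
}
```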

- * Also note that this information is refreshed asynchronously by the control - * connection, when schema or ring topology changes. It might occasionally - * be stale (or even empty). - * - * @param keyspace the name of the keyspace to get replicas for. - * @param range the token range. - * @return the (immutable) set of replicas for {@code range} as known by the driver. - * Note that the result might be stale or empty if metadata was explicitly disabled - * with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public Set getReplicas(String keyspace, TokenRange range) { - keyspace = handleId(keyspace); - TokenMap current = tokenMap; - if (current == null) { - return Collections.emptySet(); - } else { - Set hosts = current.getReplicas(keyspace, range.getEnd()); - return hosts == null ? Collections.emptySet() : hosts; - } - } - - /** - * The Cassandra name for the cluster connect to. - * - * @return the Cassandra name for the cluster connect to. - */ - public String getClusterName() { - return clusterName; - } - - /** - * The partitioner in use as reported by the Cassandra nodes. - * - * @return the partitioner in use as reported by the Cassandra nodes. - */ - public String getPartitioner() { - return partitioner; - } - - /** - * Returns the known hosts of this cluster. - * - * @return A set will all the know host of this cluster. - */ - public Set getAllHosts() { - return new HashSet(allHosts()); - } - - /** - * Checks whether hosts that are currently up agree on the schema definition. - *

- * This method performs a one-time check only, without any form of retry; therefore {@link Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)} - * does not apply in this case. - * - * @return {@code true} if all hosts agree on the schema; {@code false} if they don't agree, or if the check could not be performed - * (for example, if the control connection is down). - */ - public boolean checkSchemaAgreement() { - try { - return cluster.controlConnection.checkSchemaAgreement(); - } catch (Exception e) { - logger.warn("Error while checking schema agreement", e); - return false; - } - } - - /** - * Returns the metadata of a keyspace given its name. - * - * @param keyspace the name of the keyspace for which metadata should be - * returned. - * @return the metadata of the requested keyspace or {@code null} if {@code - * keyspace} is not a known keyspace. Note that the result might be stale or null if - * metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public KeyspaceMetadata getKeyspace(String keyspace) { - return keyspaces.get(handleId(keyspace)); - } - - KeyspaceMetadata removeKeyspace(String keyspace) { - KeyspaceMetadata removed = keyspaces.remove(keyspace); - if (tokenMap != null) - tokenMap.tokenToHostsByKeyspace.remove(keyspace); - return removed; - } - - /** - * Returns a list of all the defined keyspaces. - * - * @return a list of all the defined keyspaces. Note that the result might be stale or empty if - * metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}. - */ - public List getKeyspaces() { - return new ArrayList(keyspaces.values()); - } - - /** - * Returns a {@code String} containing CQL queries representing the schema - * of this cluster. - *

- * In other words, this method returns the queries that would allow to - * recreate the schema of this cluster. - *

- * Note that the returned String is formatted to be human readable (for - * some definition of human readable at least). - *

- * It might be stale or empty if metadata was explicitly disabled with - * {@link QueryOptions#setMetadataEnabled(boolean)}. - * - * @return the CQL queries representing this cluster schema as a {code - * String}. - */ - public String exportSchemaAsString() { - StringBuilder sb = new StringBuilder(); - - for (KeyspaceMetadata ksm : keyspaces.values()) - sb.append(ksm.exportAsString()).append('\n'); - - return sb.toString(); - } - - /** - * Creates a tuple type given a list of types. - * - * @param types the types for the tuple type. - * @return the newly created tuple type. - */ - public TupleType newTupleType(DataType... types) { - return newTupleType(Arrays.asList(types)); - } - - /** - * Creates a tuple type given a list of types. - * - * @param types the types for the tuple type. - * @return the newly created tuple type. - */ - public TupleType newTupleType(List types) { - return new TupleType(types, cluster.protocolVersion(), cluster.configuration.getCodecRegistry()); - } - - /** - * Builds a new {@link Token} from its string representation, according to the partitioner - * reported by the Cassandra nodes. - * - * @param tokenStr the string representation. - * @return the token. - * @throws IllegalStateException if the token factory was not initialized. This would typically - * happen if metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)} - * before startup. - */ - public Token newToken(String tokenStr) { - TokenMap current = tokenMap; - if (current == null) - throw new IllegalStateException("Token factory not set. This should only happen if metadata was explicitly disabled"); - return current.factory.fromString(tokenStr); - } - - /** - * Builds a new {@link Token} from a partition key. - * - * @param components the components of the partition key, in their serialized form (obtained with - * {@link TypeCodec#serialize(Object, ProtocolVersion)}). - * @return the token. - * @throws IllegalStateException if the token factory was not initialized. This would typically - * happen if metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)} - * before startup. - */ - public Token newToken(ByteBuffer... components) { - TokenMap current = tokenMap; - if (current == null) - throw new IllegalStateException("Token factory not set. This should only happen if metadata was explicitly disabled"); - return current.factory.hash( - SimpleStatement.compose(components)); - } - - /** - * Builds a new {@link TokenRange}. - * - * @param start the start token. - * @param end the end token. - * @return the range. - * @throws IllegalStateException if the token factory was not initialized. This would typically - * happen if metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)} - * before startup. - */ - public TokenRange newTokenRange(Token start, Token end) { - TokenMap current = tokenMap; - if (current == null) - throw new IllegalStateException("Token factory not set. This should only happen if metadata was explicitly disabled"); - - return new TokenRange(start, end, current.factory); - } - - Token.Factory tokenFactory() { - TokenMap current = tokenMap; - return (current == null) ? 
null : current.factory; - } - - void triggerOnKeyspaceAdded(KeyspaceMetadata keyspace) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onKeyspaceAdded(keyspace); - } - } - - void triggerOnKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onKeyspaceChanged(current, previous); - } - } - - void triggerOnKeyspaceRemoved(KeyspaceMetadata keyspace) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onKeyspaceRemoved(keyspace); - } - } - - void triggerOnTableAdded(TableMetadata table) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onTableAdded(table); - } - } - - void triggerOnTableChanged(TableMetadata current, TableMetadata previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onTableChanged(current, previous); - } - } - - void triggerOnTableRemoved(TableMetadata table) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onTableRemoved(table); - } - } - - void triggerOnUserTypeAdded(UserType type) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onUserTypeAdded(type); - } - } - - void triggerOnUserTypeChanged(UserType current, UserType previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onUserTypeChanged(current, previous); - } - } - - void triggerOnUserTypeRemoved(UserType type) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onUserTypeRemoved(type); - } - } - - void triggerOnFunctionAdded(FunctionMetadata function) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onFunctionAdded(function); - } - } - - void triggerOnFunctionChanged(FunctionMetadata current, FunctionMetadata previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onFunctionChanged(current, previous); - } - } - - void triggerOnFunctionRemoved(FunctionMetadata function) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onFunctionRemoved(function); - } - } - - void triggerOnAggregateAdded(AggregateMetadata aggregate) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onAggregateAdded(aggregate); - } - } - - void triggerOnAggregateChanged(AggregateMetadata current, AggregateMetadata previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onAggregateChanged(current, previous); - } - } - - void triggerOnAggregateRemoved(AggregateMetadata aggregate) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onAggregateRemoved(aggregate); - } - } - - void triggerOnMaterializedViewAdded(MaterializedViewMetadata view) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onMaterializedViewAdded(view); - } - } - - void triggerOnMaterializedViewChanged(MaterializedViewMetadata current, MaterializedViewMetadata previous) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onMaterializedViewChanged(current, previous); - } - } - - void triggerOnMaterializedViewRemoved(MaterializedViewMetadata view) { - for (SchemaChangeListener listener : cluster.schemaChangeListeners) { - listener.onMaterializedViewRemoved(view); - } - } - - private static class TokenMap { - - private final 
Token.Factory factory; - private final Map> primaryToTokens; - private final Map>> tokenToHostsByKeyspace; - private final Map>> hostsToRangesByKeyspace; - private final List ring; - private final Set tokenRanges; - private final Map tokenToPrimary; - - private TokenMap(Token.Factory factory, - List ring, - Set tokenRanges, - Map tokenToPrimary, - Map> primaryToTokens, - Map>> tokenToHostsByKeyspace, - Map>> hostsToRangesByKeyspace) { - this.factory = factory; - this.ring = ring; - this.tokenRanges = tokenRanges; - this.tokenToPrimary = tokenToPrimary; - this.primaryToTokens = primaryToTokens; - this.tokenToHostsByKeyspace = tokenToHostsByKeyspace; - this.hostsToRangesByKeyspace = hostsToRangesByKeyspace; - for (Map.Entry> entry : primaryToTokens.entrySet()) { - Host host = entry.getKey(); - host.setTokens(ImmutableSet.copyOf(entry.getValue())); - } - } - - private static TokenMap build(Token.Factory factory, Map> allTokens, Collection keyspaces) { - Map tokenToPrimary = new HashMap(); - Set allSorted = new TreeSet(); - for (Map.Entry> entry : allTokens.entrySet()) { - Host host = entry.getKey(); - for (Token t : entry.getValue()) { - try { - allSorted.add(t); - tokenToPrimary.put(t, host); - } catch (IllegalArgumentException e) { - // If we failed parsing that token, skip it - } - } - } - List ring = new ArrayList(allSorted); - Set tokenRanges = makeTokenRanges(ring, factory); - return build(factory, allTokens, keyspaces, ring, tokenRanges, tokenToPrimary); - } - - private static TokenMap build(Token.Factory factory, Map> allTokens, Collection keyspaces, List ring, Set tokenRanges, Map tokenToPrimary) { - Set hosts = allTokens.keySet(); - Map>> tokenToHosts = new HashMap>>(); - Map>> replStrategyToHosts = new HashMap>>(); - Map>> hostsToRanges = new HashMap>>(); - for (KeyspaceMetadata keyspace : keyspaces) { - ReplicationStrategy strategy = keyspace.replicationStrategy(); - Map> ksTokens = replStrategyToHosts.get(strategy); - if (ksTokens == null) { - ksTokens = (strategy == null) - ? 
makeNonReplicatedMap(tokenToPrimary) - : strategy.computeTokenToReplicaMap(keyspace.getName(), tokenToPrimary, ring); - replStrategyToHosts.put(strategy, ksTokens); - } - - tokenToHosts.put(keyspace.getName(), ksTokens); - - Map> ksRanges; - if (ring.size() == 1) { - // We forced the single range to ]minToken,minToken], make sure to use that instead of relying on the host's token - ImmutableMap.Builder> builder = ImmutableMap.builder(); - for (Host host : allTokens.keySet()) - builder.put(host, tokenRanges); - ksRanges = builder.build(); - } else { - ksRanges = computeHostsToRangesMap(tokenRanges, ksTokens, hosts.size()); - } - hostsToRanges.put(keyspace.getName(), ksRanges); - } - return new TokenMap(factory, ring, tokenRanges, tokenToPrimary, allTokens, tokenToHosts, hostsToRanges); - } - - private Set getReplicas(String keyspace, Token token) { - - Map> tokenToHosts = tokenToHostsByKeyspace.get(keyspace); - if (tokenToHosts == null) - return Collections.emptySet(); - - // If the token happens to be one of the "primary" tokens, get result directly - Set hosts = tokenToHosts.get(token); - if (hosts != null) - return hosts; - - // Otherwise, find closest "primary" token on the ring - int i = Collections.binarySearch(ring, token); - if (i < 0) { - i = -i - 1; - if (i >= ring.size()) - i = 0; - } - - return tokenToHosts.get(ring.get(i)); - } - - private static Map> makeNonReplicatedMap(Map input) { - Map> output = new HashMap>(input.size()); - for (Map.Entry entry : input.entrySet()) - output.put(entry.getKey(), ImmutableSet.of(entry.getValue())); - return output; - } - - private static Set makeTokenRanges(List ring, Token.Factory factory) { - ImmutableSet.Builder builder = ImmutableSet.builder(); - // JAVA-684: if there is only one token, return the range ]minToken, minToken] - if (ring.size() == 1) { - builder.add(new TokenRange(factory.minToken(), factory.minToken(), factory)); - } else { - for (int i = 0; i < ring.size(); i++) { - Token start = ring.get(i); - Token end = ring.get((i + 1) % ring.size()); - builder.add(new TokenRange(start, end, factory)); - } - } - return builder.build(); - } - - private static Map> computeHostsToRangesMap(Set tokenRanges, Map> ksTokens, int hostCount) { - Map> builders = Maps.newHashMapWithExpectedSize(hostCount); - for (TokenRange range : tokenRanges) { - Set replicas = ksTokens.get(range.getEnd()); - for (Host host : replicas) { - ImmutableSet.Builder hostRanges = builders.get(host); - if (hostRanges == null) { - hostRanges = ImmutableSet.builder(); - builders.put(host, hostRanges); - } - hostRanges.add(range); - } - } - Map> ksRanges = Maps.newHashMapWithExpectedSize(hostCount); - for (Map.Entry> entry : builders.entrySet()) { - ksRanges.put(entry.getKey(), entry.getValue().build()); - } - return ksRanges; - } - } + ksRanges = computeHostsToRangesMap(tokenRanges, ksTokens, hosts.size()); + } + hostsToRanges.put(keyspace.getName(), ksRanges); + } + return new TokenMap( + factory, ring, tokenRanges, tokenToPrimary, allTokens, tokenToHosts, hostsToRanges); + } + + private Set getReplicas(String keyspace, Token token) { + + Map> tokenToHosts = tokenToHostsByKeyspace.get(keyspace); + if (tokenToHosts == null) return Collections.emptySet(); + + // If the token happens to be one of the "primary" tokens, get result directly + Set hosts = tokenToHosts.get(token); + if (hosts != null) return hosts; + + // Otherwise, find closest "primary" token on the ring + int i = Collections.binarySearch(ring, token); + if (i < 0) { + i = -i - 1; + if (i >= ring.size()) i = 
0; + } + + return tokenToHosts.get(ring.get(i)); + } + + private static Map> makeNonReplicatedMap(Map input) { + Map> output = new HashMap>(input.size()); + for (Map.Entry entry : input.entrySet()) + output.put(entry.getKey(), ImmutableSet.of(entry.getValue())); + return output; + } + + private static Set makeTokenRanges(List ring, Token.Factory factory) { + ImmutableSet.Builder builder = ImmutableSet.builder(); + // JAVA-684: if there is only one token, return the range ]minToken, minToken] + if (ring.size() == 1) { + builder.add(new TokenRange(factory.minToken(), factory.minToken(), factory)); + } else { + for (int i = 0; i < ring.size(); i++) { + Token start = ring.get(i); + Token end = ring.get((i + 1) % ring.size()); + builder.add(new TokenRange(start, end, factory)); + } + } + return builder.build(); + } + + private static Map> computeHostsToRangesMap( + Set tokenRanges, Map> ksTokens, int hostCount) { + Map> builders = + Maps.newHashMapWithExpectedSize(hostCount); + for (TokenRange range : tokenRanges) { + Set replicas = ksTokens.get(range.getEnd()); + for (Host host : replicas) { + ImmutableSet.Builder hostRanges = builders.get(host); + if (hostRanges == null) { + hostRanges = ImmutableSet.builder(); + builders.put(host, hostRanges); + } + hostRanges.add(range); + } + } + Map> ksRanges = Maps.newHashMapWithExpectedSize(hostCount); + for (Map.Entry> entry : builders.entrySet()) { + ksRanges.put(entry.getKey(), entry.getValue().build()); + } + return ksRanges; + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metrics.java b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java index 1debeb4a6e9..744035b6885 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Metrics.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,13 @@ */ package com.datastax.driver.core; -import com.codahale.metrics.*; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.JmxReporter; +import com.codahale.metrics.Meter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Timer; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; - import java.util.HashSet; import java.util.Set; import java.util.concurrent.BlockingQueue; @@ -25,579 +31,640 @@ /** * Metrics exposed by the driver. - *

- * The metrics exposed by this class use the Metrics - * library and you should refer its documentation for details on how to handle the exposed - * metric objects. - *

- * By default, metrics are exposed through JMX, which is very useful for - * development and browsing, but for production environments you may want to - * have a look at the reporters - * provided by the Metrics library which could be more efficient/adapted. + * + *

The metrics exposed by this class use the Metrics + * library and you should refer to its documentation for details on how to handle the exposed metric + * objects. + * + *

By default, metrics are exposed through JMX, which is very useful for development and + * browsing, but for production environments you may want to have a look at the reporters provided by the + * Metrics library which could be more efficient/adapted. */ public class Metrics { - private final Cluster.Manager manager; - private final MetricRegistry registry = new MetricRegistry(); - private final JmxReporter jmxReporter; - private final Errors errors = new Errors(); - - private final Timer requests = registry.timer("requests"); - - private final Gauge knownHosts = registry.register("known-hosts", new Gauge() { - @Override - public Integer getValue() { - return manager.metadata.allHosts().size(); - } - }); - private final Gauge connectedTo = registry.register("connected-to", new Gauge() { - @Override - public Integer getValue() { - Set s = new HashSet(); - for (SessionManager session : manager.sessions) - s.addAll(session.pools.keySet()); - return s.size(); - } - }); - private final Gauge openConnections = registry.register("open-connections", new Gauge() { + private final Cluster.Manager manager; + private final MetricRegistry registry = new MetricRegistry(); + private final JmxReporter jmxReporter; + private final Errors errors = new Errors(); + + private final Timer requests = registry.timer("requests"); + private final Meter bytesSent = registry.meter("bytes-sent"); + private final Meter bytesReceived = registry.meter("bytes-received"); + + private final Gauge knownHosts = + registry.register( + "known-hosts", + new Gauge() { + @Override + public Integer getValue() { + return manager.metadata.allHosts().size(); + } + }); + private final Gauge connectedTo = + registry.register( + "connected-to", + new Gauge() { + @Override + public Integer getValue() { + Set s = new HashSet(); + for (SessionManager session : manager.sessions) s.addAll(session.pools.keySet()); + return s.size(); + } + }); + private final Gauge openConnections = + registry.register( + "open-connections", + new Gauge() { + @Override + public Integer getValue() { + int value = manager.controlConnection.isOpen() ? 
1 : 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) value += pool.opened(); + return value; + } + }); + private final Gauge trashedConnections = + registry.register( + "trashed-connections", + new Gauge() { + @Override + public Integer getValue() { + int value = 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) value += pool.trashed(); + return value; + } + }); + private final Gauge inFlightRequests = + registry.register( + "inflight-requests", + new Gauge() { + @Override + public Integer getValue() { + int value = 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) + value += pool.totalInFlight.get(); + return value; + } + }); + + private final Gauge requestQueueDepth = + registry.register( + "request-queue-depth", + new Gauge() { + @Override + public Integer getValue() { + int value = 0; + for (SessionManager session : manager.sessions) + for (HostConnectionPool pool : session.pools.values()) + value += pool.pendingBorrowCount.get(); + return value; + } + }); + + private final Gauge executorQueueDepth; + private final Gauge blockingExecutorQueueDepth; + private final Gauge reconnectionSchedulerQueueSize; + private final Gauge taskSchedulerQueueSize; + + Metrics(Cluster.Manager manager) { + this.manager = manager; + this.executorQueueDepth = + registry.register("executor-queue-depth", buildQueueSizeGauge(manager.executorQueue)); + this.blockingExecutorQueueDepth = + registry.register( + "blocking-executor-queue-depth", buildQueueSizeGauge(manager.blockingExecutorQueue)); + this.reconnectionSchedulerQueueSize = + registry.register( + "reconnection-scheduler-task-count", + buildQueueSizeGauge(manager.reconnectionExecutorQueue)); + this.taskSchedulerQueueSize = + registry.register( + "task-scheduler-task-count", buildQueueSizeGauge(manager.scheduledTasksExecutorQueue)); + if (manager.configuration.getMetricsOptions().isJMXReportingEnabled()) { + this.jmxReporter = + JmxReporter.forRegistry(registry).inDomain(manager.clusterName + "-metrics").build(); + this.jmxReporter.start(); + } else { + this.jmxReporter = null; + } + } + + /** + * Returns the registry containing all metrics. + * + *

The metrics registry allows you to easily use the reporters that ship with Metrics or a custom + * written one. + * + *

For instance, if {@code metrics} is {@code this} object, you could export the metrics to csv + * files using: + * + *

+   *     com.codahale.metrics.CsvReporter.forRegistry(metrics.getRegistry()).build(new File("measurements/")).start(1, TimeUnit.SECONDS);
+   * 
+ * + *

If you already have a {@code MetricRegistry} in your application and wish to add the + * driver's metrics to it, the recommended approach is to use a listener: + * + *

+   *     // Your existing registry:
+   *     final com.codahale.metrics.MetricRegistry myRegistry = ...
+   *
+   *     cluster.getMetrics().getRegistry().addListener(new com.codahale.metrics.MetricRegistryListener() {
+   *         @Override
+   *         public void onGaugeAdded(String name, Gauge<?> gauge) {
+   *             if (myRegistry.getNames().contains(name)) {
+   *                 // name is already taken, maybe prefix with a namespace
+   *                 ...
+   *             } else {
+   *                 myRegistry.register(name, gauge);
+   *             }
+   *         }
+   *
+   *         ... // Implement other methods in a similar fashion
+   *     });
+   * 
+ * + * Since reporting is handled by your registry, you'll probably also want to disable JMX reporting + * with {@link Cluster.Builder#withoutJMXReporting()}. + * + * @return the registry containing all metrics. + */ + public MetricRegistry getRegistry() { + return registry; + } + + /** + * Returns metrics on the user requests performed on the Cluster. + * + *

This metric exposes + * + *

    + *
  • the total number of requests. + *
  • the requests rate (in requests per seconds), including 1, 5 and 15 minute rates. + *
  • the mean, min and max latencies, as well as latency at a given percentile. + *
+ * + * @return a {@code Timer} metric object exposing the rate and latency for user requests. + */ + public Timer getRequestsTimer() { + return requests; + } + + /** + * Returns an object grouping metrics related to the errors encountered. + * + * @return an object grouping metrics related to the errors encountered. + */ + public Errors getErrorMetrics() { + return errors; + } + + /** + * Returns the number of Cassandra hosts currently known by the driver (that is whether they are + * currently considered up or down). + * + * @return the number of Cassandra hosts currently known by the driver. + */ + public Gauge getKnownHosts() { + return knownHosts; + } + + /** + * Returns the number of Cassandra hosts the driver is currently connected to (that is have at + * least one connection opened to). + * + * @return the number of Cassandra hosts the driver is currently connected to. + */ + public Gauge getConnectedToHosts() { + return connectedTo; + } + + /** + * Returns the total number of currently opened connections to Cassandra hosts. + * + * @return The total number of currently opened connections to Cassandra hosts. + */ + public Gauge getOpenConnections() { + return openConnections; + } + + /** + * Returns the total number of currently "trashed" connections to Cassandra hosts. + * + *
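A hedged sketch of how an application might poll the request Timer and a few of the gauges exposed by this class. The Cluster instance and the log format are assumptions; the values are read through the standard Metrics API:

```java
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Metrics;

public class DriverMetricsLogger {
  // Logs a one-line summary of request latency and connection usage.
  static void logOnce(Cluster cluster) {
    Metrics metrics = cluster.getMetrics(); // null if metrics collection is disabled
    if (metrics == null) return;

    Timer requests = metrics.getRequestsTimer();
    Snapshot latencies = requests.getSnapshot();

    System.out.printf(
        "requests=%d rate1m=%.1f/s p99=%.1fms hosts=%d connected=%d open=%d inFlight=%d%n",
        requests.getCount(),
        requests.getOneMinuteRate(),
        latencies.get99thPercentile() / 1000000.0, // the Timer records latencies in nanoseconds
        metrics.getKnownHosts().getValue(),
        metrics.getConnectedToHosts().getValue(),
        metrics.getOpenConnections().getValue(),
        metrics.getInFlightRequests().getValue());
  }
}
```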

When the load to a host decreases, the driver will reclaim some connections in order to save + * resources. No requests are sent to these connections anymore, but they are kept open for an + * additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the load + * goes up again. This metric counts connections in that state. + * + * @return The total number of currently trashed connections to Cassandra hosts. + */ + public Gauge getTrashedConnections() { + return trashedConnections; + } + + /** + * Returns the total number of in flight requests to Cassandra hosts. + * + * @return The total number of in flight requests to Cassandra hosts. + */ + public Gauge getInFlightRequests() { + return inFlightRequests; + } + + /** + * Returns the total number of enqueued requests on all Cassandra hosts. + * + * @see Session.State#getRequestQueueDepth(Host) + * @return The total number of enqueued requests on all Cassandra hosts. + */ + public Gauge getRequestQueueDepth() { + return requestQueueDepth; + } + + /** + * Returns the number of queued up tasks in the {@link ThreadingOptions#createExecutor(String) + * main internal executor}. + * + *

If the executor's task queue is not accessible – which happens when the executor is not an + * instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * + * @return The number of queued up tasks in the main internal executor, or -1, if that number is + * unknown. + */ + public Gauge getExecutorQueueDepth() { + return executorQueueDepth; + } + + /** + * Returns the number of queued up tasks in the {@link + * ThreadingOptions#createBlockingExecutor(String) blocking executor}. + * + *

If the executor's task queue is not accessible – which happens when the executor is not an + * instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * + * @return The number of queued up tasks in the blocking executor, or -1, if that number is + * unknown. + */ + public Gauge getBlockingExecutorQueueDepth() { + return blockingExecutorQueueDepth; + } + + /** + * Returns the number of queued up tasks in the {@link + * ThreadingOptions#createReconnectionExecutor(String) reconnection executor}. + * + *

A queue size > 0 does not necessarily indicate a backlog as some tasks may not have been + * scheduled to execute yet. + * + *

If the executor's task queue is not accessible – which happens when the executor is not an + * instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * + * @return The size of the work queue for the reconnection executor, or -1, if that number is + * unknown. + */ + public Gauge getReconnectionSchedulerQueueSize() { + return reconnectionSchedulerQueueSize; + } + + /** + * Returns the number of queued up tasks in the {@link + * ThreadingOptions#createScheduledTasksExecutor(String) scheduled tasks executor}. + * + *

A queue size > 0 does not necessarily indicate a backlog as some tasks may not have been + * scheduled to execute yet. + * + *

If the executor's task queue is not accessible – which happens when the executor is not an + * instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * + * @return The size of the work queue for the scheduled tasks executor, or -1, if that number is + * unknown. + */ + public Gauge getTaskSchedulerQueueSize() { + return taskSchedulerQueueSize; + } + + /** + * Returns the number of bytes sent so far. + * + *

Note that this measures unencrypted traffic, even if SSL is enabled (the probe is inserted + * before SSL handlers in the Netty pipeline). In practice, SSL overhead should be negligible + * after the initial handshake. + * + * @return the number of bytes sent so far. + */ + public Meter getBytesSent() { + return bytesSent; + } + + /** + * Returns the number of bytes received so far. + * + *

Note that this measures unencrypted traffic, even if SSL is enabled (the probe is inserted + * before SSL handlers in the Netty pipeline). In practice, SSL overhead should be negligible + * after the initial handshake. + * + * @return the number of bytes received so far. + */ + public Meter getBytesReceived() { + return bytesReceived; + } + + void shutdown() { + if (jmxReporter != null) jmxReporter.stop(); + } + + private static Gauge buildQueueSizeGauge(final BlockingQueue queue) { + if (queue != null) { + return new Gauge() { @Override public Integer getValue() { - int value = manager.controlConnection.isOpen() ? 1 : 0; - for (SessionManager session : manager.sessions) - for (HostConnectionPool pool : session.pools.values()) - value += pool.opened(); - return value; + return queue.size(); } - }); - private final Gauge trashedConnections = registry.register("trashed-connections", new Gauge() { + }; + } else { + return new Gauge() { @Override public Integer getValue() { - int value = 0; - for (SessionManager session : manager.sessions) - for (HostConnectionPool pool : session.pools.values()) - value += pool.trashed(); - return value; - } - }); - - private final Gauge executorQueueDepth; - private final Gauge blockingExecutorQueueDepth; - private final Gauge reconnectionSchedulerQueueSize; - private final Gauge taskSchedulerQueueSize; - - Metrics(Cluster.Manager manager) { - this.manager = manager; - this.executorQueueDepth = registry.register( - "executor-queue-depth", - buildQueueSizeGauge(manager.executorQueue)); - this.blockingExecutorQueueDepth = registry.register( - "blocking-executor-queue-depth", - buildQueueSizeGauge(manager.blockingExecutorQueue)); - this.reconnectionSchedulerQueueSize = registry.register( - "reconnection-scheduler-task-count", - buildQueueSizeGauge(manager.reconnectionExecutorQueue)); - this.taskSchedulerQueueSize = registry.register( - "task-scheduler-task-count", - buildQueueSizeGauge(manager.scheduledTasksExecutorQueue)); - if (manager.configuration.getMetricsOptions().isJMXReportingEnabled()) { - this.jmxReporter = JmxReporter.forRegistry(registry).inDomain(manager.clusterName + "-metrics").build(); - this.jmxReporter.start(); - } else { - this.jmxReporter = null; + return -1; } + }; } + } + + /** Metrics on errors encountered. 
*/ + public class Errors { + + private final Counter connectionErrors = registry.counter("connection-errors"); + private final Counter authenticationErrors = registry.counter("authentication-errors"); + + private final Counter writeTimeouts = registry.counter("write-timeouts"); + private final Counter readTimeouts = registry.counter("read-timeouts"); + private final Counter unavailables = registry.counter("unavailables"); + private final Counter clientTimeouts = registry.counter("client-timeouts"); + + private final Counter otherErrors = registry.counter("other-errors"); + + private final Counter retries = registry.counter("retries"); + private final Counter retriesOnWriteTimeout = registry.counter("retries-on-write-timeout"); + private final Counter retriesOnReadTimeout = registry.counter("retries-on-read-timeout"); + private final Counter retriesOnUnavailable = registry.counter("retries-on-unavailable"); + private final Counter retriesOnClientTimeout = registry.counter("retries-on-client-timeout"); + private final Counter retriesOnConnectionError = + registry.counter("retries-on-connection-error"); + private final Counter retriesOnOtherErrors = registry.counter("retries-on-other-errors"); + + private final Counter ignores = registry.counter("ignores"); + private final Counter ignoresOnWriteTimeout = registry.counter("ignores-on-write-timeout"); + private final Counter ignoresOnReadTimeout = registry.counter("ignores-on-read-timeout"); + private final Counter ignoresOnUnavailable = registry.counter("ignores-on-unavailable"); + private final Counter ignoresOnClientTimeout = registry.counter("ignores-on-client-timeout"); + private final Counter ignoresOnConnectionError = + registry.counter("ignores-on-connection-error"); + private final Counter ignoresOnOtherErrors = registry.counter("ignores-on-other-errors"); + + private final Counter speculativeExecutions = registry.counter("speculative-executions"); /** - * Returns the registry containing all metrics. - *

- * The metrics registry allows you to easily use the reporters that ship - * with Metrics - * or a custom written one. - *

- * For instance, if {@code metrics} is {@code this} object, you could export the - * metrics to csv files using: - *

-     *     com.codahale.metrics.CsvReporter.forRegistry(metrics.getRegistry()).build(new File("measurements/")).start(1, TimeUnit.SECONDS);
-     * 
- *

- * If you already have a {@code MetricRegistry} in your application and wish to - * add the driver's metrics to it, the recommended approach is to use a listener: - *

-     *     // Your existing registry:
-     *     final com.codahale.metrics.MetricRegistry myRegistry = ...
+     * Returns the number of errors while connecting to Cassandra nodes.
      *
-     *     cluster.getMetrics().getRegistry().addListener(new com.codahale.metrics.MetricRegistryListener() {
-     *         @Override
-     *         public void onGaugeAdded(String name, Gauge<?> gauge) {
-     *             if (myRegistry.getNames().contains(name)) {
-     *                 // name is already taken, maybe prefix with a namespace
-     *                 ...
-     *             } else {
-     *                 myRegistry.register(name, gauge);
-     *             }
-     *         }
+     * 

This represents the number of times that a request to a Cassandra node has failed due to a + * connection problem. This thus also corresponds to how often the driver had to pick a fallback + * host for a request. * - * ... // Implement other methods in a similar fashion - * }); - *

- * Since reporting is handled by your registry, you'll probably also want to disable - * JMX reporting with {@link Cluster.Builder#withoutJMXReporting()}. + *
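A small, hedged example of reading the error counters defined by this Errors class to spot connection trouble. The threshold and the idea of comparing against it are assumptions; `metrics` is expected to be the value returned by `cluster.getMetrics()`:

```java
import com.datastax.driver.core.Metrics;

public class ErrorWatch {
  // Returns true if the cumulative connection-error count exceeds a caller-chosen threshold.
  static boolean looksUnhealthy(Metrics metrics, long threshold) {
    Metrics.Errors errors = metrics.getErrorMetrics();
    long connectionErrors = errors.getConnectionErrors().getCount();
    long retries = errors.getRetries().getCount();
    long ignores = errors.getIgnores().getCount();

    System.out.printf("connection-errors=%d retries=%d ignores=%d%n",
        connectionErrors, retries, ignores);

    // Counters are cumulative for the lifetime of the Cluster; real code would diff against a baseline.
    return connectionErrors > threshold;
  }
}
```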

You can expect a few connection errors when a Cassandra node fails (or is stopped), but if + that number grows continuously you likely have a problem. * - * @return the registry containing all metrics. + * @return the number of errors while connecting to Cassandra nodes. */ - public MetricRegistry getRegistry() { - return registry; + public Counter getConnectionErrors() { + return connectionErrors; } /** - * Returns metrics on the user requests performed on the Cluster. - *

- * This metric exposes
- *
- *   • the total number of requests.
- *   • the requests rate (in requests per seconds), including 1, 5 and 15 minute rates.
- *   • the mean, min and max latencies, as well as latency at a given percentile.
- *
+ * Returns the number of authentication errors while connecting to Cassandra nodes. * - * @return a {@code Timer} metric object exposing the rate and latency for - * user requests. + * @return the number of errors. */ - public Timer getRequestsTimer() { - return requests; + public Counter getAuthenticationErrors() { + return authenticationErrors; } /** - * Returns an object grouping metrics related to the errors encountered. + * Returns the number of write requests that returned a timeout (independently of the final + * decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). * - * @return an object grouping metrics related to the errors encountered. + * @return the number of write timeout. */ - public Errors getErrorMetrics() { - return errors; + public Counter getWriteTimeouts() { + return writeTimeouts; } /** - * Returns the number of Cassandra hosts currently known by the driver (that is - * whether they are currently considered up or down). + * Returns the number of read requests that returned a timeout (independently of the final + * decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). * - * @return the number of Cassandra hosts currently known by the driver. + * @return the number of read timeout. */ - public Gauge getKnownHosts() { - return knownHosts; + public Counter getReadTimeouts() { + return readTimeouts; } /** - * Returns the number of Cassandra hosts the driver is currently connected to - * (that is have at least one connection opened to). + * Returns the number of requests that returned an unavailable exception (independently of the + * final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). * - * @return the number of Cassandra hosts the driver is currently connected to. + * @return the number of unavailable exceptions. */ - public Gauge getConnectedToHosts() { - return connectedTo; + public Counter getUnavailables() { + return unavailables; } /** - * Returns the total number of currently opened connections to Cassandra hosts. + * Returns the number of requests that timed out before the driver received a response. * - * @return The total number of currently opened connections to Cassandra hosts. + * @return the number of client timeouts. */ - public Gauge getOpenConnections() { - return openConnections; + public Counter getClientTimeouts() { + return clientTimeouts; } /** - * Returns the total number of currently "trashed" connections to Cassandra hosts. - *

- * When the load to a host decreases, the driver will reclaim some connections in order to save - * resources. No requests are sent to these connections anymore, but they are kept open for an - * additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the load - * goes up again. This metric counts connections in that state. + * Returns the number of requests that returned errors not accounted for by another metric. This + * includes all types of invalid requests. * - * @return The total number of currently trashed connections to Cassandra hosts. + * @return the number of requests errors not accounted by another metric. */ - public Gauge getTrashedConnections() { - return trashedConnections; + public Counter getOthers() { + return otherErrors; } /** - * Returns the number of queued up tasks in the {@link ThreadingOptions#createExecutor(String) main internal executor}. - *

- * If the executor's task queue is not accessible – which happens when the executor - * is not an instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}. * - * @return The number of queued up tasks in the main internal executor, - * or -1, if that number is unknown. + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}. */ - public Gauge getExecutorQueueDepth() { - return executorQueueDepth; + public Counter getRetries() { + return retries; } /** - * Returns the number of queued up tasks in the {@link ThreadingOptions#createBlockingExecutor(String) blocking executor}. - *

- * If the executor's task queue is not accessible – which happens when the executor - * is not an instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a read timed out. * - * @return The number of queued up tasks in the blocking executor, - * or -1, if that number is unknown. + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a read timed out. */ - public Gauge getBlockingExecutorQueueDepth() { - return blockingExecutorQueueDepth; + public Counter getRetriesOnReadTimeout() { + return retriesOnReadTimeout; } /** - * Returns the number of queued up tasks in the {@link ThreadingOptions#createReconnectionExecutor(String) reconnection executor}. - *

- * A queue size > 0 does not - * necessarily indicate a backlog as some tasks may not have been scheduled to execute yet. - *

- * If the executor's task queue is not accessible – which happens when the executor - * is not an instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a write timed out. * - * @return The size of the work queue for the reconnection executor, - * or -1, if that number is unknown. + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a write timed out. */ - public Gauge getReconnectionSchedulerQueueSize() { - return reconnectionSchedulerQueueSize; + public Counter getRetriesOnWriteTimeout() { + return retriesOnWriteTimeout; } /** - * Returns the number of queued up tasks in the {@link ThreadingOptions#createScheduledTasksExecutor(String) scheduled tasks executor}. - *

- * A queue size > 0 does not - * necessarily indicate a backlog as some tasks may not have been scheduled to execute yet. - *

- * If the executor's task queue is not accessible – which happens when the executor - * is not an instance of {@link ThreadPoolExecutor} – then this gauge returns -1. + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unavailable exception. * - * @return The size of the work queue for the scheduled tasks executor, - * or -1, if that number is unknown. + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unavailable exception. */ - public Gauge getTaskSchedulerQueueSize() { - return taskSchedulerQueueSize; + public Counter getRetriesOnUnavailable() { + return retriesOnUnavailable; } - void shutdown() { - if (jmxReporter != null) - jmxReporter.stop(); + /** + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a client timeout. + * + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a client timeout. + */ + public Counter getRetriesOnClientTimeout() { + return retriesOnClientTimeout; } - private static Gauge buildQueueSizeGauge(final BlockingQueue queue) { - if (queue != null) { - return new Gauge() { - @Override - public Integer getValue() { - return queue.size(); - } - }; - } else { - return new Gauge() { - @Override - public Integer getValue() { - return -1; - } - }; - } + /** + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a connection error. + * + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a connection error. + */ + public Counter getRetriesOnConnectionError() { + return retriesOnConnectionError; } /** - * Metrics on errors encountered. + * Returns the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unexpected error. + * + * @return the number of times a request was retried due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unexpected error. 
*/ - public class Errors { - - private final Counter connectionErrors = registry.counter("connection-errors"); - private final Counter authenticationErrors = registry.counter("authentication-errors"); - - private final Counter writeTimeouts = registry.counter("write-timeouts"); - private final Counter readTimeouts = registry.counter("read-timeouts"); - private final Counter unavailables = registry.counter("unavailables"); - private final Counter clientTimeouts = registry.counter("client-timeouts"); - - private final Counter otherErrors = registry.counter("other-errors"); - - private final Counter retries = registry.counter("retries"); - private final Counter retriesOnWriteTimeout = registry.counter("retries-on-write-timeout"); - private final Counter retriesOnReadTimeout = registry.counter("retries-on-read-timeout"); - private final Counter retriesOnUnavailable = registry.counter("retries-on-unavailable"); - private final Counter retriesOnClientTimeout = registry.counter("retries-on-client-timeout"); - private final Counter retriesOnConnectionError = registry.counter("retries-on-connection-error"); - private final Counter retriesOnOtherErrors = registry.counter("retries-on-other-errors"); - - private final Counter ignores = registry.counter("ignores"); - private final Counter ignoresOnWriteTimeout = registry.counter("ignores-on-write-timeout"); - private final Counter ignoresOnReadTimeout = registry.counter("ignores-on-read-timeout"); - private final Counter ignoresOnUnavailable = registry.counter("ignores-on-unavailable"); - private final Counter ignoresOnClientTimeout = registry.counter("ignores-on-client-timeout"); - private final Counter ignoresOnConnectionError = registry.counter("ignores-on-connection-error"); - private final Counter ignoresOnOtherErrors = registry.counter("ignores-on-other-errors"); - - private final Counter speculativeExecutions = registry.counter("speculative-executions"); - - /** - * Returns the number of errors while connecting to Cassandra nodes. - *

- * This represents the number of times that a request to a Cassandra node - * has failed due to a connection problem. This thus also corresponds to - * how often the driver had to pick a fallback host for a request. - *

- * You can expect a few connection errors when a Cassandra node fails - * (or is stopped) ,but if that number grows continuously you likely have - * a problem. - * - * @return the number of errors while connecting to Cassandra nodes. - */ - public Counter getConnectionErrors() { - return connectionErrors; - } - - /** - * Returns the number of authentication errors while connecting to Cassandra nodes. - * - * @return the number of errors. - */ - public Counter getAuthenticationErrors() { - return authenticationErrors; - } - - /** - * Returns the number of write requests that returned a timeout (independently - * of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). - * - * @return the number of write timeout. - */ - public Counter getWriteTimeouts() { - return writeTimeouts; - } - - /** - * Returns the number of read requests that returned a timeout (independently - * of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}). - * - * @return the number of read timeout. - */ - public Counter getReadTimeouts() { - return readTimeouts; - } - - /** - * Returns the number of requests that returned an unavailable exception - * (independently of the final decision taken by the - * {@link com.datastax.driver.core.policies.RetryPolicy}). - * - * @return the number of unavailable exceptions. - */ - public Counter getUnavailables() { - return unavailables; - } - - /** - * Returns the number of requests that timed out before the driver - * received a response. - * - * @return the number of client timeouts. - */ - public Counter getClientTimeouts() { - return clientTimeouts; - } - - /** - * Returns the number of requests that returned errors not accounted for by - * another metric. This includes all types of invalid requests. - * - * @return the number of requests errors not accounted by another - * metric. - */ - public Counter getOthers() { - return otherErrors; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}. - */ - public Counter getRetries() { - return retries; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * read timed out. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * read timed out. - */ - public Counter getRetriesOnReadTimeout() { - return retriesOnReadTimeout; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * write timed out. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * write timed out. - */ - public Counter getRetriesOnWriteTimeout() { - return retriesOnWriteTimeout; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unavailable exception. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unavailable exception. 
- */ - public Counter getRetriesOnUnavailable() { - return retriesOnUnavailable; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * client timeout. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * client timeout. - */ - public Counter getRetriesOnClientTimeout() { - return retriesOnClientTimeout; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * connection error. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * connection error. - */ - public Counter getRetriesOnConnectionError() { - return retriesOnConnectionError; - } - - /** - * Returns the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unexpected error. - * - * @return the number of times a request was retried due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unexpected error. - */ - public Counter getRetriesOnOtherErrors() { - return retriesOnOtherErrors; - } + public Counter getRetriesOnOtherErrors() { + return retriesOnOtherErrors; + } - /** - * Returns the number of times a request was ignored - * due to the {@link com.datastax.driver.core.policies.RetryPolicy}, for - * example due to timeouts or unavailability. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}. - */ - public Counter getIgnores() { - return ignores; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, for example due to timeouts or + * unavailability. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}. + */ + public Counter getIgnores() { + return ignores; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * read timed out. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * read timed out. - */ - public Counter getIgnoresOnReadTimeout() { - return ignoresOnReadTimeout; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a read timed out. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a read timed out. + */ + public Counter getIgnoresOnReadTimeout() { + return ignoresOnReadTimeout; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * write timed out. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * write timed out. - */ - public Counter getIgnoresOnWriteTimeout() { - return ignoresOnWriteTimeout; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a write timed out. 
+ * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a write timed out. + */ + public Counter getIgnoresOnWriteTimeout() { + return ignoresOnWriteTimeout; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unavailable exception. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unavailable exception. - */ - public Counter getIgnoresOnUnavailable() { - return ignoresOnUnavailable; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unavailable exception. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unavailable exception. + */ + public Counter getIgnoresOnUnavailable() { + return ignoresOnUnavailable; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * client timeout. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * client timeout. - */ - public Counter getIgnoresOnClientTimeout() { - return ignoresOnClientTimeout; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a client timeout. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a client timeout. + */ + public Counter getIgnoresOnClientTimeout() { + return ignoresOnClientTimeout; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * connection error. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after a - * connection error. - */ - public Counter getIgnoresOnConnectionError() { - return ignoresOnConnectionError; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a connection error. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after a connection error. + */ + public Counter getIgnoresOnConnectionError() { + return ignoresOnConnectionError; + } - /** - * Returns the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unexpected error. - * - * @return the number of times a request was ignored due to the - * {@link com.datastax.driver.core.policies.RetryPolicy}, after an - * unexpected error. - */ - public Counter getIgnoresOnOtherErrors() { - return ignoresOnOtherErrors; - } + /** + * Returns the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unexpected error. + * + * @return the number of times a request was ignored due to the {@link + * com.datastax.driver.core.policies.RetryPolicy}, after an unexpected error. 
+ */ + public Counter getIgnoresOnOtherErrors() { + return ignoresOnOtherErrors; + } - /** - * Returns the number of times a speculative execution was started - * because a previous execution did not complete within the delay - * specified by {@link SpeculativeExecutionPolicy}. - * - * @return the number of speculative executions. - */ - public Counter getSpeculativeExecutions() { - return speculativeExecutions; - } + /** + * Returns the number of times a speculative execution was started because a previous execution + * did not complete within the delay specified by {@link SpeculativeExecutionPolicy}. + * + * @return the number of speculative executions. + */ + public Counter getSpeculativeExecutions() { + return speculativeExecutions; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java b/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java index 4b43027321c..f069cf808bb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/MetricsOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,45 @@ */ package com.datastax.driver.core; -/** - * {@link Metrics} options. - */ +/** {@link Metrics} options. */ public class MetricsOptions { - private final boolean metricsEnabled; - private final boolean jmxEnabled; + private final boolean metricsEnabled; + private final boolean jmxEnabled; - /** - * Creates a new {@code MetricsOptions} object with default values (metrics enabled, JMX reporting enabled). - */ - public MetricsOptions() { - this(true, true); - } + /** + * Creates a new {@code MetricsOptions} object with default values (metrics enabled, JMX reporting + * enabled). + */ + public MetricsOptions() { + this(true, true); + } - /** - * Creates a new {@code MetricsOptions} object. - * - * @param jmxEnabled whether to enable JMX reporting or not. - */ - public MetricsOptions(boolean enabled, boolean jmxEnabled) { - this.metricsEnabled = enabled; - this.jmxEnabled = jmxEnabled; - } + /** + * Creates a new {@code MetricsOptions} object. + * + * @param jmxEnabled whether to enable JMX reporting or not. + */ + public MetricsOptions(boolean enabled, boolean jmxEnabled) { + this.metricsEnabled = enabled; + this.jmxEnabled = jmxEnabled; + } - /** - * Returns whether metrics are enabled. - * - * @return whether metrics are enabled. - */ - public boolean isEnabled() { - return metricsEnabled; - } + /** + * Returns whether metrics are enabled. + * + * @return whether metrics are enabled. 
+ */ + public boolean isEnabled() { + return metricsEnabled; + } - /** - * Returns whether JMX reporting is enabled. - * - * @return whether JMX reporting is enabled. - */ - public boolean isJMXReportingEnabled() { - return jmxEnabled; - } + /** + * Returns whether JMX reporting is enabled. + * + * @return whether JMX reporting is enabled. + */ + public boolean isJMXReportingEnabled() { + return jmxEnabled; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/MetricsUtil.java b/driver-core/src/main/java/com/datastax/driver/core/MetricsUtil.java new file mode 100644 index 00000000000..0c3d39eae22 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/MetricsUtil.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetAddress; + +public class MetricsUtil { + + public static String hostMetricName(String prefix, Host host) { + EndPoint endPoint = host.getEndPoint(); + if (endPoint instanceof TranslatedAddressEndPoint) { + InetAddress address = endPoint.resolve().getAddress(); + return hostMetricNameFromAddress(prefix, address); + } else { + // We have no guarantee that endpoints resolve to unique addresses + return prefix + endPoint.toString(); + } + } + + private static String hostMetricNameFromAddress(String prefix, InetAddress address) { + StringBuilder result = new StringBuilder(prefix); + boolean first = true; + for (byte b : address.getAddress()) { + if (first) { + first = false; + } else { + result.append('_'); + } + result.append(b & 0xFF); + } + return result.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Native.java b/driver-core/src/main/java/com/datastax/driver/core/Native.java index aba985b6426..00f6a271e52 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Native.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Native.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.driver.core; +import java.lang.reflect.Method; import jnr.ffi.LibraryLoader; import jnr.ffi.Pointer; import jnr.ffi.Runtime; @@ -24,221 +27,218 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.lang.reflect.Method; - /** - * Helper class to deal with native system calls through the - * JNR library. - *

- * The driver can benefit from native system calls to improve its performance and accuracy - * in some situations. - *

- * Currently, the following features may be used by the driver when available: + * Helper class to deal with native system calls through the JNR library. + * + *

The driver can benefit from native system calls to improve its performance and accuracy in + * some situations. + * + *

Currently, the following features may be used by the driver when available: + * *

- * <ol>
- * <li>{@link #currentTimeMicros()}: thanks to a system call to {@code gettimeofday()},
- * the driver is able to generate timestamps with true microsecond precision
- * (see {@link AtomicMonotonicTimestampGenerator} or {@link ThreadLocalMonotonicTimestampGenerator} for
- * more information);
- * <li>{@link #processId()}: thanks to a system call to {@code getpid()},
- * the driver has access to the JVM's process ID it is running under – which
- * makes time-based UUID generation easier and more reliable (see {@link com.datastax.driver.core.utils.UUIDs UUIDs}
- * for more information).
- * </ol>
+ * <ol>
+ *   <li>{@link #currentTimeMicros()}: thanks to a system call to {@code gettimeofday()}, the driver
+ *       is able to generate timestamps with true microsecond precision (see {@link
+ *       AtomicMonotonicTimestampGenerator} or {@link ThreadLocalMonotonicTimestampGenerator} for
+ *       more information);
+ *   <li>{@link #processId()}: thanks to a system call to {@code getpid()}, the driver has access to
+ *       the JVM's process ID it is running under – which makes time-based UUID generation easier
+ *       and more reliable (see {@link com.datastax.driver.core.utils.UUIDs UUIDs} for more
+ *       information).
+ * </ol>
- *

- * The availability of the aforementioned system calls depends on the underlying operation system's - * capabilities. For instance, {@code gettimeofday()} is not available under Windows systems. - * You can check if any of the system calls exposed through this class is available - * by calling {@link #isGettimeofdayAvailable()} or {@link #isGetpidAvailable()}. - *

- * Note: This class is public because it needs to be accessible from other packages of the Java driver, - * but it is not meant to be used directly by client code. + * + *

The availability of the aforementioned system calls depends on the underlying operating + * system's capabilities. For instance, {@code gettimeofday()} is not available under Windows + * systems. You can check if any of the system calls exposed through this class is available by + * calling {@link #isGettimeofdayAvailable()} or {@link #isGetpidAvailable()}. + * + *
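For illustration only (not part of the patch), a minimal sketch of the availability guards described above; NativeCallsSample is a hypothetical class name, and, as the paragraph below notes, Native is not meant to be called from client code, so this only shows how the guards relate to the two system calls.

```java
import com.datastax.driver.core.Native;

final class NativeCallsSample {
  /** Microsecond timestamp when gettimeofday() is usable, millisecond-based fallback otherwise. */
  static long timestampMicros() {
    return Native.isGettimeofdayAvailable()
        ? Native.currentTimeMicros()
        : System.currentTimeMillis() * 1000;
  }

  /** PID of the current JVM, or -1 when getpid() is not available on this platform. */
  static int pidOrUnknown() {
    return Native.isGetpidAvailable() ? Native.processId() : -1;
  }
}
```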

Note: This class is public because it needs to be accessible from other packages of the Java + * driver, but it is not meant to be used directly by client code. * * @see JNR library on Github */ public final class Native { - private static final Logger LOGGER = LoggerFactory.getLogger(Native.class); - - private static class LibCLoader { + private static final Logger LOGGER = LoggerFactory.getLogger(Native.class); - /** - * Timeval struct. - * - * @see GETTIMEOFDAY(2) - */ - static class Timeval extends Struct { + private static class LibCLoader { - public final time_t tv_sec = new time_t(); - - public final Unsigned32 tv_usec = new Unsigned32(); - - public Timeval(Runtime runtime) { - super(runtime); - } - } - - /** - * Interface for LIBC calls through JNR. - * Note that this interface must be declared public. - */ - public interface LibC { - - /** - * JNR call to {@code gettimeofday}. - * - * @param tv Timeval struct - * @param unused Timezone struct (unused) - * @return 0 for success, or -1 for failure - * @see GETTIMEOFDAY(2) - */ - int gettimeofday(@Out @Transient Timeval tv, Pointer unused); + /** + * Timeval struct. + * + * @see GETTIMEOFDAY(2) + */ + static class Timeval extends Struct { - } + public final time_t tv_sec = new time_t(); - private static final LibC LIB_C; - - private static final Runtime LIB_C_RUNTIME; - - private static final boolean GETTIMEOFDAY_AVAILABLE; - - static { - LibC libc; - Runtime runtime = null; - try { - libc = LibraryLoader.create(LibC.class).load("c"); - runtime = Runtime.getRuntime(libc); - } catch (Throwable t) { - libc = null; // dereference proxy to library if runtime could not be loaded - if (LOGGER.isDebugEnabled()) - LOGGER.debug("Could not load JNR C Library, native system calls through this library will not be available", t); - else - LOGGER.info("Could not load JNR C Library, native system calls through this library will not be available " + - "(set this logger level to DEBUG to see the full stack trace)."); - - } - LIB_C = libc; - LIB_C_RUNTIME = runtime; - boolean gettimeofday = false; - if (LIB_C_RUNTIME != null) { - try { - gettimeofday = LIB_C.gettimeofday(new Timeval(LIB_C_RUNTIME), null) == 0; - } catch (Throwable t) { - if (LOGGER.isDebugEnabled()) - LOGGER.debug("Native calls to gettimeofday() not available on this system.", t); - else - LOGGER.info("Native calls to gettimeofday() not available on this system " + - "(set this logger level to DEBUG to see the full stack trace)."); - } - } - GETTIMEOFDAY_AVAILABLE = gettimeofday; - } + public final Unsigned32 tv_usec = new Unsigned32(); + public Timeval(Runtime runtime) { + super(runtime); + } } - private static class PosixLoader { - - public static final jnr.posix.POSIX POSIX; - - private static final boolean GETPID_AVAILABLE; - - static { - jnr.posix.POSIX posix; - try { - // use reflection below to get the classloader a chance to load this class - Class posixHandler = Class.forName("jnr.posix.POSIXHandler"); - Class defaultPosixHandler = Class.forName("jnr.posix.util.DefaultPOSIXHandler"); - Class posixFactory = Class.forName("jnr.posix.POSIXFactory"); - Method getPOSIX = posixFactory.getMethod("getPOSIX", posixHandler, Boolean.TYPE); - posix = (jnr.posix.POSIX) getPOSIX.invoke(null, defaultPosixHandler.newInstance(), true); - } catch (Throwable t) { - posix = null; - if (LOGGER.isDebugEnabled()) - LOGGER.debug("Could not load JNR POSIX Library, native system calls through this library will not be available.", t); - else - LOGGER.info("Could not load JNR POSIX Library, native 
system calls through this library will not be available " + - "(set this logger level to DEBUG to see the full stack trace)."); - } - POSIX = posix; - boolean getpid = false; - if (POSIX != null) { - try { - POSIX.getpid(); - getpid = true; - } catch (Throwable t) { - if (LOGGER.isDebugEnabled()) - LOGGER.debug("Native calls to getpid() not available on this system.", t); - else - LOGGER.info("Native calls to getpid() not available on this system " + - "(set this logger level to DEBUG to see the full stack trace)."); - } - } - GETPID_AVAILABLE = getpid; - } - + /** Interface for LIBC calls through JNR. Note that this interface must be declared public. */ + public interface LibC { + + /** + * JNR call to {@code gettimeofday}. + * + * @param tv Timeval struct + * @param unused Timezone struct (unused) + * @return 0 for success, or -1 for failure + * @see GETTIMEOFDAY(2) + */ + int gettimeofday(@Out @Transient Timeval tv, Pointer unused); } - /** - * Returns {@code true} if JNR C library is loaded and - * a call to {@code gettimeofday} is possible through this library - * on this system, and {@code false} otherwise. - * - * @return {@code true} if JNR C library is loaded and - * a call to {@code gettimeofday} is possible. - */ - public static boolean isGettimeofdayAvailable() { + private static final LibC LIB_C; + + private static final Runtime LIB_C_RUNTIME; + + private static final boolean GETTIMEOFDAY_AVAILABLE; + + static { + LibC libc; + Runtime runtime = null; + try { + libc = LibraryLoader.create(LibC.class).load("c"); + runtime = Runtime.getRuntime(libc); + } catch (Throwable t) { + libc = null; // dereference proxy to library if runtime could not be loaded + if (LOGGER.isDebugEnabled()) + LOGGER.debug( + "Could not load JNR C Library, native system calls through this library will not be available", + t); + else + LOGGER.info( + "Could not load JNR C Library, native system calls through this library will not be available " + + "(set this logger level to DEBUG to see the full stack trace)."); + } + LIB_C = libc; + LIB_C_RUNTIME = runtime; + boolean gettimeofday = false; + if (LIB_C_RUNTIME != null) { try { - return LibCLoader.GETTIMEOFDAY_AVAILABLE; - } catch (NoClassDefFoundError e) { - return false; + gettimeofday = LIB_C.gettimeofday(new Timeval(LIB_C_RUNTIME), null) == 0; + } catch (Throwable t) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Native calls to gettimeofday() not available on this system.", t); + else + LOGGER.info( + "Native calls to gettimeofday() not available on this system " + + "(set this logger level to DEBUG to see the full stack trace)."); } + } + GETTIMEOFDAY_AVAILABLE = gettimeofday; } - - /** - * Returns {@code true} if JNR POSIX library is loaded and - * a call to {@code getpid} is possible through this library - * on this system, and {@code false} otherwise. - * - * @return {@code true} if JNR POSIX library is loaded and - * a call to {@code getpid} is possible. 
- */ - public static boolean isGetpidAvailable() { + } + + private static class PosixLoader { + + public static final jnr.posix.POSIX POSIX; + + private static final boolean GETPID_AVAILABLE; + + static { + jnr.posix.POSIX posix; + try { + // use reflection below to get the classloader a chance to load this class + Class posixHandler = Class.forName("jnr.posix.POSIXHandler"); + Class defaultPosixHandler = Class.forName("jnr.posix.util.DefaultPOSIXHandler"); + Class posixFactory = Class.forName("jnr.posix.POSIXFactory"); + Method getPOSIX = posixFactory.getMethod("getPOSIX", posixHandler, Boolean.TYPE); + posix = (jnr.posix.POSIX) getPOSIX.invoke(null, defaultPosixHandler.newInstance(), true); + } catch (Throwable t) { + posix = null; + if (LOGGER.isDebugEnabled()) + LOGGER.debug( + "Could not load JNR POSIX Library, native system calls through this library will not be available.", + t); + else + LOGGER.info( + "Could not load JNR POSIX Library, native system calls through this library will not be available " + + "(set this logger level to DEBUG to see the full stack trace)."); + } + POSIX = posix; + boolean getpid = false; + if (POSIX != null) { try { - return PosixLoader.GETPID_AVAILABLE; - } catch (NoClassDefFoundError e) { - return false; + POSIX.getpid(); + getpid = true; + } catch (Throwable t) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Native calls to getpid() not available on this system.", t); + else + LOGGER.info( + "Native calls to getpid() not available on this system " + + "(set this logger level to DEBUG to see the full stack trace)."); } - + } + GETPID_AVAILABLE = getpid; } - - /** - * Returns the current timestamp with microsecond precision - * via a system call to {@code gettimeofday}, through JNR C library. - * - * @return the current timestamp with microsecond precision. - * @throws UnsupportedOperationException if JNR C library is not loaded or {@code gettimeofday} is not available. - * @throws IllegalStateException if the call to {@code gettimeofday} did not complete with return code 0. - */ - public static long currentTimeMicros() { - if (!isGettimeofdayAvailable()) - throw new UnsupportedOperationException("JNR C library not loaded or gettimeofday not available"); - LibCLoader.Timeval tv = new LibCLoader.Timeval(LibCLoader.LIB_C_RUNTIME); - int res = LibCLoader.LIB_C.gettimeofday(tv, null); - if (res != 0) - throw new IllegalStateException("Call to gettimeofday failed with result " + res); - return tv.tv_sec.get() * 1000000 + tv.tv_usec.get(); + } + + /** + * Returns {@code true} if JNR C library is loaded and a call to {@code gettimeofday} is possible + * through this library on this system, and {@code false} otherwise. + * + * @return {@code true} if JNR C library is loaded and a call to {@code gettimeofday} is possible. + */ + public static boolean isGettimeofdayAvailable() { + try { + return LibCLoader.GETTIMEOFDAY_AVAILABLE; + } catch (NoClassDefFoundError e) { + return false; } - - /** - * Returns the JVM's process identifier (PID) - * via a system call to {@code getpid}. - * - * @return the JVM's process identifier (PID). - * @throws UnsupportedOperationException if JNR POSIX library is not loaded or {@code getpid} is not available. 
- */ - public static int processId() { - if (!isGetpidAvailable()) - throw new UnsupportedOperationException("JNR POSIX library not loaded or getpid not available"); - return PosixLoader.POSIX.getpid(); + } + + /** + * Returns {@code true} if JNR POSIX library is loaded and a call to {@code getpid} is possible + * through this library on this system, and {@code false} otherwise. + * + * @return {@code true} if JNR POSIX library is loaded and a call to {@code getpid} is possible. + */ + public static boolean isGetpidAvailable() { + try { + return PosixLoader.GETPID_AVAILABLE; + } catch (NoClassDefFoundError e) { + return false; } - + } + + /** + * Returns the current timestamp with microsecond precision via a system call to {@code + * gettimeofday}, through JNR C library. + * + * @return the current timestamp with microsecond precision. + * @throws UnsupportedOperationException if JNR C library is not loaded or {@code gettimeofday} is + * not available. + * @throws IllegalStateException if the call to {@code gettimeofday} did not complete with return + * code 0. + */ + public static long currentTimeMicros() { + if (!isGettimeofdayAvailable()) + throw new UnsupportedOperationException( + "JNR C library not loaded or gettimeofday not available"); + LibCLoader.Timeval tv = new LibCLoader.Timeval(LibCLoader.LIB_C_RUNTIME); + int res = LibCLoader.LIB_C.gettimeofday(tv, null); + if (res != 0) throw new IllegalStateException("Call to gettimeofday failed with result " + res); + return tv.tv_sec.get() * 1000000 + tv.tv_usec.get(); + } + + /** + * Returns the JVM's process identifier (PID) via a system call to {@code getpid}. + * + * @return the JVM's process identifier (PID). + * @throws UnsupportedOperationException if JNR POSIX library is not loaded or {@code getpid} is + * not available. + */ + public static int processId() { + if (!isGetpidAvailable()) + throw new UnsupportedOperationException( + "JNR POSIX library not loaded or getpid not available"); + return PosixLoader.POSIX.getpid(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java b/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java index b4a8c619591..8b2919ec836 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/NettyOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,18 +24,18 @@ import io.netty.channel.socket.SocketChannel; import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; - import java.util.concurrent.ThreadFactory; /** * A set of hooks that allow clients to customize the driver's underlying Netty layer. - *

- * Clients that need to hook into the driver's underlying Netty layer can - * subclass this class and provide the necessary customization by overriding - * its methods. - *

- * Typically, clients would register this class with {@link Cluster#builder()}: - *

+ * + *

Clients that need to hook into the driver's underlying Netty layer can subclass this class and + * provide the necessary customization by overriding its methods. + * + *

Typically, clients would register this class with {@link Cluster#builder()}: + * + *

+ * *

  *     NettyOptions nettyOptions = ...
  *     Cluster cluster = Cluster.builder()
@@ -41,200 +43,206 @@
  *          .withNettyOptions(nettyOptions)
  *          .build();
  * 
- *

- * Extending the NettyOptions API - *

- * Contrary to other driver options, the options available in this class should - * be considered as advanced features and as such, they should only be - * modified by expert users. - *

- * A misconfiguration introduced by the means of this API can have unexpected results - * and cause the driver to completely fail to connect. - *

- * Moreover, since versions 2.0.9 and 2.1.4 (see JAVA-538), - * the driver is available in two different flavors: with a standard Maven dependency on Netty, - * or with a "shaded" (internalized) Netty dependency. - *

- * Given that NettyOptions API exposes Netty classes ({@link SocketChannel}, etc.), - * it should only be extended by clients using the non-shaded - * version of driver. - *

- * Extending this API with shaded Netty classes is not supported, - * and in particular for OSGi applications, it is likely that such a configuration would lead to - * compile and/or runtime errors. + * + *

Extending the NettyOptions API + * + *

Contrary to other driver options, the options available in this class should be considered as + * advanced features and as such, they should only be modified by expert users. + * + *

A misconfiguration introduced by means of this API can have unexpected results and + * cause the driver to completely fail to connect. + * + *

Moreover, since versions 2.0.9 and 2.1.4 (see JAVA-538), the driver is available in two + * different flavors: with a standard Maven dependency on Netty, or with a "shaded" (internalized) + * Netty dependency. + * + *

Given that the NettyOptions API exposes Netty classes ({@link SocketChannel}, etc.), it should + * only be extended by clients using the non-shaded version of the driver. + * + *

Extending this API with shaded Netty classes is not supported, and in particular for + * OSGi applications, it is likely that such a configuration would lead to compile and/or runtime + * errors. * * @since 2.0.10 */ public class NettyOptions { - /** - * The default instance of {@link NettyOptions} to use. - */ - public static final NettyOptions DEFAULT_INSTANCE = new NettyOptions(); + /** The default instance of {@link NettyOptions} to use. */ + public static final NettyOptions DEFAULT_INSTANCE = new NettyOptions(); - /** - * Return the {@code EventLoopGroup} instance to use. - *

- * This hook is invoked only once at {@link Cluster} initialization; - * the returned instance will be kept in use throughout the cluster lifecycle. - *

- * Typically, implementors would return a newly-created instance; - * it is however possible to re-use a shared instance, but in this - * case implementors should also override {@link #onClusterClose(EventLoopGroup)} - * to prevent the shared instance to be closed when the cluster is closed. - *

- * The default implementation returns a new instance of {@code io.netty.channel.epoll.EpollEventLoopGroup} - * if {@link NettyUtil#isEpollAvailable() epoll is available}, - * or {@code io.netty.channel.nio.NioEventLoopGroup} otherwise. - * - * @param threadFactory The {@link ThreadFactory} to use when creating a new {@code EventLoopGroup} instance; - * The driver will provide its own internal thread factory here. - * It is safe to ignore it and use another thread factory. Note however that for optimal - * performance it is recommended to use a factory that returns - * {@link io.netty.util.concurrent.FastThreadLocalThread} instances (such as Netty's - * {@link java.util.concurrent.Executors.DefaultThreadFactory}). - * @return the {@code EventLoopGroup} instance to use. - */ - public EventLoopGroup eventLoopGroup(ThreadFactory threadFactory) { - return NettyUtil.newEventLoopGroupInstance(threadFactory); - } + /** + * Return the {@code EventLoopGroup} instance to use. + * + *

This hook is invoked only once at {@link Cluster} initialization; the returned instance will + * be kept in use throughout the cluster lifecycle. + * + *

Typically, implementors would return a newly-created instance; it is however possible to + * re-use a shared instance, but in this case implementors should also override {@link + * #onClusterClose(EventLoopGroup)} to prevent the shared instance from being closed when the cluster + * is closed. + * + *
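For illustration only (not part of the patch), a sketch of the shared-instance scenario described in the preceding paragraph: the subclass hands the same group to every cluster and overrides onClusterClose so the driver does not shut it down. SharedEventLoopNettyOptions and its shutdown() method are hypothetical names; such an instance would be registered via Cluster.builder().withNettyOptions(...).

```java
import com.datastax.driver.core.NettyOptions;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import java.util.concurrent.ThreadFactory;

class SharedEventLoopNettyOptions extends NettyOptions {
  // One group reused by every Cluster built with these options.
  private final EventLoopGroup sharedGroup = new NioEventLoopGroup();

  @Override
  public EventLoopGroup eventLoopGroup(ThreadFactory threadFactory) {
    return sharedGroup;
  }

  @Override
  public void onClusterClose(EventLoopGroup eventLoopGroup) {
    // Intentionally a no-op: the shared group outlives any single cluster.
  }

  /** To be called by the application once all clusters using these options are closed. */
  void shutdown() {
    sharedGroup.shutdownGracefully().syncUninterruptibly();
  }
}
```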

The default implementation returns a new instance of {@code + * io.netty.channel.epoll.EpollEventLoopGroup} if {@link NettyUtil#isEpollAvailable() epoll is + * available}, or {@code io.netty.channel.nio.NioEventLoopGroup} otherwise. + * + * @param threadFactory The {@link ThreadFactory} to use when creating a new {@code + * EventLoopGroup} instance; The driver will provide its own internal thread factory here. It + * is safe to ignore it and use another thread factory. Note however that for optimal + * performance it is recommended to use a factory that returns {@link + * io.netty.util.concurrent.FastThreadLocalThread} instances (such as Netty's {@link + * java.util.concurrent.Executors.DefaultThreadFactory}). + * @return the {@code EventLoopGroup} instance to use. + */ + public EventLoopGroup eventLoopGroup(ThreadFactory threadFactory) { + return NettyUtil.newEventLoopGroupInstance(threadFactory); + } - /** - * Return the specific {@code SocketChannel} subclass to use. - *

- * This hook is invoked only once at {@link Cluster} initialization; - * the returned instance will then be used each time the driver creates a new {@link Connection} - * and configures a new instance of {@link Bootstrap} for it. - *

- * The default implementation returns {@code io.netty.channel.epoll.EpollSocketChannel} if {@link NettyUtil#isEpollAvailable() epoll is available}, - * or {@code io.netty.channel.socket.nio.NioSocketChannel} otherwise. - * - * @return The {@code SocketChannel} subclass to use. - */ - public Class channelClass() { - return NettyUtil.channelClass(); - } + /** + * Return the specific {@code SocketChannel} subclass to use. + * + *

This hook is invoked only once at {@link Cluster} initialization; the returned instance will + * then be used each time the driver creates a new {@link Connection} and configures a new + * instance of {@link Bootstrap} for it. + * + *

The default implementation returns {@code io.netty.channel.epoll.EpollSocketChannel} if + * {@link NettyUtil#isEpollAvailable() epoll is available}, or {@code + * io.netty.channel.socket.nio.NioSocketChannel} otherwise. + * + * @return The {@code SocketChannel} subclass to use. + */ + public Class channelClass() { + return NettyUtil.channelClass(); + } - /** - * Hook invoked each time the driver creates a new {@link Connection} - * and configures a new instance of {@link Bootstrap} for it. - *

- * This hook is guaranteed to be called after the driver has applied all - * {@link SocketOptions}s. - *

- * This is a good place to add extra {@link io.netty.channel.ChannelHandler ChannelOption}s to the boostrap; e.g. - * plug a custom {@link io.netty.buffer.ByteBufAllocator ByteBufAllocator} implementation: - *

- *

-     * ByteBufAllocator myCustomByteBufAllocator = ...
-     *
-     * public void afterBootstrapInitialized(Bootstrap bootstrap) {
-     *     bootstrap.option(ChannelOption.ALLOCATOR, myCustomByteBufAllocator);
-     * }
-     * 
- *

- * Note that the default implementation of this method configures a pooled {@code ByteBufAllocator} (Netty 4.0 - * defaults to unpooled). If you override this method to set unrelated options, make sure you call - * {@code super.afterBootstrapInitialized(bootstrap)}. - * - * @param bootstrap the {@link Bootstrap} being initialized. - */ - public void afterBootstrapInitialized(Bootstrap bootstrap) { - // In Netty 4.1.x, pooled will be the default, so this won't be necessary anymore - bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); - } + /** + * Hook invoked each time the driver creates a new {@link Connection} and configures a new + * instance of {@link Bootstrap} for it. + * + *

This hook is guaranteed to be called after the driver has applied all {@link + * SocketOptions}s. + * + *

This is a good place to add extra {@link io.netty.channel.ChannelOption ChannelOption}s to + * the bootstrap; e.g. plug a custom {@link io.netty.buffer.ByteBufAllocator ByteBufAllocator} + * implementation: + *

+ * + *

+   * ByteBufAllocator myCustomByteBufAllocator = ...
+   *
+   * public void afterBootstrapInitialized(Bootstrap bootstrap) {
+   *     bootstrap.option(ChannelOption.ALLOCATOR, myCustomByteBufAllocator);
+   * }
+   * 
+ * + *

Note that the default implementation of this method configures a pooled {@code + * ByteBufAllocator} (Netty 4.0 defaults to unpooled). If you override this method to set + * unrelated options, make sure you call {@code super.afterBootstrapInitialized(bootstrap)}. + * + * @param bootstrap the {@link Bootstrap} being initialized. + */ + public void afterBootstrapInitialized(Bootstrap bootstrap) { + // In Netty 4.1.x, pooled will be the default, so this won't be necessary anymore + bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); + } - /** - * Hook invoked each time the driver creates a new {@link Connection} - * and initializes the {@link SocketChannel channel}. - *
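For illustration only (not part of the patch), a sketch of the super-call requirement just described for afterBootstrapInitialized: the override keeps the pooled allocator configured by the default implementation and then sets one unrelated option. TunedBootstrapNettyOptions and the chosen option value are purely illustrative.

```java
import com.datastax.driver.core.NettyOptions;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelOption;

class TunedBootstrapNettyOptions extends NettyOptions {
  @Override
  public void afterBootstrapInitialized(Bootstrap bootstrap) {
    // Keep the pooled ByteBufAllocator configured by the default implementation.
    super.afterBootstrapInitialized(bootstrap);
    // Then apply an unrelated channel option of our own.
    bootstrap.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 64 * 1024);
  }
}
```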

- * This hook is guaranteed to be called after the driver has registered - * all its internal channel handlers, and applied the configured {@link SSLOptions}, if any. - *

- * This is a good place to add extra {@link io.netty.channel.ChannelHandler ChannelHandler}s - * to the channel's pipeline; e.g. to add a custom SSL handler to the beginning of the handler chain, - * do the following: - *

- *

-     * ChannelPipeline pipeline = channel.pipeline();
-     * SSLEngine myCustomSSLEngine = ...
-     * SslHandler myCustomSSLHandler = new SslHandler(myCustomSSLEngine);
-     * pipeline.addFirst("ssl", myCustomSSLHandler);
-     * 
- *

- * Note: if you intend to provide your own SSL implementation, - * do not enable the driver's built-in {@link SSLOptions} at the same time. - * - * @param channel the {@link SocketChannel} instance, after being initialized by the driver. - * @throws Exception if this methods encounters any errors. - */ - public void afterChannelInitialized(SocketChannel channel) throws Exception { - //noop - } + /** + * Hook invoked each time the driver creates a new {@link Connection} and initializes the {@link + * SocketChannel channel}. + * + *

This hook is guaranteed to be called after the driver has registered all its + * internal channel handlers, and applied the configured {@link SSLOptions}, if any. + * + *

This is a good place to add extra {@link io.netty.channel.ChannelHandler ChannelHandler}s to + * the channel's pipeline; e.g. to add a custom SSL handler to the beginning of the handler chain, + * do the following: + * + *

+ * + *

+   * ChannelPipeline pipeline = channel.pipeline();
+   * SSLEngine myCustomSSLEngine = ...
+   * SslHandler myCustomSSLHandler = new SslHandler(myCustomSSLEngine);
+   * pipeline.addFirst("ssl", myCustomSSLHandler);
+   * 
+ * + *

Note: if you intend to provide your own SSL implementation, do not enable the driver's + * built-in {@link SSLOptions} at the same time. + * + * @param channel the {@link SocketChannel} instance, after being initialized by the driver. + * @throws Exception if this method encounters any errors. + */ + public void afterChannelInitialized(SocketChannel channel) throws Exception { + // noop + } - /** - * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}. - *

- * This is guaranteed to be called only after all connections have been individually - * closed, and their channels closed, and only once per {@link EventLoopGroup} instance. - *

- * This gives the implementor a chance to close the {@link EventLoopGroup} properly, if required. - *

- * The default implementation initiates a {@link EventLoopGroup#shutdownGracefully() graceful shutdown} - * of the passed {@link EventLoopGroup}, then waits uninterruptibly for the shutdown to complete or timeout. - *

- * Implementation note: if the {@link EventLoopGroup} instance is being shared, or used for other purposes than to - * coordinate Netty events for the current cluster, then it should not be shut down here; - * subclasses would have to override this method accordingly to take the appropriate action. - * - * @param eventLoopGroup the event loop group used by the cluster being closed - */ - public void onClusterClose(EventLoopGroup eventLoopGroup) { - eventLoopGroup.shutdownGracefully().syncUninterruptibly(); - } + /** + * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}. + * + *

This is guaranteed to be called only after all connections have been individually closed, + * and their channels closed, and only once per {@link EventLoopGroup} instance. + * + *

This gives the implementor a chance to close the {@link EventLoopGroup} properly, if + * required. + * + *

The default implementation initiates a {@link EventLoopGroup#shutdownGracefully() graceful + * shutdown} of the passed {@link EventLoopGroup}, then waits uninterruptibly for the shutdown to + * complete or timeout. + * + *

Implementation note: if the {@link EventLoopGroup} instance is being shared, or used for + * other purposes than to coordinate Netty events for the current cluster, then it should not be + * shut down here; subclasses would have to override this method accordingly to take the + * appropriate action. + * + * @param eventLoopGroup the event loop group used by the cluster being closed + */ + public void onClusterClose(EventLoopGroup eventLoopGroup) { + eventLoopGroup.shutdownGracefully().syncUninterruptibly(); + } - /** - * Return the {@link Timer} instance used by Read Timeouts and Speculative Execution. - *

- * This hook is invoked only once at {@link Cluster} initialization; - * the returned instance will be kept in use throughout the cluster lifecycle. - *

- * Typically, implementors would return a newly-created instance; - * it is however possible to re-use a shared instance, but in this - * case implementors should also override {@link #onClusterClose(Timer)} - * to prevent the shared instance to be closed when the cluster is closed. - *

- * The default implementation returns a new instance created by {@link HashedWheelTimer#HashedWheelTimer(ThreadFactory)}. - * - * @param threadFactory The {@link ThreadFactory} to use when creating a new {@link HashedWheelTimer} instance; - * The driver will provide its own internal thread factory here. - * It is safe to ignore it and use another thread factory. Note however that for optimal - * performance it is recommended to use a factory that returns - * {@link io.netty.util.concurrent.FastThreadLocalThread} instances (such as Netty's - * {@link java.util.concurrent.Executors.DefaultThreadFactory}). - * @return the {@link Timer} instance to use. - */ - public Timer timer(ThreadFactory threadFactory) { - return new HashedWheelTimer(threadFactory); - } + /** + * Return the {@link Timer} instance used by Read Timeouts and Speculative Execution. + * + *

This hook is invoked only once at {@link Cluster} initialization; the returned instance will + * be kept in use throughout the cluster lifecycle. + * + *

Typically, implementors would return a newly-created instance; it is however possible to + * re-use a shared instance, but in this case implementors should also override {@link + * #onClusterClose(Timer)} to prevent the shared instance from being closed when the cluster is closed. + * + *

The default implementation returns a new instance created by {@link + * HashedWheelTimer#HashedWheelTimer(ThreadFactory)}. + * + * @param threadFactory The {@link ThreadFactory} to use when creating a new {@link + * HashedWheelTimer} instance; The driver will provide its own internal thread factory here. + * It is safe to ignore it and use another thread factory. Note however that for optimal + * performance it is recommended to use a factory that returns {@link + * io.netty.util.concurrent.FastThreadLocalThread} instances (such as Netty's {@link + * java.util.concurrent.Executors.DefaultThreadFactory}). + * @return the {@link Timer} instance to use. + */ + public Timer timer(ThreadFactory threadFactory) { + return new HashedWheelTimer(threadFactory); + } - /** - * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}. - *

- * This is guaranteed to be called only after all connections have been individually - * closed, and their channels closed, and only once per {@link Timer} instance. - *

- * This gives the implementor a chance to close the {@link Timer} properly, if required. - *

- * The default implementation calls a {@link Timer#stop()} of the passed {@link Timer} instance. - *

- * Implementation note: if the {@link Timer} instance is being shared, or used for other purposes than to - * schedule actions for the current cluster, than it should not be stopped here; - * subclasses would have to override this method accordingly to take the appropriate action. - * - * @param timer the timer used by the cluster being closed - */ - public void onClusterClose(Timer timer) { - timer.stop(); - } + /** + * Hook invoked when the cluster is shutting down after a call to {@link Cluster#close()}. + * + *

This is guaranteed to be called only after all connections have been individually closed, + * and their channels closed, and only once per {@link Timer} instance. + * + *

This gives the implementor a chance to close the {@link Timer} properly, if required. + * + *

The default implementation calls {@link Timer#stop()} on the passed {@link Timer} + * instance. + * + *

Implementation note: if the {@link Timer} instance is being shared, or used for other + * purposes than to schedule actions for the current cluster, than it should not be stopped here; + * subclasses would have to override this method accordingly to take the appropriate action. + * + * @param timer the timer used by the cluster being closed + */ + public void onClusterClose(Timer timer) { + timer.stop(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/NettySSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/NettySSLOptions.java index 88909609055..295e3fd12de 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/NettySSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/NettySSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,27 +23,29 @@ /** * {@link SSLOptions} implementation based on Netty's SSL context. - *

- * Netty has the ability to use OpenSSL if available, instead of the JDK's built-in engine. This yields better performance. + * + *
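For illustration only (not part of the patch), a sketch of the non-deprecated path this javadoc points to: building a Netty SslContext and passing it to RemoteEndpointAwareNettySSLOptions. The contact point and the absence of trust-store configuration are placeholders for a minimal example, not a recommended production setup; SslClusterSample is a hypothetical class name.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.RemoteEndpointAwareNettySSLOptions;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;

final class SslClusterSample {
  static Cluster buildSslCluster() throws javax.net.ssl.SSLException {
    // SslContextBuilder.forClient() is the fluent Netty API mentioned in the constructor javadoc;
    // a real application would also configure trust material (trustManager(...)) here.
    SslContext context = SslContextBuilder.forClient().build();
    return Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withSSL(new RemoteEndpointAwareNettySSLOptions(context))
        .build();
  }
}
```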

Netty has the ability to use OpenSSL if available, instead of the JDK's built-in engine. This + * yields better performance. * * @deprecated Use {@link RemoteEndpointAwareNettySSLOptions} instead. */ @SuppressWarnings("DeprecatedIsStillUsed") @Deprecated public class NettySSLOptions implements SSLOptions { - protected final SslContext context; + protected final SslContext context; - /** - * Create a new instance from a given context. - * - * @param context the Netty context. {@code SslContextBuilder.forClient()} provides a fluent API to build it. - */ - public NettySSLOptions(SslContext context) { - this.context = context; - } + /** + * Create a new instance from a given context. + * + * @param context the Netty context. {@code SslContextBuilder.forClient()} provides a fluent API + * to build it. + */ + public NettySSLOptions(SslContext context) { + this.context = context; + } - @Override - public SslHandler newSSLHandler(SocketChannel channel) { - return context.newHandler(channel.alloc()); - } + @Override + public SslHandler newSSLHandler(SocketChannel channel) { + return context.newHandler(channel.alloc()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java b/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java index 66ac351395c..1587169ea13 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java +++ b/driver-core/src/main/java/com/datastax/driver/core/NettyUtil.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,140 +22,139 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.lang.reflect.Constructor; import java.util.Locale; import java.util.concurrent.ThreadFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * A set of utilities related to the underlying Netty layer. - */ +/** A set of utilities related to the underlying Netty layer. 
*/ @SuppressWarnings("unchecked") class NettyUtil { - private static final boolean FORCE_NIO = SystemProperties.getBoolean("com.datastax.driver.FORCE_NIO", false); + private static final boolean FORCE_NIO = + SystemProperties.getBoolean("com.datastax.driver.FORCE_NIO", false); - private static final Logger LOGGER = LoggerFactory.getLogger(NettyUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(NettyUtil.class); - private static final boolean SHADED; + private static final boolean USE_EPOLL; - private static final boolean USE_EPOLL; + private static final Constructor EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR; - private static final Constructor EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR; + private static final Class EPOLL_CHANNEL_CLASS; - private static final Class EPOLL_CHANNEL_CLASS; + private static final Class[] EVENT_GROUP_ARGUMENTS = {int.class, ThreadFactory.class}; - private static final Class[] EVENT_GROUP_ARGUMENTS = {int.class, ThreadFactory.class}; + private static final String SHADING_DETECTION_STRING = + "io.netty.shadingdetection.ShadingDetection"; - static { - boolean shaded; - try { - // prevent this string from being shaded - Class.forName(String.format("%s.%s.channel.Channel", "io", "netty")); - shaded = false; - } catch (ClassNotFoundException e) { - try { - Class.forName("com.datastax.shaded.netty.channel.Channel"); - shaded = true; - } catch (ClassNotFoundException e1) { - throw new AssertionError("Cannot locate Netty classes in the classpath:" + e1); - } - } - SHADED = shaded; - boolean useEpoll = false; - if (!SHADED) { - try { - Class epoll = Class.forName("io.netty.channel.epoll.Epoll"); - if (FORCE_NIO) { - LOGGER.info("Found Netty's native epoll transport in the classpath, " - + "but NIO was forced through the FORCE_NIO system property."); - } else if (!System.getProperty("os.name", "").toLowerCase(Locale.US).equals("linux")) { - LOGGER.warn("Found Netty's native epoll transport, but not running on linux-based operating " + - "system. Using NIO instead."); - } else if (!(Boolean) epoll.getMethod("isAvailable").invoke(null)) { - LOGGER.warn("Found Netty's native epoll transport in the classpath, but epoll is not available. " - + "Using NIO instead.", (Throwable) epoll.getMethod("unavailabilityCause").invoke(null)); - } else { - LOGGER.info("Found Netty's native epoll transport in the classpath, using it"); - useEpoll = true; - } - } catch (ClassNotFoundException e) { - LOGGER.info("Did not find Netty's native epoll transport in the classpath, defaulting to NIO."); - } catch (Exception e) { - LOGGER.warn("Unexpected error trying to find Netty's native epoll transport in the classpath, defaulting to NIO.", e); - } + private static final boolean SHADED = + !SHADING_DETECTION_STRING.equals( + String.format("%s.%s.shadingdetection.ShadingDetection", "io", "netty")); + + static { + boolean useEpoll = false; + if (!SHADED) { + try { + Class epoll = Class.forName("io.netty.channel.epoll.Epoll"); + if (FORCE_NIO) { + LOGGER.info( + "Found Netty's native epoll transport in the classpath, " + + "but NIO was forced through the FORCE_NIO system property."); + } else if (!System.getProperty("os.name", "").toLowerCase(Locale.US).equals("linux")) { + LOGGER.warn( + "Found Netty's native epoll transport, but not running on linux-based operating " + + "system. Using NIO instead."); + } else if (!(Boolean) epoll.getMethod("isAvailable").invoke(null)) { + LOGGER.warn( + "Found Netty's native epoll transport in the classpath, but epoll is not available. 
" + + "Using NIO instead.", + (Throwable) epoll.getMethod("unavailabilityCause").invoke(null)); } else { - LOGGER.info("Detected shaded Netty classes in the classpath; native epoll transport will not work properly, " - + "defaulting to NIO."); + LOGGER.info("Found Netty's native epoll transport in the classpath, using it"); + useEpoll = true; } - USE_EPOLL = useEpoll; - Constructor constructor = null; - Class channelClass = null; - if (USE_EPOLL) { - try { - channelClass = (Class) Class.forName("io.netty.channel.epoll.EpollSocketChannel"); - Class epoolEventLoupGroupClass = Class.forName("io.netty.channel.epoll.EpollEventLoopGroup"); - constructor = (Constructor) epoolEventLoupGroupClass.getDeclaredConstructor(EVENT_GROUP_ARGUMENTS); - } catch (Exception e) { - throw new AssertionError("Netty's native epoll is in use but cannot locate Epoll classes, this should not happen: " + e); - } - } - EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR = constructor; - EPOLL_CHANNEL_CLASS = channelClass; + } catch (ClassNotFoundException e) { + LOGGER.info( + "Did not find Netty's native epoll transport in the classpath, defaulting to NIO."); + } catch (Exception e) { + LOGGER.warn( + "Unexpected error trying to find Netty's native epoll transport in the classpath, defaulting to NIO.", + e); + } + } else { + LOGGER.info( + "Detected shaded Netty classes in the classpath; native epoll transport will not work properly, " + + "defaulting to NIO."); } - - /** - * @return true if the current driver bundle is using shaded Netty classes, false otherwise. - */ - - public static boolean isShaded() { - return SHADED; - } - - /** - * @return true if native epoll transport is available in the classpath, false otherwise. - */ - public static boolean isEpollAvailable() { - return USE_EPOLL; + USE_EPOLL = useEpoll; + Constructor constructor = null; + Class channelClass = null; + if (USE_EPOLL) { + try { + channelClass = + (Class) + Class.forName("io.netty.channel.epoll.EpollSocketChannel"); + Class epoolEventLoupGroupClass = + Class.forName("io.netty.channel.epoll.EpollEventLoopGroup"); + constructor = + (Constructor) + epoolEventLoupGroupClass.getDeclaredConstructor(EVENT_GROUP_ARGUMENTS); + } catch (Exception e) { + throw new AssertionError( + "Netty's native epoll is in use but cannot locate Epoll classes, this should not happen: " + + e); + } } - - /** - * Return a new instance of {@link EventLoopGroup}. - *

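Editor's aside: the `com.datastax.driver.FORCE_NIO` flag read above is an ordinary JVM system property, so an application can opt out of the native epoll transport (for example to rule it out while debugging) by setting it before the driver initializes. A minimal sketch, assuming driver 3.x on the classpath; only the property name comes from the code above, the contact point and class name are illustrative:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class ForceNioExample {
  public static void main(String[] args) {
    // Must be set before the first Cluster is built: NettyUtil reads the
    // property from a static initializer, so later changes have no effect.
    System.setProperty("com.datastax.driver.FORCE_NIO", "true");

    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Session session = cluster.connect();
      // NettyUtil logs that NIO was forced even if epoll is on the classpath.
      session.close();
    } finally {
      cluster.close();
    }
  }
}
```

The same effect can be had on the command line with `-Dcom.datastax.driver.FORCE_NIO=true`.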
- * Returns an instance of {@link io.netty.channel.epoll.EpollEventLoopGroup} if {@link #isEpollAvailable() epoll is available}, - * or an instance of {@link NioEventLoopGroup} otherwise. - * - * @param factory the {@link ThreadFactory} instance to use to create the new instance of {@link EventLoopGroup} - * @return a new instance of {@link EventLoopGroup} - */ - public static EventLoopGroup newEventLoopGroupInstance(ThreadFactory factory) { - if (isEpollAvailable()) { - try { - return EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR.newInstance(0, factory); - } catch (Exception e) { - throw Throwables.propagate(e); // should not happen - } - } else { - return new NioEventLoopGroup(0, factory); - } + EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR = constructor; + EPOLL_CHANNEL_CLASS = channelClass; + } + + /** @return true if the current driver bundle is using shaded Netty classes, false otherwise. */ + public static boolean isShaded() { + return SHADED; + } + + /** @return true if native epoll transport is available in the classpath, false otherwise. */ + public static boolean isEpollAvailable() { + return USE_EPOLL; + } + + /** + * Return a new instance of {@link EventLoopGroup}. + * + *

Returns an instance of {@link io.netty.channel.epoll.EpollEventLoopGroup} if {@link + * #isEpollAvailable() epoll is available}, or an instance of {@link NioEventLoopGroup} otherwise. + * + * @param factory the {@link ThreadFactory} instance to use to create the new instance of {@link + * EventLoopGroup} + * @return a new instance of {@link EventLoopGroup} + */ + public static EventLoopGroup newEventLoopGroupInstance(ThreadFactory factory) { + if (isEpollAvailable()) { + try { + return EPOLL_EVENT_LOOP_GROUP_CONSTRUCTOR.newInstance(0, factory); + } catch (Exception e) { + throw Throwables.propagate(e); // should not happen + } + } else { + return new NioEventLoopGroup(0, factory); } - - /** - * Return the SocketChannel class to use. - *

- * Returns an instance of {@link io.netty.channel.epoll.EpollSocketChannel} if {@link #isEpollAvailable() epoll is available}, - * or an instance of {@link NioSocketChannel} otherwise. - * - * @return the SocketChannel class to use. - */ - public static Class channelClass() { - if (isEpollAvailable()) { - return EPOLL_CHANNEL_CLASS; - } else { - return NioSocketChannel.class; - } + } + + /** + * Return the SocketChannel class to use. + * + *

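Editor's aside: the two helpers documented above boil down to a standard Netty idiom. A hedged sketch of the same selection outside the driver (all class and method names are plain Netty 4 API; the wrapper class is made up):

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import java.util.concurrent.ThreadFactory;

final class TransportSelection {

  // Mirrors newEventLoopGroupInstance(): epoll when usable, NIO otherwise.
  static EventLoopGroup newEventLoopGroup(ThreadFactory factory) {
    return Epoll.isAvailable()
        ? new EpollEventLoopGroup(0, factory)
        : new NioEventLoopGroup(0, factory);
  }

  // Mirrors channelClass(): the channel implementation must match the group.
  static Class<? extends SocketChannel> channelClass() {
    return Epoll.isAvailable() ? EpollSocketChannel.class : NioSocketChannel.class;
  }
}
```

Note that the driver itself goes through reflection rather than referencing the epoll classes directly, so that the shaded and epoll-less classpaths still link.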
Returns an instance of {@link io.netty.channel.epoll.EpollSocketChannel} if {@link + * #isEpollAvailable() epoll is available}, or an instance of {@link NioSocketChannel} otherwise. + * + * @return the SocketChannel class to use. + */ + public static Class channelClass() { + if (isEpollAvailable()) { + return EPOLL_CHANNEL_CLASS; + } else { + return NioSocketChannel.class; } - + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/OutboundTrafficMeter.java b/driver-core/src/main/java/com/datastax/driver/core/OutboundTrafficMeter.java new file mode 100644 index 00000000000..701b3eb13cb --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/OutboundTrafficMeter.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.codahale.metrics.Meter; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; + +@Sharable +class OutboundTrafficMeter extends ChannelOutboundHandlerAdapter { + + private final Meter meter; + + OutboundTrafficMeter(Meter meter) { + this.meter = meter; + } + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + throws Exception { + if (msg instanceof ByteBuf) { + meter.mark(((ByteBuf) msg).readableBytes()); + } + super.write(ctx, msg, promise); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PagingIterable.java b/driver-core/src/main/java/com/datastax/driver/core/PagingIterable.java index 4a7fdb90aa4..606adf3be67 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PagingIterable.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PagingIterable.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,151 +18,139 @@ package com.datastax.driver.core; import com.google.common.util.concurrent.ListenableFuture; - import java.util.Iterator; import java.util.List; /** - * Defines an iterable whose elements can be remotely fetched and paged, - * possibly asynchronously. + * Defines an iterable whose elements can be remotely fetched and paged, possibly asynchronously. */ public interface PagingIterable, T> extends Iterable { - /** - * Returns whether this result set has more results. - * - * @return whether this result set has more results. - */ - boolean isExhausted(); - - /** - * Whether all results from this result set have been fetched from the - * database. - *

- * Note that if {@code isFullyFetched()}, then {@link #getAvailableWithoutFetching} - * will return how many rows remain in the result set before exhaustion. But - * please note that {@code !isFullyFetched()} never guarantees that the result set - * is not exhausted (you should call {@link #isExhausted()} to verify it). - * - * @return whether all results have been fetched. - */ - boolean isFullyFetched(); + /** + * Returns whether this result set has more results. + * + * @return whether this result set has more results. + */ + boolean isExhausted(); - /** - * The number of rows that can be retrieved from this result set without - * blocking to fetch. - * - * @return the number of rows readily available in this result set. If - * {@link #isFullyFetched()}, this is the total number of rows remaining - * in this result set (after which the result set will be exhausted). - */ - int getAvailableWithoutFetching(); + /** + * Whether all results from this result set have been fetched from the database. + * + *

Note that if {@code isFullyFetched()}, then {@link #getAvailableWithoutFetching} will return + * how many rows remain in the result set before exhaustion. But please note that {@code + * isFullyFetched()} never guarantees that the result set is exhausted (you should call {@link + * #isExhausted()} to verify it). + * + * @return whether all results have been fetched. + */ + boolean isFullyFetched(); - /** - * Force fetching the next page of results for this result set, if any. - *

- * This method is entirely optional. It will be called automatically while - * the result set is consumed (through {@link #one}, {@link #all} or iteration) - * when needed (i.e. when {@code getAvailableWithoutFetching() == 0} and - * {@code isFullyFetched() == false}). - *

- * You can however call this method manually to force the fetching of the - * next page of results. This can allow to prefetch results before they are - * strictly needed. For instance, if you want to prefetch the next page of - * results as soon as there is less than 100 rows readily available in this - * result set, you can do: - *

-     *   ResultSet rs = session.execute(...);
-     *   Iterator<Row> iter = rs.iterator();
-     *   while (iter.hasNext()) {
-     *       if (rs.getAvailableWithoutFetching() == 100 && !rs.isFullyFetched())
-     *           rs.fetchMoreResults();
-     *       Row row = iter.next()
-     *       ... process the row ...
-     *   }
-     * 
- * This method is not blocking, so in the example above, the call to {@code - * fetchMoreResults} will not block the processing of the 100 currently available - * rows (but {@code iter.hasNext()} will block once those rows have been processed - * until the fetch query returns, if it hasn't yet). - *

- * Only one page of results (for a given result set) can be - * fetched at any given time. If this method is called twice and the query - * triggered by the first call has not returned yet when the second one is - * performed, then the 2nd call will simply return a future on the currently - * in progress query. - * - * @return a future on the completion of fetching the next page of results. - * If the result set is already fully retrieved ({@code isFullyFetched() == true}), - * then the returned future will return immediately but not particular error will be - * thrown (you should thus call {@link #isFullyFetched()} to know if calling this - * method can be of any use}). - */ - ListenableFuture fetchMoreResults(); + /** + * The number of rows that can be retrieved from this result set without blocking to fetch. + * + * @return the number of rows readily available in this result set. If {@link #isFullyFetched()}, + * this is the total number of rows remaining in this result set (after which the result set + * will be exhausted). + */ + int getAvailableWithoutFetching(); - /** - * Returns the next result from this result set. - * - * @return the next row in this result set or null if this result set is - * exhausted. - */ - T one(); + /** + * Force fetching the next page of results for this result set, if any. + * + *

This method is entirely optional. It will be called automatically while the result set is + * consumed (through {@link #one}, {@link #all} or iteration) when needed (i.e. when {@code + * getAvailableWithoutFetching() == 0} and {@code isFullyFetched() == false}). + * + *

You can however call this method manually to force the fetching of the next page of results. + This can be used to prefetch results before they are strictly needed. For instance, if you want + to prefetch the next page of results as soon as there are fewer than 100 rows readily available + in this result set, you can do: + *

+   *   ResultSet rs = session.execute(...);
+   *   Iterator<Row> iter = rs.iterator();
+   *   while (iter.hasNext()) {
+   *       if (rs.getAvailableWithoutFetching() == 100 && !rs.isFullyFetched())
+   *           rs.fetchMoreResults();
+   *       Row row = iter.next();
+   *       ... process the row ...
+   *   }
+   * 
+ * + * This method is not blocking, so in the example above, the call to {@code fetchMoreResults} will + * not block the processing of the 100 currently available rows (but {@code iter.hasNext()} will + * block once those rows have been processed until the fetch query returns, if it hasn't yet). + * + *

Only one page of results (for a given result set) can be fetched at any given time. If this + method is called twice and the query triggered by the first call has not returned yet when the + second one is performed, then the 2nd call will simply return a future on the currently in + progress query. + * + * @return a future on the completion of fetching the next page of results. If the result set is + already fully retrieved ({@code isFullyFetched() == true}), then the returned future will + return immediately but no particular error will be thrown (you should thus call {@link + #isFullyFetched()} to know if calling this method can be of any use). + */ + ListenableFuture fetchMoreResults(); - /** - * Returns all the remaining rows in this result set as a list. - *
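Editor's aside: since `fetchMoreResults()` hands back a `ListenableFuture`, the prefetch shown in the snippet above can also be made reactive instead of being re-checked on every iteration. A hedged sketch against driver 3.x `ResultSet` (the threshold and class name are illustrative; the Guava callback API is the one bundled with the driver):

```java
import com.datastax.driver.core.ResultSet;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

final class Prefetcher {

  /** Starts fetching the next page as soon as the buffered rows run low. */
  static void prefetchIfNeeded(ResultSet rs) {
    if (rs.getAvailableWithoutFetching() < 100 && !rs.isFullyFetched()) {
      ListenableFuture<ResultSet> next = rs.fetchMoreResults();
      Futures.addCallback(
          next,
          new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet ignored) {
              // The next page is buffered; iteration will not block on it.
            }

            @Override
            public void onFailure(Throwable t) {
              // The background fetch failed; iteration will surface the error later.
            }
          },
          MoreExecutors.directExecutor());
    }
  }
}
```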

- * Note that, contrary to {@link #iterator()} or successive calls to - * {@link #one()}, this method forces fetching the full content of the result set - * at once, holding it all in memory in particular. It is thus recommended - * to prefer iterations through {@link #iterator()} when possible, especially - * if the result set can be big. - * - * @return a list containing the remaining results of this result set. The - * returned list is empty if and only the result set is exhausted. The result set - * will be exhausted after a call to this method. - */ - List all(); + /** + * Returns the next result from this result set. + * + * @return the next row in this result set or null if this result set is exhausted. + */ + T one(); - /** - * Returns an iterator over the rows contained in this result set. - *

- * The {@link Iterator#next} method is equivalent to calling {@link #one}. - * So this iterator will consume results from this result set and after a - * full iteration, the result set will be empty. - *

- * The returned iterator does not support the {@link Iterator#remove} method. - * - * @return an iterator that will consume and return the remaining rows of - * this result set. - */ - Iterator iterator(); + /** + * Returns all the remaining rows in this result set as a list. + * + *

Note that, contrary to {@link #iterator()} or successive calls to {@link #one()}, this + method forces fetching the full content of the result set at once, holding it all in memory in + particular. It is thus recommended to prefer iterations through {@link #iterator()} when + possible, especially if the result set can be big. + * + * @return a list containing the remaining results of this result set. The returned list is empty + if and only if the result set is exhausted. The result set will be exhausted after a call to + this method. + */ + List all(); - /** - * Returns information on the execution of the last query made for this result set. - *

- * Note that in most cases, a result set is fetched with only one query, but large - * result sets can be paged and thus be retrieved by multiple queries. In that - * case this method return the {@link ExecutionInfo} for the last query - * performed. To retrieve the information for all queries, use {@link #getAllExecutionInfo}. - *

- * The returned object includes basic information such as the queried hosts, - * but also the Cassandra query trace if tracing was enabled for the query. - * - * @return the execution info for the last query made for this result set. - */ - ExecutionInfo getExecutionInfo(); + /** + * Returns an iterator over the rows contained in this result set. + * + *

The {@link Iterator#next} method is equivalent to calling {@link #one}. So this iterator + * will consume results from this result set and after a full iteration, the result set will be + * empty. + * + *

The returned iterator does not support the {@link Iterator#remove} method. + * + * @return an iterator that will consume and return the remaining rows of this result set. + */ + Iterator iterator(); - /** - * Return the execution information for all queries made to retrieve this - * result set. - *

- * Unless the result set is large enough to get paged underneath, the returned - * list will be singleton. If paging has been used however, the returned list - * contains the {@link ExecutionInfo} objects for all the queries done to obtain this - * result set (at the time of the call) in the order those queries were made. - * - * @return a list of the execution info for all the queries made for this result set. - */ - List getAllExecutionInfo(); + /** + * Returns information on the execution of the last query made for this result set. + * + *

Note that in most cases, a result set is fetched with only one query, but large result sets + can be paged and thus be retrieved by multiple queries. In that case this method returns the + {@link ExecutionInfo} for the last query performed. To retrieve the information for all + queries, use {@link #getAllExecutionInfo}. + * + *

The returned object includes basic information such as the queried hosts, but also the + * Cassandra query trace if tracing was enabled for the query. + * + * @return the execution info for the last query made for this result set. + */ + ExecutionInfo getExecutionInfo(); + /** + * Return the execution information for all queries made to retrieve this result set. + * + *

Unless the result set is large enough to get paged underneath, the returned list will be + * singleton. If paging has been used however, the returned list contains the {@link + * ExecutionInfo} objects for all the queries done to obtain this result set (at the time of the + * call) in the order those queries were made. + * + * @return a list of the execution info for all the queries made for this result set. + */ + List getAllExecutionInfo(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PagingState.java b/driver-core/src/main/java/com/datastax/driver/core/PagingState.java index 708f74c92d6..9f9398513b2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PagingState.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PagingState.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,6 @@ import com.datastax.driver.core.exceptions.PagingStateException; import com.datastax.driver.core.utils.Bytes; - import java.nio.ByteBuffer; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -25,148 +26,158 @@ /** * The paging state of a query. - *

- * This object represents the next page to be fetched if the query is - * multi page. It can be saved and reused later on the same statement. - *

- * The PagingState can be serialized and deserialized either as a String - * or as a byte array. + * + *

This object represents the next page to be fetched if the query is multi page. It can be saved + * and reused later on the same statement. + * + *

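Editor's aside: the "saved and reused later" contract above is easiest to see end to end. A hedged sketch of the round trip with driver 3.x (the query, keyspace and fetch size are made up; `getPagingState()`, `setPagingState()` and `fromString()` are the methods shown in this file and in `ExecutionInfo`/`Statement`):

```java
import com.datastax.driver.core.PagingState;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

final class PagingStateRoundTrip {

  /** Renders one page and returns an opaque token the client can send back later. */
  static String firstPage(Session session) {
    Statement stmt = new SimpleStatement("SELECT * FROM ks.my_table").setFetchSize(20);
    ResultSet rs = session.execute(stmt);
    int remaining = rs.getAvailableWithoutFetching();
    for (Row row : rs) {
      // ... render the row ...
      if (--remaining == 0) break; // stay on the current page, do not trigger a new fetch
    }
    PagingState state = rs.getExecutionInfo().getPagingState();
    return state == null ? null : state.toString(); // null once the last page was reached
  }

  /** Resumes where the previous call left off. */
  static ResultSet nextPage(Session session, String saved) {
    Statement stmt = new SimpleStatement("SELECT * FROM ks.my_table").setFetchSize(20);
    // Throws PagingStateException if the token was corrupted or belongs to a different statement.
    stmt.setPagingState(PagingState.fromString(saved));
    return session.execute(stmt);
  }
}
```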
The PagingState can be serialized and deserialized either as a String or as a byte array. * * @see Statement#setPagingState(PagingState) */ public class PagingState { - private final byte[] pagingState; - private final byte[] hash; - private final ProtocolVersion protocolVersion; - - PagingState(ByteBuffer pagingState, Statement statement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - this.pagingState = Bytes.getArray(pagingState); - this.hash = hash(statement, protocolVersion, codecRegistry); - this.protocolVersion = protocolVersion; + private final byte[] pagingState; + private final byte[] hash; + private final ProtocolVersion protocolVersion; + + PagingState( + ByteBuffer pagingState, + Statement statement, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry) { + this.pagingState = Bytes.getArray(pagingState); + this.hash = hash(statement, protocolVersion, codecRegistry); + this.protocolVersion = protocolVersion; + } + + // The serialized form of the paging state is: + // size of raw state|size of hash|raw state|hash|protocol version + // + // The protocol version might be absent, in which case it defaults to V2 (this is for backward + // compatibility with 2.0.10 where it is always absent). + private PagingState(byte[] complete) { + // Check the sizes in the beginning of the buffer, otherwise we cannot build the paging state + // object + ByteBuffer pagingStateBB = ByteBuffer.wrap(complete); + int pagingSize = pagingStateBB.getShort(); + int hashSize = pagingStateBB.getShort(); + if (pagingSize + hashSize != pagingStateBB.remaining() + && pagingSize + hashSize + 2 != pagingStateBB.remaining()) { + throw new PagingStateException( + "Cannot deserialize paging state, invalid format. " + + "The serialized form was corrupted, or not initially generated from a PagingState object."); } - - // The serialized form of the paging state is: - // size of raw state|size of hash|raw state|hash|protocol version - // - // The protocol version might be absent, in which case it defaults to V2 (this is for backward - // compatibility with 2.0.10 where it is always absent). - private PagingState(byte[] complete) { - // Check the sizes in the beginning of the buffer, otherwise we cannot build the paging state object - ByteBuffer pagingStateBB = ByteBuffer.wrap(complete); - int pagingSize = pagingStateBB.getShort(); - int hashSize = pagingStateBB.getShort(); - if (pagingSize + hashSize != pagingStateBB.remaining() && pagingSize + hashSize + 2 != pagingStateBB.remaining()) { - throw new PagingStateException("Cannot deserialize paging state, invalid format. " - + "The serialized form was corrupted, or not initially generated from a PagingState object."); - } - this.pagingState = new byte[pagingSize]; - pagingStateBB.get(this.pagingState); - this.hash = new byte[hashSize]; - pagingStateBB.get(this.hash); - this.protocolVersion = (pagingStateBB.remaining() > 0) - ? ProtocolVersion.fromInt(pagingStateBB.getShort()) - : ProtocolVersion.V2; + this.pagingState = new byte[pagingSize]; + pagingStateBB.get(this.pagingState); + this.hash = new byte[hashSize]; + pagingStateBB.get(this.hash); + this.protocolVersion = + (pagingStateBB.remaining() > 0) + ? 
ProtocolVersion.fromInt(pagingStateBB.getShort()) + : ProtocolVersion.V2; + } + + private byte[] hash( + Statement statement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + byte[] digest; + ByteBuffer[] values; + MessageDigest md; + if (statement instanceof StatementWrapper) { + statement = ((StatementWrapper) statement).getWrappedStatement(); } - - private byte[] hash(Statement statement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - byte[] digest; - ByteBuffer[] values; - MessageDigest md; - if (statement instanceof StatementWrapper) { - statement = ((StatementWrapper) statement).getWrappedStatement(); - } - assert !(statement instanceof BatchStatement); - try { - md = MessageDigest.getInstance("MD5"); - if (statement instanceof BoundStatement) { - BoundStatement bs = ((BoundStatement) statement); - md.update(bs.preparedStatement().getQueryString().getBytes()); - values = bs.wrapper.values; - } else { - //it is a RegularStatement since Batch statements are not allowed - RegularStatement rs = (RegularStatement) statement; - md.update(rs.getQueryString().getBytes()); - values = rs.getValues(protocolVersion, codecRegistry); - } - if (values != null) { - for (ByteBuffer value : values) { - md.update(value.duplicate()); - } - } - md.update(this.pagingState); - digest = md.digest(); - - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); + assert !(statement instanceof BatchStatement); + try { + md = MessageDigest.getInstance("MD5"); + if (statement instanceof BoundStatement) { + BoundStatement bs = ((BoundStatement) statement); + md.update(bs.preparedStatement().getQueryString().getBytes()); + values = bs.wrapper.values; + } else { + // it is a RegularStatement since Batch statements are not allowed + RegularStatement rs = (RegularStatement) statement; + md.update(rs.getQueryString().getBytes()); + values = rs.getValues(protocolVersion, codecRegistry); + } + if (values != null) { + for (ByteBuffer value : values) { + md.update(value.duplicate()); } - return digest; - } + } + md.update(this.pagingState); + digest = md.digest(); - boolean matches(Statement statement, CodecRegistry codecRegistry) { - byte[] toTest = hash(statement, protocolVersion, codecRegistry); - return Arrays.equals(toTest, this.hash); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); } - - private ByteBuffer generateCompleteOutput() { - ByteBuffer res = ByteBuffer.allocate(pagingState.length + hash.length + 6); - - res.putShort((short) pagingState.length); - res.putShort((short) hash.length); - - res.put(pagingState); - res.put(hash); - - res.putShort((short) protocolVersion.toInt()); - - res.rewind(); - - return res; - } - - ByteBuffer getRawState() { - return ByteBuffer.wrap(this.pagingState); - } - - @Override - public String toString() { - return Bytes.toRawHexString(generateCompleteOutput()); - } - - /** - * Create a PagingState object from a string previously generated with {@link #toString()}. - * - * @param string the string value. - * @return the PagingState object created. - * @throws PagingStateException if the string does not have the correct format. - */ - public static PagingState fromString(String string) { - try { - byte[] complete = Bytes.fromRawHexString(string, 0); - return new PagingState(complete); - } catch (Exception e) { - throw new PagingStateException("Cannot deserialize paging state, invalid format. 
" - + "The serialized form was corrupted, or not initially generated from a PagingState object.", e); - } - } - - /** - * Return a representation of the paging state object as a byte array. - * - * @return the paging state as a byte array. - */ - public byte[] toBytes() { - return generateCompleteOutput().array(); - } - - /** - * Create a PagingState object from a byte array previously generated with {@link #toBytes()}. - * - * @param pagingState The byte array representation. - * @return the PagingState object created. - * @throws PagingStateException if the byte array does not have the correct format. - */ - public static PagingState fromBytes(byte[] pagingState) { - return new PagingState(pagingState); + return digest; + } + + boolean matches(Statement statement, CodecRegistry codecRegistry) { + byte[] toTest = hash(statement, protocolVersion, codecRegistry); + return Arrays.equals(toTest, this.hash); + } + + private ByteBuffer generateCompleteOutput() { + ByteBuffer res = ByteBuffer.allocate(pagingState.length + hash.length + 6); + + res.putShort((short) pagingState.length); + res.putShort((short) hash.length); + + res.put(pagingState); + res.put(hash); + + res.putShort((short) protocolVersion.toInt()); + + res.rewind(); + + return res; + } + + ByteBuffer getRawState() { + return ByteBuffer.wrap(this.pagingState); + } + + @Override + public String toString() { + return Bytes.toRawHexString(generateCompleteOutput()); + } + + /** + * Create a PagingState object from a string previously generated with {@link #toString()}. + * + * @param string the string value. + * @return the PagingState object created. + * @throws PagingStateException if the string does not have the correct format. + */ + public static PagingState fromString(String string) { + try { + byte[] complete = Bytes.fromRawHexString(string, 0); + return new PagingState(complete); + } catch (Exception e) { + throw new PagingStateException( + "Cannot deserialize paging state, invalid format. " + + "The serialized form was corrupted, or not initially generated from a PagingState object.", + e); } + } + + /** + * Return a representation of the paging state object as a byte array. + * + * @return the paging state as a byte array. + */ + public byte[] toBytes() { + return generateCompleteOutput().array(); + } + + /** + * Create a PagingState object from a byte array previously generated with {@link #toBytes()}. + * + * @param pagingState The byte array representation. + * @return the PagingState object created. + * @throws PagingStateException if the byte array does not have the correct format. + */ + public static PagingState fromBytes(byte[] pagingState) { + return new PagingState(pagingState); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java b/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java index f9cefb0abbd..8bc9272d3b3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ParseUtils.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,577 +24,543 @@ import java.util.TimeZone; import java.util.concurrent.TimeUnit; -/** - * Simple utility class used to help parsing CQL values (mainly UDT and collection ones). - */ +/** Simple utility class used to help parsing CQL values (mainly UDT and collection ones). */ public abstract class ParseUtils { - /** - * Valid ISO-8601 patterns for CQL timestamp literals. - */ - private static final String[] iso8601Patterns = new String[]{ - "yyyy-MM-dd HH:mm", - "yyyy-MM-dd HH:mm:ss", - "yyyy-MM-dd HH:mmZ", - "yyyy-MM-dd HH:mm:ssZ", - "yyyy-MM-dd HH:mm:ss.SSS", - "yyyy-MM-dd HH:mm:ss.SSSZ", - "yyyy-MM-dd'T'HH:mm", - "yyyy-MM-dd'T'HH:mmZ", - "yyyy-MM-dd'T'HH:mm:ss", - "yyyy-MM-dd'T'HH:mm:ssZ", - "yyyy-MM-dd'T'HH:mm:ss.SSS", - "yyyy-MM-dd'T'HH:mm:ss.SSSZ", - "yyyy-MM-dd", - "yyyy-MM-ddZ" - }; - - /** - * Returns the index of the first character in toParse from idx that is not a "space". - * - * @param toParse the string to skip space on. - * @param idx the index to start skipping space from. - * @return the index of the first character in toParse from idx that is not a "space. - */ - public static int skipSpaces(String toParse, int idx) { - while (isBlank(toParse.charAt(idx)) && idx < toParse.length()) - ++idx; - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL value in toParse, returns the - * index of the first character after this value. - * - * @param toParse the string to skip a value form. - * @param idx the index to start parsing a value from. - * @return the index ending the CQL value starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL - * value. 
- */ - public static int skipCQLValue(String toParse, int idx) { - if (idx >= toParse.length()) - throw new IllegalArgumentException(); - - if (isBlank(toParse.charAt(idx))) - throw new IllegalArgumentException(); - - int cbrackets = 0; - int sbrackets = 0; - int parens = 0; - boolean inString = false; - - do { - char c = toParse.charAt(idx); - if (inString) { - if (c == '\'') { - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') { - ++idx; // this is an escaped quote, skip it - } else { - inString = false; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) - return idx + 1; - } - } - // Skip any other character - } else if (c == '\'') { - inString = true; - } else if (c == '{') { - ++cbrackets; - } else if (c == '[') { - ++sbrackets; - } else if (c == '(') { - ++parens; - } else if (c == '}') { - if (cbrackets == 0) - return idx; - - --cbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) - return idx + 1; - } else if (c == ']') { - if (sbrackets == 0) - return idx; - - --sbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) - return idx + 1; - } else if (c == ')') { - if (parens == 0) - return idx; - - --parens; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) - return idx + 1; - } else if (isBlank(c) || !isIdentifierChar(c)) { - if (cbrackets == 0 && sbrackets == 0 && parens == 0) - return idx; - } - } while (++idx < toParse.length()); - - if (inString || cbrackets != 0 || sbrackets != 0 || parens != 0) - throw new IllegalArgumentException(); - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL identifier in toParse, returns the - * index of the first character after this identifier. - * - * @param toParse the string to skip an identifier from. - * @param idx the index to start parsing an identifier from. - * @return the index ending the CQL identifier starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL - * identifier. - */ - public static int skipCQLId(String toParse, int idx) { - if (idx >= toParse.length()) - throw new IllegalArgumentException(); - - char c = toParse.charAt(idx); - if (isIdentifierChar(c)) { - while (idx < toParse.length() && isIdentifierChar(toParse.charAt(idx))) - idx++; - return idx; + /** Valid ISO-8601 patterns for CQL timestamp literals. */ + private static final String[] iso8601Patterns = + new String[] { + "yyyy-MM-dd HH:mm", + "yyyy-MM-dd HH:mm:ss", + "yyyy-MM-dd HH:mmZ", + "yyyy-MM-dd HH:mm:ssZ", + "yyyy-MM-dd HH:mm:ss.SSS", + "yyyy-MM-dd HH:mm:ss.SSSZ", + "yyyy-MM-dd'T'HH:mm", + "yyyy-MM-dd'T'HH:mmZ", + "yyyy-MM-dd'T'HH:mm:ss", + "yyyy-MM-dd'T'HH:mm:ssZ", + "yyyy-MM-dd'T'HH:mm:ss.SSS", + "yyyy-MM-dd'T'HH:mm:ss.SSSZ", + "yyyy-MM-dd", + "yyyy-MM-ddZ" + }; + + /** + * Returns the index of the first character in toParse from idx that is not a "space". + * + * @param toParse the string to skip space on. + * @param idx the index to start skipping space from. + * @return the index of the first character in toParse from idx that is not a "space. + */ + public static int skipSpaces(String toParse, int idx) { + while (isBlank(toParse.charAt(idx)) && idx < toParse.length()) ++idx; + return idx; + } + + /** + * Assuming that idx points to the beginning of a CQL value in toParse, returns the index of the + * first character after this value. + * + * @param toParse the string to skip a value form. + * @param idx the index to start parsing a value from. + * @return the index ending the CQL value starting at {@code idx}. 
+ * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL value. + */ + public static int skipCQLValue(String toParse, int idx) { + if (idx >= toParse.length()) throw new IllegalArgumentException(); + + if (isBlank(toParse.charAt(idx))) throw new IllegalArgumentException(); + + int cbrackets = 0; + int sbrackets = 0; + int parens = 0; + boolean inString = false; + + do { + char c = toParse.charAt(idx); + if (inString) { + if (c == '\'') { + if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') { + ++idx; // this is an escaped quote, skip it + } else { + inString = false; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; + } } - - if (c != '"') - throw new IllegalArgumentException(); - - while (++idx < toParse.length()) { - c = toParse.charAt(idx); - if (c != '"') - continue; - - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\"') - ++idx; // this is an escaped double quote, skip it - else - return idx + 1; - } - throw new IllegalArgumentException(); - } - - /** - * Return {@code true} if the given character - * is allowed in a CQL identifier, that is, - * if it is in the range: {@code [0..9a..zA..Z-+._&]}. - * - * @param c The character to inspect. - * @return {@code true} if the given character - * is allowed in a CQL identifier, {@code false} otherwise. - */ - public static boolean isIdentifierChar(int c) { - return (c >= '0' && c <= '9') - || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') - || c == '-' || c == '+' || c == '.' || c == '_' || c == '&'; + // Skip any other character + } else if (c == '\'') { + inString = true; + } else if (c == '{') { + ++cbrackets; + } else if (c == '[') { + ++sbrackets; + } else if (c == '(') { + ++parens; + } else if (c == '}') { + if (cbrackets == 0) return idx; + + --cbrackets; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; + } else if (c == ']') { + if (sbrackets == 0) return idx; + + --sbrackets; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; + } else if (c == ')') { + if (parens == 0) return idx; + + --parens; + if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; + } else if (isBlank(c) || !isIdentifierChar(c)) { + if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx; + } + } while (++idx < toParse.length()); + + if (inString || cbrackets != 0 || sbrackets != 0 || parens != 0) + throw new IllegalArgumentException(); + return idx; + } + + /** + * Assuming that idx points to the beginning of a CQL identifier in toParse, returns the index of + * the first character after this identifier. + * + * @param toParse the string to skip an identifier from. + * @param idx the index to start parsing an identifier from. + * @return the index ending the CQL identifier starting at {@code idx}. + * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL identifier. + */ + public static int skipCQLId(String toParse, int idx) { + if (idx >= toParse.length()) throw new IllegalArgumentException(); + + char c = toParse.charAt(idx); + if (isIdentifierChar(c)) { + while (idx < toParse.length() && isIdentifierChar(toParse.charAt(idx))) idx++; + return idx; } - /** - * Return {@code true} if the given character - * is a valid whitespace character in CQL, that is, - * if it is a regular space, a tabulation sign, - * or a new line sign. - * - * @param c The character to inspect. - * @return {@code true} if the given character - * is a valid whitespace character, {@code false} otherwise. 
- */ - public static boolean isBlank(int c) { - return c == ' ' || c == '\t' || c == '\n'; - } + if (c != '"') throw new IllegalArgumentException(); - /** - * Check whether the given string corresponds - * to a valid CQL long literal. - * Long literals are composed solely by digits, - * but can have an optional leading minus sign. - * - * @param str The string to inspect. - * @return {@code true} if the given string corresponds - * to a valid CQL integer literal, {@code false} otherwise. - */ - public static boolean isLongLiteral(String str) { - if (str == null || str.isEmpty()) - return false; - char[] chars = str.toCharArray(); - for (int i = 0; i < chars.length; i++) { - char c = chars[i]; - if ((c < '0' && (i != 0 || c != '-')) || c > '9') - return false; - } - return true; - } + while (++idx < toParse.length()) { + c = toParse.charAt(idx); + if (c != '"') continue; - /** - * Return {@code true} if the given string is surrounded - * by single quotes, and {@code false} otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded - * by single quotes, and {@code false} otherwise. - */ - public static boolean isQuoted(String value) { - return isQuoted(value, '\''); + if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\"') + ++idx; // this is an escaped double quote, skip it + else return idx + 1; } - - /** - * Quote the given string; single quotes are escaped. - * If the given string is null, this method returns a quoted empty string ({@code ''}). - * - * @param value The value to quote. - * @return The quoted string. - */ - public static String quote(String value) { - return quote(value, '\''); + throw new IllegalArgumentException(); + } + + /** + * Return {@code true} if the given character is allowed in a CQL identifier, that is, if it is in + * the range: {@code [0..9a..zA..Z-+._&]}. + * + * @param c The character to inspect. + * @return {@code true} if the given character is allowed in a CQL identifier, {@code false} + * otherwise. + */ + public static boolean isIdentifierChar(int c) { + return (c >= '0' && c <= '9') + || (c >= 'a' && c <= 'z') + || (c >= 'A' && c <= 'Z') + || c == '-' + || c == '+' + || c == '.' + || c == '_' + || c == '&'; + } + + /** + * Return {@code true} if the given character is a valid whitespace character in CQL, that is, if + * it is a regular space, a tabulation sign, or a new line sign. + * + * @param c The character to inspect. + * @return {@code true} if the given character is a valid whitespace character, {@code false} + * otherwise. + */ + public static boolean isBlank(int c) { + return c == ' ' || c == '\t' || c == '\n'; + } + + /** + * Check whether the given string corresponds to a valid CQL long literal. Long literals are + * composed solely by digits, but can have an optional leading minus sign. + * + * @param str The string to inspect. + * @return {@code true} if the given string corresponds to a valid CQL integer literal, {@code + * false} otherwise. + */ + public static boolean isLongLiteral(String str) { + if (str == null || str.isEmpty()) return false; + char[] chars = str.toCharArray(); + for (int i = 0; i < chars.length; i++) { + char c = chars[i]; + if ((c < '0' && (i != 0 || c != '-')) || c > '9') return false; } - - /** - * Unquote the given string if it is quoted; single quotes are unescaped. - * If the given string is not quoted, it is returned without any modification. - * - * @param value The string to unquote. - * @return The unquoted string. 
- */ - public static String unquote(String value) { - return unquote(value, '\''); + return true; + } + + /** + * Return {@code true} if the given string is surrounded by single quotes, and {@code false} + * otherwise. + * + * @param value The string to inspect. + * @return {@code true} if the given string is surrounded by single quotes, and {@code false} + * otherwise. + */ + public static boolean isQuoted(String value) { + return isQuoted(value, '\''); + } + + /** + * Quote the given string; single quotes are escaped. If the given string is null, this method + * returns a quoted empty string ({@code ''}). + * + * @param value The value to quote. + * @return The quoted string. + */ + public static String quote(String value) { + return quote(value, '\''); + } + + /** + * Unquote the given string if it is quoted; single quotes are unescaped. If the given string is + * not quoted, it is returned without any modification. + * + * @param value The string to unquote. + * @return The unquoted string. + */ + public static String unquote(String value) { + return unquote(value, '\''); + } + + /** + * Return {@code true} if the given string is surrounded by double quotes, and {@code false} + * otherwise. + * + * @param value The string to inspect. + * @return {@code true} if the given string is surrounded by double quotes, and {@code false} + * otherwise. + */ + public static boolean isDoubleQuoted(String value) { + return isQuoted(value, '\"'); + } + + /** + * Double quote the given string; double quotes are escaped. If the given string is null, this + * method returns a quoted empty string ({@code ""}). + * + * @param value The value to double quote. + * @return The double quoted string. + */ + public static String doubleQuote(String value) { + return quote(value, '"'); + } + + /** + * Unquote the given string if it is double quoted; double quotes are unescaped. If the given + * string is not double quoted, it is returned without any modification. + * + * @param value The string to un-double quote. + * @return The un-double quoted string. + */ + public static String unDoubleQuote(String value) { + return unquote(value, '"'); + } + + /** + * Parse the given string as a date, using one of the accepted ISO-8601 date patterns. + * + *

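Editor's aside: the quoting and literal helpers reformatted above are public API of `ParseUtils`, and their behaviour is easier to remember from a couple of concrete values. A hedged sketch (method names come from this file, the literals are illustrative):

```java
import com.datastax.driver.core.ParseUtils;

public class QuotingExamples {
  public static void main(String[] args) {
    // Single quotes delimit CQL string literals; embedded quotes are doubled.
    System.out.println(ParseUtils.quote("it's"));                // 'it''s'
    System.out.println(ParseUtils.unquote("'it''s'"));           // it's

    // Double quotes delimit case-sensitive identifiers.
    System.out.println(ParseUtils.doubleQuote("MyTable"));       // "MyTable"
    System.out.println(ParseUtils.unDoubleQuote("\"MyTable\"")); // MyTable

    // Long literals are digits only, with an optional leading minus sign.
    System.out.println(ParseUtils.isLongLiteral("-42"));         // true
    System.out.println(ParseUtils.isLongLiteral("4 2"));         // false
  }
}
```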
This method is adapted from Apache Commons {@code DateUtils.parseStrictly()} method (that is + * used Cassandra side to parse date strings).. + * + * @throws ParseException If the given string is not a valid ISO-8601 date. + * @see 'Working with + * timestamps' section of CQL specification + */ + public static Date parseDate(String str) throws ParseException { + SimpleDateFormat parser = new SimpleDateFormat(); + parser.setLenient(false); + // set a default timezone for patterns that do not provide one + parser.setTimeZone(TimeZone.getTimeZone("UTC")); + // Java 6 has very limited support for ISO-8601 time zone formats, + // so we need to transform the string first + // so that accepted patterns are correctly handled, + // such as Z for UTC, or "+00:00" instead of "+0000". + // Note: we cannot use the X letter in the pattern + // because it has been introduced in Java 7. + str = str.replaceAll("(\\+|\\-)(\\d\\d):(\\d\\d)$", "$1$2$3"); + str = str.replaceAll("Z$", "+0000"); + ParsePosition pos = new ParsePosition(0); + for (String parsePattern : iso8601Patterns) { + parser.applyPattern(parsePattern); + pos.setIndex(0); + Date date = parser.parse(str, pos); + if (date != null && pos.getIndex() == str.length()) { + return date; + } } - - /** - * Return {@code true} if the given string is surrounded - * by double quotes, and {@code false} otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded - * by double quotes, and {@code false} otherwise. - */ - public static boolean isDoubleQuoted(String value) { - return isQuoted(value, '\"'); + throw new ParseException("Unable to parse the date: " + str, -1); + } + + /** + * Parse the given string as a date, using the supplied date pattern. + * + *

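Editor's aside: a hedged usage sketch for the ISO-8601 parsing above and its pattern-specific variant; the accepted shapes are exactly the `iso8601Patterns` entries, and the sample inputs are illustrative:

```java
import com.datastax.driver.core.ParseUtils;
import java.text.ParseException;
import java.util.Date;

public class ParseDateExamples {
  public static void main(String[] args) throws ParseException {
    // Zone designators are normalized first, so 'Z' and '+01:00' style offsets both work.
    Date utc = ParseUtils.parseDate("2017-01-01T12:30:00Z");
    Date offset = ParseUtils.parseDate("2017-01-01 12:30:00.123+0100");

    // The two-argument variant restricts the input to one explicit pattern.
    Date dayOnly = ParseUtils.parseDate("2017-01-01", "yyyy-MM-dd");

    System.out.println(utc + " / " + offset + " / " + dayOnly);
  }
}
```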
This method is adapted from Apache Commons {@code DateUtils.parseStrictly()} method (that is + * used Cassandra side to parse date strings).. + * + * @throws ParseException If the given string cannot be parsed with the given pattern. + * @see 'Working with + * timestamps' section of CQL specification + */ + public static Date parseDate(String str, String pattern) throws ParseException { + SimpleDateFormat parser = new SimpleDateFormat(); + parser.setLenient(false); + // set a default timezone for patterns that do not provide one + parser.setTimeZone(TimeZone.getTimeZone("UTC")); + // Java 6 has very limited support for ISO-8601 time zone formats, + // so we need to transform the string first + // so that accepted patterns are correctly handled, + // such as Z for UTC, or "+00:00" instead of "+0000". + // Note: we cannot use the X letter in the pattern + // because it has been introduced in Java 7. + str = str.replaceAll("(\\+|\\-)(\\d\\d):(\\d\\d)$", "$1$2$3"); + str = str.replaceAll("Z$", "+0000"); + ParsePosition pos = new ParsePosition(0); + parser.applyPattern(pattern); + pos.setIndex(0); + Date date = parser.parse(str, pos); + if (date != null && pos.getIndex() == str.length()) { + return date; } - - /** - * Double quote the given string; double quotes are escaped. - * If the given string is null, this method returns a quoted empty string ({@code ""}). - * - * @param value The value to double quote. - * @return The double quoted string. - */ - public static String doubleQuote(String value) { - return quote(value, '"'); + throw new ParseException("Unable to parse the date: " + str, -1); + } + + /** + * Parse the given string as a time, using the following time pattern: {@code + * hh:mm:ss[.fffffffff]}. + * + *

This method is loosely based on {@code java.sql.Timestamp}. + * + * @param str The string to parse. + * @return A long value representing the number of nanoseconds since midnight. + * @throws ParseException if the string cannot be parsed. + * @see 'Working with time' + * section of CQL specification + */ + public static long parseTime(String str) throws ParseException { + String nanos_s; + + long hour; + long minute; + long second; + long a_nanos = 0; + + String formatError = "Timestamp format must be hh:mm:ss[.fffffffff]"; + String zeros = "000000000"; + + if (str == null) throw new IllegalArgumentException(formatError); + str = str.trim(); + + // Parse the time + int firstColon = str.indexOf(':'); + int secondColon = str.indexOf(':', firstColon + 1); + + // Convert the time; default missing nanos + if (firstColon > 0 && secondColon > 0 && secondColon < str.length() - 1) { + int period = str.indexOf('.', secondColon + 1); + hour = Integer.parseInt(str.substring(0, firstColon)); + if (hour < 0 || hour >= 24) throw new IllegalArgumentException("Hour out of bounds."); + + minute = Integer.parseInt(str.substring(firstColon + 1, secondColon)); + if (minute < 0 || minute >= 60) throw new IllegalArgumentException("Minute out of bounds."); + + if (period > 0 && period < str.length() - 1) { + second = Integer.parseInt(str.substring(secondColon + 1, period)); + if (second < 0 || second >= 60) throw new IllegalArgumentException("Second out of bounds."); + + nanos_s = str.substring(period + 1); + if (nanos_s.length() > 9) throw new IllegalArgumentException(formatError); + if (!Character.isDigit(nanos_s.charAt(0))) throw new IllegalArgumentException(formatError); + nanos_s = nanos_s + zeros.substring(0, 9 - nanos_s.length()); + a_nanos = Integer.parseInt(nanos_s); + } else if (period > 0) throw new ParseException(formatError, -1); + else { + second = Integer.parseInt(str.substring(secondColon + 1)); + if (second < 0 || second >= 60) throw new ParseException("Second out of bounds.", -1); + } + } else throw new ParseException(formatError, -1); + + long rawTime = 0; + rawTime += TimeUnit.HOURS.toNanos(hour); + rawTime += TimeUnit.MINUTES.toNanos(minute); + rawTime += TimeUnit.SECONDS.toNanos(second); + rawTime += a_nanos; + return rawTime; + } + + /** + * Format the given long value as a CQL time literal, using the following time pattern: {@code + * hh:mm:ss[.fffffffff]}. + * + * @param value A long value representing the number of nanoseconds since midnight. + * @return The formatted value. + * @see 'Working with time' + * section of CQL specification + */ + public static String formatTime(long value) { + int nano = (int) (value % 1000000000); + value -= nano; + value /= 1000000000; + int seconds = (int) (value % 60); + value -= seconds; + value /= 60; + int minutes = (int) (value % 60); + value -= minutes; + value /= 60; + int hours = (int) (value % 24); + value -= hours; + value /= 24; + assert (value == 0); + StringBuilder sb = new StringBuilder(); + leftPadZeros(hours, 2, sb); + sb.append(":"); + leftPadZeros(minutes, 2, sb); + sb.append(":"); + leftPadZeros(seconds, 2, sb); + sb.append("."); + leftPadZeros(nano, 9, sb); + return sb.toString(); + } + + /** + * Return {@code true} if the given string is surrounded by the quote character given, and {@code + * false} otherwise. + * + * @param value The string to inspect. + * @return {@code true} if the given string is surrounded by the quote character, and {@code + * false} otherwise. 
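Editor's aside: the `parseTime`/`formatTime` pair above round-trips a literal through the nanoseconds-since-midnight representation. A hedged sketch (values illustrative):

```java
import com.datastax.driver.core.ParseUtils;
import java.text.ParseException;

public class TimeLiteralExamples {
  public static void main(String[] args) throws ParseException {
    // Nanoseconds since midnight for 13:30:54.234.
    long nanos = ParseUtils.parseTime("13:30:54.234");
    System.out.println(nanos);                        // 48654234000000

    // formatTime always emits the fractional part, padded to nine digits.
    System.out.println(ParseUtils.formatTime(nanos)); // 13:30:54.234000000
  }
}
```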
+ */ + private static boolean isQuoted(String value, char quoteChar) { + return value != null + && value.length() > 1 + && value.charAt(0) == quoteChar + && value.charAt(value.length() - 1) == quoteChar; + } + + /** + * @param quoteChar " or ' + * @return A quoted empty string. + */ + private static String emptyQuoted(char quoteChar) { + // don't handle non quote characters, this is done so that these are interned and don't create + // repeated empty quoted strings. + assert quoteChar == '"' || quoteChar == '\''; + if (quoteChar == '"') return "\"\""; + else return "''"; + } + + /** + * Quotes text and escapes any existing quotes in the text. {@code String.replace()} is a bit too + * inefficient (see JAVA-67, JAVA-1262). + * + * @param text The text. + * @param quoteChar The character to use as a quote. + * @return The text with surrounded in quotes with all existing quotes escaped with (i.e. ' + * becomes '') + */ + private static String quote(String text, char quoteChar) { + if (text == null || text.isEmpty()) return emptyQuoted(quoteChar); + + int nbMatch = 0; + int start = -1; + do { + start = text.indexOf(quoteChar, start + 1); + if (start != -1) ++nbMatch; + } while (start != -1); + + // no quotes found that need to be escaped, simply surround in quotes and return. + if (nbMatch == 0) return quoteChar + text + quoteChar; + + // 2 for beginning and end quotes. + // length for original text + // nbMatch for escape characters to add to quotes to be escaped. + int newLength = 2 + text.length() + nbMatch; + char[] result = new char[newLength]; + result[0] = quoteChar; + result[newLength - 1] = quoteChar; + int newIdx = 1; + for (int i = 0; i < text.length(); i++) { + char c = text.charAt(i); + if (c == quoteChar) { + // escape quote with another occurrence. + result[newIdx++] = c; + result[newIdx++] = c; + } else { + result[newIdx++] = c; + } } - - /** - * Unquote the given string if it is double quoted; double quotes are unescaped. - * If the given string is not double quoted, it is returned without any modification. - * - * @param value The string to un-double quote. - * @return The un-double quoted string. - */ - public static String unDoubleQuote(String value) { - return unquote(value, '"'); - } - - /** - * Parse the given string as a date, using one of the accepted ISO-8601 date patterns. - *

- * This method is adapted from Apache Commons {@code DateUtils.parseStrictly()} method (that is used Cassandra side - * to parse date strings).. - * - * @throws ParseException If the given string is not a valid ISO-8601 date. - * @see 'Working with timestamps' section of CQL specification - */ - public static Date parseDate(String str) throws ParseException { - SimpleDateFormat parser = new SimpleDateFormat(); - parser.setLenient(false); - // set a default timezone for patterns that do not provide one - parser.setTimeZone(TimeZone.getTimeZone("UTC")); - // Java 6 has very limited support for ISO-8601 time zone formats, - // so we need to transform the string first - // so that accepted patterns are correctly handled, - // such as Z for UTC, or "+00:00" instead of "+0000". - // Note: we cannot use the X letter in the pattern - // because it has been introduced in Java 7. - str = str.replaceAll("(\\+|\\-)(\\d\\d):(\\d\\d)$", "$1$2$3"); - str = str.replaceAll("Z$", "+0000"); - ParsePosition pos = new ParsePosition(0); - for (String parsePattern : iso8601Patterns) { - parser.applyPattern(parsePattern); - pos.setIndex(0); - Date date = parser.parse(str, pos); - if (date != null && pos.getIndex() == str.length()) { - return date; - } + return new String(result); + } + + /** + * Unquotes text and unescapes non surrounding quotes. {@code String.replace()} is a bit too + * inefficient (see JAVA-67, JAVA-1262). + * + * @param text The text + * @param quoteChar The character to use as a quote. + * @return The text with surrounding quotes removed and non surrounding quotes unescaped (i.e. '' + * becomes ') + */ + private static String unquote(String text, char quoteChar) { + if (!isQuoted(text, quoteChar)) return text; + + if (text.length() == 2) return ""; + + String search = emptyQuoted(quoteChar); + int nbMatch = 0; + int start = -1; + do { + start = text.indexOf(search, start + 2); + // ignore the second to last character occurrence, as the last character is a quote. + if (start != -1 && start != text.length() - 2) ++nbMatch; + } while (start != -1); + + // no escaped quotes found, simply remove surrounding quotes and return. + if (nbMatch == 0) return text.substring(1, text.length() - 1); + + // length of the new string will be its current length - the number of occurrences. + int newLength = text.length() - nbMatch - 2; + char[] result = new char[newLength]; + int newIdx = 0; + // track whenever a quoteChar is encountered and the previous character is not a quoteChar. + boolean firstFound = false; + for (int i = 1; i < text.length() - 1; i++) { + char c = text.charAt(i); + if (c == quoteChar) { + if (firstFound) { + // The previous character was a quoteChar, don't add this to result, this action in + // effect removes consecutive quotes. + firstFound = false; + } else { + // found a quoteChar and the previous character was not a quoteChar, include in result. + firstFound = true; + result[newIdx++] = c; } - throw new ParseException("Unable to parse the date: " + str, -1); + } else { + // non quoteChar encountered, include in result. + result[newIdx++] = c; + firstFound = false; + } } + return new String(result); + } - /** - * Parse the given string as a date, using the supplied date pattern. - *

- * This method is adapted from Apache Commons {@code DateUtils.parseStrictly()} method (that is used Cassandra side - * to parse date strings).. - * - * @throws ParseException If the given string cannot be parsed with the given pattern. - * @see 'Working with timestamps' section of CQL specification - */ - public static Date parseDate(String str, String pattern) throws ParseException { - SimpleDateFormat parser = new SimpleDateFormat(); - parser.setLenient(false); - // set a default timezone for patterns that do not provide one - parser.setTimeZone(TimeZone.getTimeZone("UTC")); - // Java 6 has very limited support for ISO-8601 time zone formats, - // so we need to transform the string first - // so that accepted patterns are correctly handled, - // such as Z for UTC, or "+00:00" instead of "+0000". - // Note: we cannot use the X letter in the pattern - // because it has been introduced in Java 7. - str = str.replaceAll("(\\+|\\-)(\\d\\d):(\\d\\d)$", "$1$2$3"); - str = str.replaceAll("Z$", "+0000"); - ParsePosition pos = new ParsePosition(0); - parser.applyPattern(pattern); - pos.setIndex(0); - Date date = parser.parse(str, pos); - if (date != null && pos.getIndex() == str.length()) { - return date; - } - throw new ParseException("Unable to parse the date: " + str, -1); - } - - /** - * Parse the given string as a time, using the following time pattern: {@code hh:mm:ss[.fffffffff]}. - *

- * This method is loosely based on {@code java.sql.Timestamp}. - * - * @param str The string to parse. - * @return A long value representing the number of nanoseconds since midnight. - * @throws ParseException if the string cannot be parsed. - * @see 'Working with time' section of CQL specification - */ - public static long parseTime(String str) throws ParseException { - String nanos_s; - - long hour; - long minute; - long second; - long a_nanos = 0; - - String formatError = "Timestamp format must be hh:mm:ss[.fffffffff]"; - String zeros = "000000000"; - - if (str == null) - throw new IllegalArgumentException(formatError); - str = str.trim(); - - // Parse the time - int firstColon = str.indexOf(':'); - int secondColon = str.indexOf(':', firstColon + 1); - - // Convert the time; default missing nanos - if (firstColon > 0 && secondColon > 0 && secondColon < str.length() - 1) { - int period = str.indexOf('.', secondColon + 1); - hour = Integer.parseInt(str.substring(0, firstColon)); - if (hour < 0 || hour >= 24) - throw new IllegalArgumentException("Hour out of bounds."); - - minute = Integer.parseInt(str.substring(firstColon + 1, secondColon)); - if (minute < 0 || minute >= 60) - throw new IllegalArgumentException("Minute out of bounds."); - - if (period > 0 && period < str.length() - 1) { - second = Integer.parseInt(str.substring(secondColon + 1, period)); - if (second < 0 || second >= 60) - throw new IllegalArgumentException("Second out of bounds."); - - nanos_s = str.substring(period + 1); - if (nanos_s.length() > 9) - throw new IllegalArgumentException(formatError); - if (!Character.isDigit(nanos_s.charAt(0))) - throw new IllegalArgumentException(formatError); - nanos_s = nanos_s + zeros.substring(0, 9 - nanos_s.length()); - a_nanos = Integer.parseInt(nanos_s); - } else if (period > 0) - throw new ParseException(formatError, -1); - else { - second = Integer.parseInt(str.substring(secondColon + 1)); - if (second < 0 || second >= 60) - throw new ParseException("Second out of bounds.", -1); - } - } else - throw new ParseException(formatError, -1); - - long rawTime = 0; - rawTime += TimeUnit.HOURS.toNanos(hour); - rawTime += TimeUnit.MINUTES.toNanos(minute); - rawTime += TimeUnit.SECONDS.toNanos(second); - rawTime += a_nanos; - return rawTime; - } - - /** - * Format the given long value as a CQL time literal, using the following time pattern: {@code hh:mm:ss[.fffffffff]}. - * - * @param value A long value representing the number of nanoseconds since midnight. - * @return The formatted value. - * @see 'Working with time' section of CQL specification - */ - public static String formatTime(long value) { - int nano = (int) (value % 1000000000); - value -= nano; - value /= 1000000000; - int seconds = (int) (value % 60); - value -= seconds; - value /= 60; - int minutes = (int) (value % 60); - value -= minutes; - value /= 60; - int hours = (int) (value % 24); - value -= hours; - value /= 24; - assert (value == 0); - StringBuilder sb = new StringBuilder(); - leftPadZeros(hours, 2, sb); - sb.append(":"); - leftPadZeros(minutes, 2, sb); - sb.append(":"); - leftPadZeros(seconds, 2, sb); - sb.append("."); - leftPadZeros(nano, 9, sb); - return sb.toString(); - } - - /** - * Return {@code true} if the given string is surrounded - * by the quote character given, and {@code false} otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded - * by the quote character, and {@code false} otherwise. 
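The `parseTime`/`formatTime` pair above maps a CQL time literal to a count of nanoseconds since midnight and back, using the `hh:mm:ss[.fffffffff]` pattern. A worked example of that mapping, with an arbitrary literal and assuming `java.util.concurrent.TimeUnit` is imported:

```java
// "13:30:54.234" -> nanoseconds since midnight; the fraction is right-padded to 9 digits.
long nanos = TimeUnit.HOURS.toNanos(13)
    + TimeUnit.MINUTES.toNanos(30)
    + TimeUnit.SECONDS.toNanos(54)
    + 234000000L; // ".234" padded to "234000000"
// nanos == 48654234000000L, i.e. what parseTime("13:30:54.234") returns;
// formatTime(48654234000000L) renders it back as "13:30:54.234000000".
```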
- */ - private static boolean isQuoted(String value, char quoteChar) { - return value != null && value.length() > 1 - && value.charAt(0) == quoteChar && value.charAt(value.length() - 1) == quoteChar; - } - - /** - * @param quoteChar " or ' - * @return A quoted empty string. - */ - private static String emptyQuoted(char quoteChar) { - // don't handle non quote characters, this is done so that these are interned and don't create - // repeated empty quoted strings. - assert quoteChar == '"' || quoteChar == '\''; - if (quoteChar == '"') - return "\"\""; - else - return "''"; - } - - - /** - * Quotes text and escapes any existing quotes in the text. - * {@code String.replace()} is a bit too inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text. - * @param quoteChar The character to use as a quote. - * @return The text with surrounded in quotes with all existing quotes escaped with (i.e. ' becomes '') - */ - private static String quote(String text, char quoteChar) { - if (text == null || text.isEmpty()) - return emptyQuoted(quoteChar); - - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(quoteChar, start + 1); - if (start != -1) - ++nbMatch; - } while (start != -1); - - // no quotes found that need to be escaped, simply surround in quotes and return. - if (nbMatch == 0) - return quoteChar + text + quoteChar; - - // 2 for beginning and end quotes. - // length for original text - // nbMatch for escape characters to add to quotes to be escaped. - int newLength = 2 + text.length() + nbMatch; - char[] result = new char[newLength]; - result[0] = quoteChar; - result[newLength - 1] = quoteChar; - int newIdx = 1; - for (int i = 0; i < text.length(); i++) { - char c = text.charAt(i); - if (c == quoteChar) { - // escape quote with another occurrence. - result[newIdx++] = c; - result[newIdx++] = c; - } else { - result[newIdx++] = c; - } - } - return new String(result); - } - - /** - * Unquotes text and unescapes non surrounding quotes. - * {@code String.replace()} is a bit too inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text - * @param quoteChar The character to use as a quote. - * @return The text with surrounding quotes removed and non surrounding quotes unescaped (i.e. '' becomes ') - */ - private static String unquote(String text, char quoteChar) { - if (!isQuoted(text, quoteChar)) - return text; - - if (text.length() == 2) - return ""; - - String search = emptyQuoted(quoteChar); - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(search, start + 2); - // ignore the second to last character occurrence, as the last character is a quote. - if (start != -1 && start != text.length() - 2) - ++nbMatch; - } while (start != -1); - - // no escaped quotes found, simply remove surrounding quotes and return. - if (nbMatch == 0) - return text.substring(1, text.length() - 1); - - // length of the new string will be its current length - the number of occurrences. - int newLength = text.length() - nbMatch - 2; - char[] result = new char[newLength]; - int newIdx = 0; - // track whenever a quoteChar is encountered and the previous character is not a quoteChar. - boolean firstFound = false; - for (int i = 1; i < text.length() - 1; i++) { - char c = text.charAt(i); - if (c == quoteChar) { - if (firstFound) { - // The previous character was a quoteChar, don't add this to result, this action in - // effect removes consecutive quotes. - firstFound = false; - } else { - // found a quoteChar and the previous character was not a quoteChar, include in result. 
- firstFound = true; - result[newIdx++] = c; - } - } else { - // non quoteChar encountered, include in result. - result[newIdx++] = c; - firstFound = false; - } - } - return new String(result); - } - - private static void leftPadZeros(int value, int digits, StringBuilder sb) { - sb.append(String.format("%0" + digits + "d", value)); - } - - private ParseUtils() { - } + private static void leftPadZeros(int value, int digits, StringBuilder sb) { + sb.append(String.format("%0" + digits + "d", value)); + } + private ParseUtils() {} } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java b/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java index 081e2c9e06e..7ea076d1c5f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PerHostPercentileTracker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,52 +19,59 @@ /** * A {@code PercentileTracker} that maintains a separate histogram for each host. - *

- * This gives you per-host latency percentiles, meaning that each host will only be compared to itself. + * + *

This gives you per-host latency percentiles, meaning that each host will only be compared to + * itself. */ public class PerHostPercentileTracker extends PercentileTracker { - private PerHostPercentileTracker(long highestTrackableLatencyMillis, - int numberOfSignificantValueDigits, - int minRecordedValues, - long intervalMs) { - super(highestTrackableLatencyMillis, numberOfSignificantValueDigits, minRecordedValues, intervalMs); - } + private PerHostPercentileTracker( + long highestTrackableLatencyMillis, + int numberOfSignificantValueDigits, + int minRecordedValues, + long intervalMs) { + super( + highestTrackableLatencyMillis, + numberOfSignificantValueDigits, + minRecordedValues, + intervalMs); + } - @Override - protected Host computeKey(Host host, Statement statement, Exception exception) { - return host; - } + @Override + protected Host computeKey(Host host, Statement statement, Exception exception) { + return host; + } - /** - * Returns a builder to create a new instance. - * - * @param highestTrackableLatencyMillis the highest expected latency. If a higher value is reported, it will be - * ignored and a warning will be logged. A good rule of thumb is to set it - * slightly higher than {@link SocketOptions#getReadTimeoutMillis()}. - * @return the builder. - */ - public static Builder builder(long highestTrackableLatencyMillis) { - return new Builder(highestTrackableLatencyMillis); - } + /** + * Returns a builder to create a new instance. + * + * @param highestTrackableLatencyMillis the highest expected latency. If a higher value is + * reported, it will be ignored and a warning will be logged. A good rule of thumb is to set + * it slightly higher than {@link SocketOptions#getReadTimeoutMillis()}. + * @return the builder. + */ + public static Builder builder(long highestTrackableLatencyMillis) { + return new Builder(highestTrackableLatencyMillis); + } - /** - * Helper class to build {@code PerHostPercentileTracker} instances with a fluent interface. - */ - public static class Builder extends PercentileTracker.Builder { + /** Helper class to build {@code PerHostPercentileTracker} instances with a fluent interface. */ + public static class Builder extends PercentileTracker.Builder { - Builder(long highestTrackableLatencyMillis) { - super(highestTrackableLatencyMillis); - } + Builder(long highestTrackableLatencyMillis) { + super(highestTrackableLatencyMillis); + } - @Override - protected Builder self() { - return this; - } + @Override + protected Builder self() { + return this; + } - @Override - public PerHostPercentileTracker build() { - return new PerHostPercentileTracker(highestTrackableLatencyMillis, numberOfSignificantValueDigits, - minRecordedValues, intervalMs); - } + @Override + public PerHostPercentileTracker build() { + return new PerHostPercentileTracker( + highestTrackableLatencyMillis, + numberOfSignificantValueDigits, + minRecordedValues, + intervalMs); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PercentileTracker.java b/driver-core/src/main/java/com/datastax/driver/core/PercentileTracker.java index 6f60df2f1b2..cc60517745c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PercentileTracker.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PercentileTracker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
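The `PerHostPercentileTracker` changes above are formatting-only, so its public API is unchanged. A minimal construction sketch, assuming the usual `com.datastax.driver.core` and `java.util.concurrent.TimeUnit` imports; the contact point and threshold values are illustrative, and registering the tracker with `Cluster.register(LatencyTracker)` is the typical way to attach it when it is not already managed by a component such as `PercentileSpeculativeExecutionPolicy`:

```java
// Track per-host latency percentiles over 5-minute windows.
PerHostPercentileTracker tracker =
    PerHostPercentileTracker.builder(15000) // highest trackable latency in ms,
                                            // slightly above the read timeout per the javadoc
        .withInterval(5, TimeUnit.MINUTES)
        .withMinRecordedValues(1000)
        .withNumberOfSignificantValueDigits(3)
        .build();

Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
cluster.register(tracker); // the tracker only receives latency updates once registered
```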
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,328 +17,346 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import static com.google.common.base.Preconditions.checkArgument; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.QueryValidationException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.UnpreparedException; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import org.HdrHistogram.Histogram; import org.HdrHistogram.Recorder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Set; -import java.util.concurrent.*; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.concurrent.TimeUnit.*; - /** - * A {@link LatencyTracker} that records query latencies over a sliding time interval, and exposes an API to retrieve - * the latency at a given percentile. - *

- * Percentiles may be computed separately for different categories of requests; this is implementation-dependent and - * determined by {@link #computeKey(Host, Statement, Exception)}. - *

- * This class is used by percentile-aware components such as - * {@link QueryLogger.Builder#withDynamicThreshold(PercentileTracker, double)} QueryLogger} and - * {@link com.datastax.driver.core.policies.PercentileSpeculativeExecutionPolicy}. - *

- * It uses HdrHistogram to record latencies: - * for each category, there is a "live" histogram where current latencies are recorded, and a "cached", read-only - * histogram that is used when clients call {@link #getLatencyAtPercentile(Host, Statement, Exception, double)}. Each - * time the cached histogram becomes older than the interval, the two histograms are switched. Statistics will not be - * available during the first interval at cluster startup, since we don't have a cached histogram yet. + * A {@link LatencyTracker} that records query latencies over a sliding time interval, and exposes + * an API to retrieve the latency at a given percentile. + * + *

Percentiles may be computed separately for different categories of requests; this is + * implementation-dependent and determined by {@link #computeKey(Host, Statement, Exception)}. + * + *

This class is used by percentile-aware components such as {@link + * QueryLogger.Builder#withDynamicThreshold(PercentileTracker, double)} QueryLogger} and {@link + * com.datastax.driver.core.policies.PercentileSpeculativeExecutionPolicy}. + * + *

It uses HdrHistogram to record + * latencies: for each category, there is a "live" histogram where current latencies are recorded, + * and a "cached", read-only histogram that is used when clients call {@link + * #getLatencyAtPercentile(Host, Statement, Exception, double)}. Each time the cached histogram + * becomes older than the interval, the two histograms are switched. Statistics will not be + * available during the first interval at cluster startup, since we don't have a cached histogram + * yet. */ public abstract class PercentileTracker implements LatencyTracker { - private static final Logger logger = LoggerFactory.getLogger(PercentileTracker.class); - - private final long highestTrackableLatencyMillis; - private final int numberOfSignificantValueDigits; - private final int minRecordedValues; - private final long intervalMs; - - // The "live" recorders: this is where we store the latencies received from the cluster - private final ConcurrentMap recorders; - // The cached histograms, corresponding to the previous interval. This is where we get the percentiles from when the - // user requests them. Each histogram is valid for a given duration, when it gets stale we request a new one from - // the corresponding recorder. - private final ConcurrentMap cachedHistograms; - - /** - * Builds a new instance. - * - * @see Builder - */ - protected PercentileTracker(long highestTrackableLatencyMillis, - int numberOfSignificantValueDigits, - int minRecordedValues, - long intervalMs) { - this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; - this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; - this.minRecordedValues = minRecordedValues; - this.intervalMs = intervalMs; - this.recorders = new ConcurrentHashMap(); - this.cachedHistograms = new ConcurrentHashMap(); + private static final Logger logger = LoggerFactory.getLogger(PercentileTracker.class); + + private final long highestTrackableLatencyMillis; + private final int numberOfSignificantValueDigits; + private final int minRecordedValues; + private final long intervalMs; + + // The "live" recorders: this is where we store the latencies received from the cluster + private final ConcurrentMap recorders; + // The cached histograms, corresponding to the previous interval. This is where we get the + // percentiles from when the + // user requests them. Each histogram is valid for a given duration, when it gets stale we request + // a new one from + // the corresponding recorder. + private final ConcurrentMap cachedHistograms; + + /** + * Builds a new instance. + * + * @see Builder + */ + protected PercentileTracker( + long highestTrackableLatencyMillis, + int numberOfSignificantValueDigits, + int minRecordedValues, + long intervalMs) { + this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; + this.minRecordedValues = minRecordedValues; + this.intervalMs = intervalMs; + this.recorders = new ConcurrentHashMap(); + this.cachedHistograms = new ConcurrentHashMap(); + } + + /** + * Computes a key used to categorize measurements. Measurements with the same key will be recorded + * in the same histogram. + * + *

It's recommended to keep the number of distinct keys low, in order to limit the memory + * footprint of the histograms. + * + * @param host the host that was queried. + * @param statement the statement that was executed. + * @param exception if the query failed, the corresponding exception. + * @return the key. + */ + protected abstract Object computeKey(Host host, Statement statement, Exception exception); + + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + if (!include(host, statement, exception)) return; + + long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); + try { + Recorder recorder = getRecorder(host, statement, exception); + if (recorder != null) recorder.recordValue(latencyMs); + } catch (ArrayIndexOutOfBoundsException e) { + logger.warn( + "Got request with latency of {} ms, which exceeds the configured maximum trackable value {}", + latencyMs, + highestTrackableLatencyMillis); } - - /** - * Computes a key used to categorize measurements. Measurements with the same key will be recorded in the same - * histogram. - *

- * It's recommended to keep the number of distinct keys low, in order to limit the memory footprint of the - * histograms. - * - * @param host the host that was queried. - * @param statement the statement that was executed. - * @param exception if the query failed, the corresponding exception. - * @return the key. - */ - protected abstract Object computeKey(Host host, Statement statement, Exception exception); - - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - if (!include(host, statement, exception)) - return; - - long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); - try { - Recorder recorder = getRecorder(host, statement, exception); - if (recorder != null) - recorder.recordValue(latencyMs); - } catch (ArrayIndexOutOfBoundsException e) { - logger.warn("Got request with latency of {} ms, which exceeds the configured maximum trackable value {}", - latencyMs, highestTrackableLatencyMillis); - } - } - - /** - * Returns the request latency at a given percentile. - * - * @param host the host (if this is relevant in the way percentiles are categorized). - * @param statement the statement (if this is relevant in the way percentiles are categorized). - * @param exception the exception (if this is relevant in the way percentiles are categorized). - * @param percentile the percentile (for example, {@code 99.0} for the 99th percentile). - * @return the latency (in milliseconds) at the given percentile, or a negative value if it's not available yet. - * @see #computeKey(Host, Statement, Exception) - */ - public long getLatencyAtPercentile(Host host, Statement statement, Exception exception, double percentile) { - checkArgument(percentile >= 0.0 && percentile < 100, - "percentile must be between 0.0 and 100 (was %s)", percentile); - Histogram histogram = getLastIntervalHistogram(host, statement, exception); - if (histogram == null || histogram.getTotalCount() < minRecordedValues) - return -1; - - return histogram.getValueAtPercentile(percentile); + } + + /** + * Returns the request latency at a given percentile. + * + * @param host the host (if this is relevant in the way percentiles are categorized). + * @param statement the statement (if this is relevant in the way percentiles are categorized). + * @param exception the exception (if this is relevant in the way percentiles are categorized). + * @param percentile the percentile (for example, {@code 99.0} for the 99th percentile). + * @return the latency (in milliseconds) at the given percentile, or a negative value if it's not + * available yet. 
+ * @see #computeKey(Host, Statement, Exception) + */ + public long getLatencyAtPercentile( + Host host, Statement statement, Exception exception, double percentile) { + checkArgument( + percentile >= 0.0 && percentile < 100, + "percentile must be between 0.0 and 100 (was %s)", + percentile); + Histogram histogram = getLastIntervalHistogram(host, statement, exception); + if (histogram == null || histogram.getTotalCount() < minRecordedValues) return -1; + + return histogram.getValueAtPercentile(percentile); + } + + private Recorder getRecorder(Host host, Statement statement, Exception exception) { + Object key = computeKey(host, statement, exception); + if (key == null) return null; + + Recorder recorder = recorders.get(key); + if (recorder == null) { + recorder = new Recorder(highestTrackableLatencyMillis, numberOfSignificantValueDigits); + Recorder old = recorders.putIfAbsent(key, recorder); + if (old != null) { + // We got beaten at creating the recorder, use the actual instance and discard ours + recorder = old; + } else { + // Also set an empty cache entry to remember the time we started recording: + cachedHistograms.putIfAbsent(key, CachedHistogram.empty()); + } } - - private Recorder getRecorder(Host host, Statement statement, Exception exception) { - Object key = computeKey(host, statement, exception); - if (key == null) - return null; - - Recorder recorder = recorders.get(key); - if (recorder == null) { - recorder = new Recorder(highestTrackableLatencyMillis, numberOfSignificantValueDigits); - Recorder old = recorders.putIfAbsent(key, recorder); - if (old != null) { - // We got beaten at creating the recorder, use the actual instance and discard ours - recorder = old; - } else { - // Also set an empty cache entry to remember the time we started recording: - cachedHistograms.putIfAbsent(key, CachedHistogram.empty()); - } + return recorder; + } + + /** @return null if no histogram is available yet (no entries recorded, or not for long enough) */ + private Histogram getLastIntervalHistogram(Host host, Statement statement, Exception exception) { + Object key = computeKey(host, statement, exception); + if (key == null) return null; + + try { + while (true) { + CachedHistogram entry = cachedHistograms.get(key); + if (entry == null) return null; + + long age = System.currentTimeMillis() - entry.timestamp; + if (age < intervalMs) { // current histogram is recent enough + return entry.histogram.get(); + } else { // need to refresh + Recorder recorder = recorders.get(key); + // intervalMs should be much larger than the time it takes to replace a histogram, so this + // future should never block + Histogram staleHistogram = entry.histogram.get(0, MILLISECONDS); + SettableFuture future = SettableFuture.create(); + CachedHistogram newEntry = new CachedHistogram(future); + if (cachedHistograms.replace(key, entry, newEntry)) { + // Only get the new histogram if we successfully replaced the cache entry. + // This ensures that only one thread will do it. 
+ Histogram newHistogram = recorder.getIntervalHistogram(staleHistogram); + future.set(newHistogram); + return newHistogram; + } + // If we couldn't replace the entry it means we raced, so loop to try again } - return recorder; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } catch (ExecutionException e) { + throw new DriverInternalError("Unexpected error", e.getCause()); + } catch (TimeoutException e) { + throw new DriverInternalError("Unexpected timeout while getting histogram", e); } - - /** - * @return null if no histogram is available yet (no entries recorded, or not for long enough) - */ - private Histogram getLastIntervalHistogram(Host host, Statement statement, Exception exception) { - Object key = computeKey(host, statement, exception); - if (key == null) - return null; - - try { - while (true) { - CachedHistogram entry = cachedHistograms.get(key); - if (entry == null) - return null; - - long age = System.currentTimeMillis() - entry.timestamp; - if (age < intervalMs) { // current histogram is recent enough - return entry.histogram.get(); - } else { // need to refresh - Recorder recorder = recorders.get(key); - // intervalMs should be much larger than the time it takes to replace a histogram, so this future should never block - Histogram staleHistogram = entry.histogram.get(0, MILLISECONDS); - SettableFuture future = SettableFuture.create(); - CachedHistogram newEntry = new CachedHistogram(future); - if (cachedHistograms.replace(key, entry, newEntry)) { - // Only get the new histogram if we successfully replaced the cache entry. - // This ensures that only one thread will do it. - Histogram newHistogram = recorder.getIntervalHistogram(staleHistogram); - future.set(newHistogram); - return newHistogram; - } - // If we couldn't replace the entry it means we raced, so loop to try again - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return null; - } catch (ExecutionException e) { - throw new DriverInternalError("Unexpected error", e.getCause()); - } catch (TimeoutException e) { - throw new DriverInternalError("Unexpected timeout while getting histogram", e); - } + } + + /** + * A histogram and the timestamp at which it was retrieved. The data is only relevant for + * (timestamp + intervalMs); after that, the histogram is stale and we want to retrieve a new one. + */ + static class CachedHistogram { + final ListenableFuture histogram; + final long timestamp; + + CachedHistogram(ListenableFuture histogram) { + this.histogram = histogram; + this.timestamp = System.currentTimeMillis(); } - /** - * A histogram and the timestamp at which it was retrieved. - * The data is only relevant for (timestamp + intervalMs); after that, the histogram is stale and we want to - * retrieve a new one. 
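To make the histogram-caching behavior concrete: `getLatencyAtPercentile` returns a negative value until a full interval has elapsed and at least `minRecordedValues` samples have been recorded, so callers should treat the result as optional. A small usage sketch with hypothetical variable names, valid for trackers whose `computeKey` only looks at the host:

```java
// Query the 99th percentile for a given host; statement and exception are ignored by
// PerHostPercentileTracker's computeKey, so null is acceptable in this sketch.
long p99Millis = tracker.getLatencyAtPercentile(host, null, null, 99.0);
if (p99Millis < 0) {
  // first interval not elapsed yet, or fewer than minRecordedValues samples recorded
} else {
  // p99Millis is the latency in milliseconds at the 99th percentile
}
```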
- */ - static class CachedHistogram { - final ListenableFuture histogram; - final long timestamp; - - CachedHistogram(ListenableFuture histogram) { - this.histogram = histogram; - this.timestamp = System.currentTimeMillis(); - } - - static CachedHistogram empty() { - return new CachedHistogram(Futures.immediateFuture(null)); - } + static CachedHistogram empty() { + return new CachedHistogram(Futures.immediateFuture(null)); } - - @Override - public void onRegister(Cluster cluster) { - // nothing by default + } + + @Override + public void onRegister(Cluster cluster) { + // nothing by default + } + + @Override + public void onUnregister(Cluster cluster) { + // nothing by default + } + + /** + * Determines whether a particular measurement should be included. + * + *

This is used to ignore measurements that could skew the statistics; for example, we + * typically want to ignore invalid query errors because they have a very low latency and would + * make a given cluster/host appear faster than it really is. + * + * @param host the host that was queried. + * @param statement the statement that was executed. + * @param exception if the query failed, the corresponding exception. + * @return whether the measurement should be included. + */ + protected boolean include(Host host, Statement statement, Exception exception) { + // query was successful: always consider + if (exception == null) return true; + // filter out "fast" errors + // TODO this was copy/pasted from LatencyAwarePolicy, maybe it could be refactored as a shared + // method + return !EXCLUDED_EXCEPTIONS.contains(exception.getClass()); + } + + /** + * A set of DriverException subclasses that we should prevent from updating the host's score. The + * intent behind it is to filter out "fast" errors: when a host replies with such errors, it + * usually does so very quickly, because it did not involve any actual coordination work. Such + * errors are not good indicators of the host's responsiveness, and tend to make the host's score + * look better than it actually is. + */ + @SuppressWarnings("unchecked") + private static final Set> EXCLUDED_EXCEPTIONS = + ImmutableSet.of( + UnavailableException.class, // this is done via the snitch and is usually very fast + OverloadedException.class, + BootstrappingException.class, + UnpreparedException.class, + QueryValidationException + .class, // query validation also happens at early stages in the coordinator + CancelledSpeculativeExecutionException.class); + + /** + * Base class for {@code PercentileTracker} implementation builders. + * + * @param the type of the concrete builder implementation. + * @param the type of the object to build. + */ + public abstract static class Builder { + protected final long highestTrackableLatencyMillis; + protected int numberOfSignificantValueDigits = 3; + protected int minRecordedValues = 1000; + protected long intervalMs = MINUTES.toMillis(5); + + Builder(long highestTrackableLatencyMillis) { + this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; } - @Override - public void onUnregister(Cluster cluster) { - // nothing by default - } + protected abstract B self(); /** - * Determines whether a particular measurement should be included. - *
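The `include(...)` hook and `EXCLUDED_EXCEPTIONS` set above are the extension points for filtering measurements, alongside `computeKey(...)` for categorizing them. A hedged sketch of a custom tracker that keys by host and additionally drops client-side timeouts; the class name, threshold values and the use of `OperationTimedOutException` are assumptions for illustration, not part of this change:

```java
import com.datastax.driver.core.Host;
import com.datastax.driver.core.PercentileTracker;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.exceptions.OperationTimedOutException;
import java.util.concurrent.TimeUnit;

/** Hypothetical subclass: one histogram per host, ignoring client-side timeouts. */
public class TimeoutFilteringTracker extends PercentileTracker {

  public TimeoutFilteringTracker(long highestTrackableLatencyMillis) {
    // 3 significant digits, 1000 minimum samples, 5-minute interval (the Builder defaults)
    super(highestTrackableLatencyMillis, 3, 1000, TimeUnit.MINUTES.toMillis(5));
  }

  @Override
  protected Object computeKey(Host host, Statement statement, Exception exception) {
    return host; // same categorization as PerHostPercentileTracker
  }

  @Override
  protected boolean include(Host host, Statement statement, Exception exception) {
    // drop client timeouts in addition to the driver's default exclusions
    if (exception instanceof OperationTimedOutException) return false;
    return super.include(host, statement, exception);
  }
}
```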

- * This is used to ignore measurements that could skew the statistics; for example, we typically want to ignore - * invalid query errors because they have a very low latency and would make a given cluster/host appear faster than - * it really is. + * Sets the number of significant decimal digits to which histograms will maintain value + * resolution and separation. This must be an integer between 0 and 5. + * + *

If not set explicitly, this value defaults to 3. * - * @param host the host that was queried. - * @param statement the statement that was executed. - * @param exception if the query failed, the corresponding exception. - * @return whether the measurement should be included. + *

See the + * HdrHistogram Javadocs for a more detailed explanation on how this parameter affects the + * resolution of recorded samples. + * + * @param numberOfSignificantValueDigits the new value. + * @return this builder. */ - protected boolean include(Host host, Statement statement, Exception exception) { - // query was successful: always consider - if (exception == null) - return true; - // filter out "fast" errors - // TODO this was copy/pasted from LatencyAwarePolicy, maybe it could be refactored as a shared method - return !EXCLUDED_EXCEPTIONS.contains(exception.getClass()); + public B withNumberOfSignificantValueDigits(int numberOfSignificantValueDigits) { + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; + return self(); } /** - * A set of DriverException subclasses that we should prevent from updating the host's score. - * The intent behind it is to filter out "fast" errors: when a host replies with such errors, - * it usually does so very quickly, because it did not involve any actual - * coordination work. Such errors are not good indicators of the host's responsiveness, - * and tend to make the host's score look better than it actually is. + * Sets the minimum number of values that must be recorded for a host before we consider the + * sample size significant. + * + *

If this count is not reached during a given interval, {@link #getLatencyAtPercentile(Host, + * Statement, Exception, double)} will return a negative value, indicating that statistics are + * not available. In particular, this is true during the first interval. + * + *

If not set explicitly, this value default to 1000. + * + * @param minRecordedValues the new value. + * @return this builder. */ - private static final Set> EXCLUDED_EXCEPTIONS = ImmutableSet.>of( - UnavailableException.class, // this is done via the snitch and is usually very fast - OverloadedException.class, - BootstrappingException.class, - UnpreparedException.class, - QueryValidationException.class // query validation also happens at early stages in the coordinator - ); + public B withMinRecordedValues(int minRecordedValues) { + this.minRecordedValues = minRecordedValues; + return self(); + } /** - * Base class for {@code PercentileTracker} implementation builders. + * Sets the time interval over which samples are recorded. + * + *

For each host, there is a "live" histogram where current latencies are recorded, and a + * "cached", read-only histogram that is used when clients call {@link + * #getLatencyAtPercentile(Host, Statement, Exception, double)}. Each time the cached histogram + * becomes older than the interval, the two histograms are switched. Note that statistics will + * not be available during the first interval at cluster startup, since we don't have a cached + * histogram yet. * - * @param the type of the concrete builder implementation. - * @param the type of the object to build. + *

If not set explicitly, this value defaults to 5 minutes. + * + * @param interval the new interval. + * @param unit the unit that the interval is expressed in. + * @return this builder. */ - public static abstract class Builder { - protected final long highestTrackableLatencyMillis; - protected int numberOfSignificantValueDigits = 3; - protected int minRecordedValues = 1000; - protected long intervalMs = MINUTES.toMillis(5); - - Builder(long highestTrackableLatencyMillis) { - this.highestTrackableLatencyMillis = highestTrackableLatencyMillis; - } - - protected abstract B self(); - - /** - * Sets the number of significant decimal digits to which histograms will maintain value - * resolution and separation. This must be an integer between 0 and 5. - *

- * If not set explicitly, this value defaults to 3. - *

- * See the HdrHistogram Javadocs - * for a more detailed explanation on how this parameter affects the resolution of recorded samples. - * - * @param numberOfSignificantValueDigits the new value. - * @return this builder. - */ - public B withNumberOfSignificantValueDigits(int numberOfSignificantValueDigits) { - this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; - return self(); - } - - /** - * Sets the minimum number of values that must be recorded for a host before we consider - * the sample size significant. - *

- * If this count is not reached during a given interval, - * {@link #getLatencyAtPercentile(Host, Statement, Exception, double)} will return a negative value, indicating - * that statistics are not available. In particular, this is true during the first interval. - *

- * If not set explicitly, this value default to 1000. - * - * @param minRecordedValues the new value. - * @return this builder. - */ - public B withMinRecordedValues(int minRecordedValues) { - this.minRecordedValues = minRecordedValues; - return self(); - } - - /** - * Sets the time interval over which samples are recorded. - *

- * For each host, there is a "live" histogram where current latencies are recorded, and a "cached", read-only - * histogram that is used when clients call {@link #getLatencyAtPercentile(Host, Statement, Exception, double)}. - * Each time the cached histogram becomes older than the interval, the two histograms are switched. Note that - * statistics will not be available during the first interval at cluster startup, since we don't have a cached - * histogram yet. - *

- * If not set explicitly, this value defaults to 5 minutes. - * - * @param interval the new interval. - * @param unit the unit that the interval is expressed in. - * @return this builder. - */ - public B withInterval(long interval, TimeUnit unit) { - this.intervalMs = MILLISECONDS.convert(interval, unit); - return self(); - } - - /** - * Builds the {@code PercentileTracker} instance configured with this builder. - * - * @return the instance. - */ - public abstract T build(); + public B withInterval(long interval, TimeUnit unit) { + this.intervalMs = MILLISECONDS.convert(interval, unit); + return self(); } + /** + * Builds the {@code PercentileTracker} instance configured with this builder. + * + * @return the instance. + */ + public abstract T build(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java b/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java index 67ef1781004..86f62ea4eab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PlainTextAuthProvider.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,113 +17,119 @@ */ package com.datastax.driver.core; +import com.datastax.driver.core.exceptions.AuthenticationException; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableMap; - import java.net.InetSocketAddress; import java.util.Map; /** * A simple {@code AuthProvider} implementation. - *

- * This provider allows to programmatically define authentication - * information that will then apply to all hosts. The - * PlainTextAuthenticator instances it returns support SASL - * authentication using the PLAIN mechanism for version 2 (or above) of the - * CQL native protocol. + * + *

This provider allows to programmatically define authentication information that will then + * apply to all hosts. The PlainTextAuthenticator instances it returns support SASL authentication + * using the PLAIN mechanism for version 2 (or above) of the CQL native protocol. */ -public class PlainTextAuthProvider implements AuthProvider { +public class PlainTextAuthProvider implements ExtendedAuthProvider { - private volatile String username; - private volatile String password; + private volatile String username; + private volatile String password; - /** - * Creates a new simple authentication information provider with the - * supplied credentials. - * - * @param username to use for authentication requests - * @param password to use for authentication requests - */ - public PlainTextAuthProvider(String username, String password) { - this.username = username; - this.password = password; - } + /** + * Creates a new simple authentication information provider with the supplied credentials. + * + * @param username to use for authentication requests + * @param password to use for authentication requests + */ + public PlainTextAuthProvider(String username, String password) { + this.username = username; + this.password = password; + } - /** - * Changes the user name. - *

- * The new credentials will be used for all connections initiated after this method was called. - * - * @param username the new name. - */ - public void setUsername(String username) { - this.username = username; - } + /** + * Changes the user name. + * + *

The new credentials will be used for all connections initiated after this method was called. + * + * @param username the new name. + */ + public void setUsername(String username) { + this.username = username; + } - /** - * Changes the password. - *

- * The new credentials will be used for all connections initiated after this method was called. - * - * @param password the new password. - */ - public void setPassword(String password) { - this.password = password; - } + /** + * Changes the password. + * + *

The new credentials will be used for all connections initiated after this method was called. + * + * @param password the new password. + */ + public void setPassword(String password) { + this.password = password; + } - /** - * Uses the supplied credentials and the SASL PLAIN mechanism to login - * to the server. - * - * @param host the Cassandra host with which we want to authenticate - * @param authenticator the configured authenticator on the host - * @return an Authenticator instance which can be used to perform - * authentication negotiations on behalf of the client - */ - @Override - public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) { - return new PlainTextAuthenticator(username, password); - } + /** + * Uses the supplied credentials and the SASL PLAIN mechanism to login to the server. + * + * @param host the Cassandra host with which we want to authenticate + * @param authenticator the configured authenticator on the host + * @return an Authenticator instance which can be used to perform authentication negotiations on + * behalf of the client + */ + @Override + public Authenticator newAuthenticator(EndPoint host, String authenticator) { + return new PlainTextAuthenticator(username, password); + } - /** - * Simple implementation of {@link Authenticator} which can - * perform authentication against Cassandra servers configured - * with PasswordAuthenticator. - */ - private static class PlainTextAuthenticator extends ProtocolV1Authenticator implements Authenticator { + @Override + public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) + throws AuthenticationException { + throw new AssertionError( + "The driver should never call this method on an object that implements " + + this.getClass().getSimpleName()); + } - private final byte[] username; - private final byte[] password; + /** + * Simple implementation of {@link Authenticator} which can perform authentication against + * Cassandra servers configured with PasswordAuthenticator. 
+ */ + static class PlainTextAuthenticator extends ProtocolV1Authenticator implements Authenticator { - public PlainTextAuthenticator(String username, String password) { - this.username = username.getBytes(Charsets.UTF_8); - this.password = password.getBytes(Charsets.UTF_8); - } + private final byte[] username; + private final byte[] password; - @Override - public byte[] initialResponse() { - byte[] initialToken = new byte[username.length + password.length + 2]; - initialToken[0] = 0; - System.arraycopy(username, 0, initialToken, 1, username.length); - initialToken[username.length + 1] = 0; - System.arraycopy(password, 0, initialToken, username.length + 2, password.length); - return initialToken; - } + public PlainTextAuthenticator(String username, String password) { + this.username = username.getBytes(Charsets.UTF_8); + this.password = password.getBytes(Charsets.UTF_8); + } - @Override - public byte[] evaluateChallenge(byte[] challenge) { - return null; - } + @Override + public byte[] initialResponse() { + byte[] initialToken = new byte[username.length + password.length + 2]; + initialToken[0] = 0; + System.arraycopy(username, 0, initialToken, 1, username.length); + initialToken[username.length + 1] = 0; + System.arraycopy(password, 0, initialToken, username.length + 2, password.length); + return initialToken; + } + + @Override + public byte[] evaluateChallenge(byte[] challenge) { + return null; + } - @Override - public void onAuthenticationSuccess(byte[] token) { - // no-op, the server should send nothing anyway - } + @Override + public void onAuthenticationSuccess(byte[] token) { + // no-op, the server should send nothing anyway + } - @Override - Map getCredentials() { - return ImmutableMap.of("username", new String(username, Charsets.UTF_8), - "password", new String(password, Charsets.UTF_8)); - } + @Override + Map getCredentials() { + return ImmutableMap.of( + "username", + new String(username, Charsets.UTF_8), + "password", + new String(password, Charsets.UTF_8)); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java index 7ffd5b06362..c5de138c6af 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
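For context, the provider above is wired in through the cluster builder, and `initialResponse()` encodes the SASL PLAIN token as a zero byte, the username, another zero byte, then the password (an empty authorization id), which is what a server configured with `PasswordAuthenticator` expects. A usage sketch with placeholder credentials:

```java
PlainTextAuthProvider authProvider = new PlainTextAuthProvider("cassandra", "cassandra");

Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withAuthProvider(authProvider)
        .build();

// Credentials can be changed later; only connections opened afterwards use the new values.
authProvider.setUsername("app_user");
authProvider.setPassword("s3cr3t");
```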
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,617 +17,650 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.HostDistance.LOCAL; +import static com.datastax.driver.core.HostDistance.REMOTE; + import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; - import java.util.Map; import java.util.concurrent.Executor; -import static com.datastax.driver.core.HostDistance.LOCAL; -import static com.datastax.driver.core.HostDistance.REMOTE; - /** * Options related to connection pooling. - *

- * The driver uses connections in an asynchronous manner, meaning that - * multiple requests can be submitted on the same connection at the same - * time. Therefore only a relatively small number of connections is needed. - * For each host, the driver uses a connection pool that may have a variable + * + *

The driver uses connections in an asynchronous manner, meaning that multiple requests can be + * submitted on the same connection at the same time. Therefore only a relatively small number of + * connections is needed. For each host, the driver uses a connection pool that may have a variable * size (it will automatically adjust to the current load). - *

- * With {@code ProtocolVersion#V2} or below, there are at most 128 simultaneous - * requests per connection, so the pool defaults to a variable size. You will - * typically raise the maximum capacity by adding more connections with - * {@link #setMaxConnectionsPerHost(HostDistance, int)}. - *

- * With {@code ProtocolVersion#V3} or above, there are up to 32768 requests per - * connection, and the pool defaults to a fixed size of 1. You will typically - * raise the maximum capacity by allowing more simultaneous requests per connection - * ({@link #setMaxRequestsPerConnection(HostDistance, int)}). - *

- * All parameters can be separately set for {@code LOCAL} and - * {@code REMOTE} hosts ({@link HostDistance}). For {@code IGNORED} hosts, - * no connections are created so these settings cannot be changed. + * + *

With {@code ProtocolVersion#V2} or below, there are at most 128 simultaneous requests per + * connection, so the pool defaults to a variable size. You will typically raise the maximum + * capacity by adding more connections with {@link #setMaxConnectionsPerHost(HostDistance, int)}. + * + *

With {@code ProtocolVersion#V3} or above, there are up to 32768 requests per connection, and + * the pool defaults to a fixed size of 1. You will typically raise the maximum capacity by allowing + * more simultaneous requests per connection ({@link #setMaxRequestsPerConnection(HostDistance, + * int)}). + * + *

All parameters can be separately set for {@code LOCAL} and {@code REMOTE} hosts ({@link + * HostDistance}). For {@code IGNORED} hosts, no connections are created so these settings cannot be + * changed. */ public class PoolingOptions { - /** - * The value returned for connection options when they have not been set by the client, and the protocol version - * is not known yet. - *

- * Once a {@code PoolingOptions} object is associated to a {@link Cluster} and that cluster initializes, the - * protocol version will be detected, and connection options will take their default values for that protocol - * version. - *

- * The methods that may return this value are: - * {@link #getCoreConnectionsPerHost(HostDistance)}, - * {@link #getMaxConnectionsPerHost(HostDistance)}, - * {@link #getNewConnectionThreshold(HostDistance)}, - * {@link #getMaxRequestsPerConnection(HostDistance)}. - */ - public static final int UNSET = Integer.MIN_VALUE; - - public static final String CORE_POOL_LOCAL_KEY = "corePoolLocal"; - public static final String MAX_POOL_LOCAL_KEY = "maxPoolLocal"; - public static final String CORE_POOL_REMOTE_KEY = "corePoolRemote"; - public static final String MAX_POOL_REMOTE_KEY = "maxPoolRemote"; - public static final String NEW_CONNECTION_THRESHOLD_LOCAL_KEY = "newConnectionThresholdLocal"; - public static final String NEW_CONNECTION_THRESHOLD_REMOTE_KEY = "newConnectionThresholdRemote"; - public static final String MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY = "maxRequestsPerConnectionLocal"; - public static final String MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY = "maxRequestsPerConnectionRemote"; - - /** - * The default values for connection options, that depend on the native protocol version. - *

- * The map stores protocol versions in ascending order, and only the versions that introduced a change are present. - * To find the defaults for a particular version, look for the highest key that is less than or equal to that - * version, in other words: - *

{@code
-     * ProtocolVersion referenceVersion = null;
-     * for (ProtocolVersion key : DEFAULTS.keySet()) {
-     *     if (key.compareTo(actualVersion) > 0)
-     *         break;
-     *     else
-     *         referenceVersion = key;
-     * }
-     * Map defaults = DEFAULTS.get(referenceVersion);
-     * }
- * Once you've extracted the underlying map, use the keys {@code CORE_POOL_LOCAL_KEY}, - * {@code MAX_POOL_LOCAL_KEY}, {@code CORE_POOL_REMOTE_KEY}, {@code MAX_POOL_REMOTE_KEY}, - * {@code NEW_CONNECTION_THRESHOLD_LOCAL_KEY}, {@code NEW_CONNECTION_THRESHOLD_REMOTE_KEY}, - * {@code MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY} and {@code MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY}. - * - * @see #UNSET - */ - public static final Map> DEFAULTS = ImmutableMap.>of( - ProtocolVersion.V1, ImmutableMap.builder() - .put(CORE_POOL_LOCAL_KEY, 2) - .put(MAX_POOL_LOCAL_KEY, 8) - .put(CORE_POOL_REMOTE_KEY, 1) - .put(MAX_POOL_REMOTE_KEY, 2) - .put(NEW_CONNECTION_THRESHOLD_LOCAL_KEY, 100) - .put(NEW_CONNECTION_THRESHOLD_REMOTE_KEY, 100) - .put(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY, 128) - .put(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY, 128) - .build(), - - ProtocolVersion.V3, ImmutableMap.builder() - .put(CORE_POOL_LOCAL_KEY, 1) - .put(MAX_POOL_LOCAL_KEY, 1) - .put(CORE_POOL_REMOTE_KEY, 1) - .put(MAX_POOL_REMOTE_KEY, 1) - .put(NEW_CONNECTION_THRESHOLD_LOCAL_KEY, 800) - .put(NEW_CONNECTION_THRESHOLD_REMOTE_KEY, 200) - .put(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY, 1024) - .put(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY, 256) - .build() - ); - - /** - * The default value for {@link #getIdleTimeoutSeconds()} ({@value}). - */ - public static final int DEFAULT_IDLE_TIMEOUT_SECONDS = 120; - - /** - * The default value for {@link #getPoolTimeoutMillis()} ({@value}). - */ - public static final int DEFAULT_POOL_TIMEOUT_MILLIS = 5000; - - /** - * The default value for {@link #getMaxQueueSize()} ({@value}). - */ - public static final int DEFAULT_MAX_QUEUE_SIZE = 256; - - /** - * The default value for {@link #getHeartbeatIntervalSeconds()} ({@value}). - */ - public static final int DEFAULT_HEARTBEAT_INTERVAL_SECONDS = 30; - - private static final Executor DEFAULT_INITIALIZATION_EXECUTOR = GuavaCompatibility.INSTANCE.sameThreadExecutor(); - - private volatile Cluster.Manager manager; - private volatile ProtocolVersion protocolVersion; - - // The defaults for these fields depend on the protocol version, which is only known after control connection initialization. - // Yet if the user set them before initialization, we want to keep their values. So we use -1 to mean "uninitialized". - private final int[] coreConnections = new int[]{UNSET, UNSET, 0}; - private final int[] maxConnections = new int[]{UNSET, UNSET, 0}; - private final int[] newConnectionThreshold = new int[]{UNSET, UNSET, 0}; - private volatile int maxRequestsPerConnectionLocal = UNSET; - private volatile int maxRequestsPerConnectionRemote = UNSET; - - private volatile int idleTimeoutSeconds = DEFAULT_IDLE_TIMEOUT_SECONDS; - private volatile int poolTimeoutMillis = DEFAULT_POOL_TIMEOUT_MILLIS; - private volatile int maxQueueSize = DEFAULT_MAX_QUEUE_SIZE; - private volatile int heartbeatIntervalSeconds = DEFAULT_HEARTBEAT_INTERVAL_SECONDS; - - private volatile Executor initializationExecutor = DEFAULT_INITIALIZATION_EXECUTOR; - - public PoolingOptions() { - } - - void register(Cluster.Manager manager) { - this.manager = manager; - } - - /** - * Returns the core number of connections per host. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the core number of connections per host at distance {@code distance}. - */ - public int getCoreConnectionsPerHost(HostDistance distance) { - return coreConnections[distance.ordinal()]; - } - - /** - * Sets the core number of connections per host. - *

- * For the provided {@code distance}, this corresponds to the number of - * connections initially created and kept open to each host of that - * distance. - *

- * The default value is: - *

    - *
  • with {@code ProtocolVersion#V2} or below: 2 for {@code LOCAL} hosts and 1 for {@code REMOTE} hosts.
  • - *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts.
  • - *
- * - * @param distance the {@code HostDistance} for which to set this threshold. - * @param newCoreConnections the value to set - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, - * or if {@code newCoreConnections} is greater than the maximum value for this distance. - * @see #setConnectionsPerHost(HostDistance, int, int) - */ - public synchronized PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int newCoreConnections) { - if (distance == HostDistance.IGNORED) - throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts"); - Preconditions.checkArgument(newCoreConnections >= 0, "core number of connections must be positive"); - - if (maxConnections[distance.ordinal()] != UNSET) - checkConnectionsPerHostOrder(newCoreConnections, maxConnections[distance.ordinal()], distance); - - int oldCore = coreConnections[distance.ordinal()]; - coreConnections[distance.ordinal()] = newCoreConnections; - if (oldCore < newCoreConnections && manager != null) - manager.ensurePoolsSizing(); - return this; - } - - /** - * Returns the maximum number of connections per host. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the maximum number of connections per host at distance {@code distance}. - */ - public int getMaxConnectionsPerHost(HostDistance distance) { - return maxConnections[distance.ordinal()]; - } - - /** - * Sets the maximum number of connections per host. - *

- * For the provided {@code distance}, this corresponds to the maximum - * number of connections that can be created per host at that distance. - *

- * The default value is: - *

    - *
  • with {@code ProtocolVersion#V2} or below: 8 for {@code LOCAL} hosts and 2 for {@code REMOTE} hosts.
  • - *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts.
  • - *
- * - * @param distance the {@code HostDistance} for which to set this threshold. - * @param newMaxConnections the value to set - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, - * or if {@code newMaxConnections} is less than the core value for this distance. - * @see #setConnectionsPerHost(HostDistance, int, int) - */ - public synchronized PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int newMaxConnections) { - if (distance == HostDistance.IGNORED) - throw new IllegalArgumentException("Cannot set max connections per host for " + distance + " hosts"); - Preconditions.checkArgument(newMaxConnections >= 0, "max number of connections must be positive"); - - if (coreConnections[distance.ordinal()] != UNSET) - checkConnectionsPerHostOrder(coreConnections[distance.ordinal()], newMaxConnections, distance); - - maxConnections[distance.ordinal()] = newMaxConnections; - return this; - } - - /** - * Sets the core and maximum number of connections per host in one call. - *

- * This is a convenience method that is equivalent to calling {@link #setCoreConnectionsPerHost(HostDistance, int)} - * and {@link #setMaxConnectionsPerHost(HostDistance, int)}. - * - * @param distance the {@code HostDistance} for which to set these threshold. - * @param core the core number of connections. - * @param max the max number of connections. - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, - * or if {@code core} > {@code max}. - */ - public synchronized PoolingOptions setConnectionsPerHost(HostDistance distance, int core, int max) { - if (distance == HostDistance.IGNORED) - throw new IllegalArgumentException("Cannot set connections per host for " + distance + " hosts"); - Preconditions.checkArgument(core >= 0, "core number of connections must be positive"); - Preconditions.checkArgument(max >= 0, "max number of connections must be positive"); - - checkConnectionsPerHostOrder(core, max, distance); - coreConnections[distance.ordinal()] = core; - maxConnections[distance.ordinal()] = max; - return this; - } - - /** - * Returns the threshold that triggers the creation of a new connection to a host. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the configured threshold, or the default one if none have been set. - * @see #setNewConnectionThreshold(HostDistance, int) - */ - public int getNewConnectionThreshold(HostDistance distance) { - return newConnectionThreshold[distance.ordinal()]; - } - - /** - * Sets the threshold that triggers the creation of a new connection to a host. - *

- * A new connection gets created if: - *

    - *
  • N connections are open
  • - *
  • N < {@link #getMaxConnectionsPerHost(HostDistance)}
  • - *
  • the number of active requests is more than - * (N - 1) * {@link #getMaxRequestsPerConnection(HostDistance)} + {@link #getNewConnectionThreshold(HostDistance)} - *
  • - *
- * In other words, if all but the last connection are full, and the last connection is above this threshold. - *

- * The default value is: - *

    - *
  • with {@code ProtocolVersion#V2} or below: 100 for all hosts.
  • - *
  • with {@code ProtocolVersion#V3} or above: 800 for {@code LOCAL} hosts and 200 for {@code REMOTE} hosts.
  • - *
- * - * @param distance the {@code HostDistance} for which to configure this threshold. - * @param newValue the value to set (between 0 and 128). - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code maxSimultaneousRequests} - * is not in range, or if {@code newValue} is less than the minimum value for this distance. - */ - public synchronized PoolingOptions setNewConnectionThreshold(HostDistance distance, int newValue) { - if (distance == HostDistance.IGNORED) - throw new IllegalArgumentException("Cannot set new connection threshold for " + distance + " hosts"); - - checkRequestsPerConnectionRange(newValue, "New connection threshold", distance); - newConnectionThreshold[distance.ordinal()] = newValue; - return this; - } - - /** - * Returns the maximum number of requests per connection. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the maximum number of requests per connection at distance {@code distance}. - * @see #setMaxRequestsPerConnection(HostDistance, int) - */ - public int getMaxRequestsPerConnection(HostDistance distance) { - switch (distance) { - case LOCAL: - return maxRequestsPerConnectionLocal; - case REMOTE: - return maxRequestsPerConnectionRemote; - default: - return 0; - } - } - - /** - * Sets the maximum number of requests per connection. - *

- * The default value is: - *

    - *
  • with {@code ProtocolVersion#V2} or below: 128 for all hosts (there should not be any reason to change this).
  • - *
  • with {@code ProtocolVersion#V3} or above: 1024 for {@code LOCAL} hosts and 256 for {@code REMOTE} hosts. - * These values were chosen so that the default V2 and V3 configuration generate the same load on a Cassandra cluster. - * Protocol V3 can go much higher (up to 32768), so if your number of clients is low, don't hesitate to experiment with - * higher values. If you have more than one connection per host, consider also adjusting - * {@link #setNewConnectionThreshold(HostDistance, int)}. - *
  • - *
- * - * @param distance the {@code HostDistance} for which to set this threshold. - * @param newMaxRequests the value to set. - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, - * or if {@code newMaxConnections} is not within the allowed range. - */ - public PoolingOptions setMaxRequestsPerConnection(HostDistance distance, int newMaxRequests) { - checkRequestsPerConnectionRange(newMaxRequests, "Max requests per connection", distance); - - switch (distance) { - case LOCAL: - maxRequestsPerConnectionLocal = newMaxRequests; - break; - case REMOTE: - maxRequestsPerConnectionRemote = newMaxRequests; - break; - default: - throw new IllegalArgumentException("Cannot set max requests per host for " + distance + " hosts"); - } - return this; - } - - /** - * Returns the timeout before an idle connection is removed. - * - * @return the timeout. - */ - public int getIdleTimeoutSeconds() { - return idleTimeoutSeconds; - } - - /** - * Sets the timeout before an idle connection is removed. - *

- * The order of magnitude should be a few minutes (the default is 120 seconds). The - * timeout that triggers the removal has a granularity of 10 seconds. - * - * @param idleTimeoutSeconds the new timeout in seconds. - * @return this {@code PoolingOptions}. - * @throws IllegalArgumentException if the timeout is negative. - */ - public PoolingOptions setIdleTimeoutSeconds(int idleTimeoutSeconds) { - if (idleTimeoutSeconds < 0) - throw new IllegalArgumentException("Idle timeout must be positive"); - this.idleTimeoutSeconds = idleTimeoutSeconds; - return this; - } - - /** - * Returns the timeout when trying to acquire a connection from a host's pool. - * - * @return the timeout. - */ - public int getPoolTimeoutMillis() { - return poolTimeoutMillis; - } - - /** - * Sets the timeout when trying to acquire a connection from a host's pool. - *

- * This option works in concert with {@link #setMaxQueueSize(int)} to determine what happens if the driver tries - * to borrow a connection from the pool but none is available: - *

    - *
  • if either option is set to zero, the attempt is rejected immediately;
  • - *
  • else if more than {@code maxQueueSize} requests are already waiting for a connection, the attempt is also - * rejected;
  • - *
  • otherwise, the attempt is enqueued; if a connection becomes available before {@code poolTimeoutMillis} - * has elapsed, then the attempt succeeds, otherwise it is rejected.
  • - *
- * If the attempt is rejected, the driver will move to the next host in the - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement)} query plan}. - *

- * The default is 5 seconds. If this option is set to zero, the driver won't wait at all. - * - * @param poolTimeoutMillis the new value in milliseconds. - * @return this {@code PoolingOptions} - * @throws IllegalArgumentException if the timeout is negative. - */ - public PoolingOptions setPoolTimeoutMillis(int poolTimeoutMillis) { - if (poolTimeoutMillis < 0) - throw new IllegalArgumentException("Pool timeout must be positive"); - this.poolTimeoutMillis = poolTimeoutMillis; - return this; - } - - /** - * Returns the maximum number of requests that get enqueued if no connection is available. - * - * @return the maximum queue size. - */ - public int getMaxQueueSize() { - return maxQueueSize; - } - - /** - * Sets the maximum number of requests that get enqueued if no connection is available. - *

- * This option works in concert with {@link #setPoolTimeoutMillis(int)} to determine what happens if the driver - * tries to borrow a connection from the pool but none is available: - *

    - *
  • if either options is set to zero, the attempt is rejected immediately;
  • - *
  • else if more than {@code maxQueueSize} requests are already waiting for a connection, the attempt is also - * rejected;
  • - *
  • otherwise, the attempt is enqueued; if a connection becomes available before {@code poolTimeoutMillis} - * has elapsed, then the attempt succeeds, otherwise it is rejected.
  • - *
- * If the attempt is rejected, the driver will move to the next host in the - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement)} query plan}. - *

- * The default value is {@value DEFAULT_MAX_QUEUE_SIZE}. If this option is set to zero, the driver will never - * enqueue requests. - * - * @param maxQueueSize the new value. - * @return this {@code PoolingOptions} - * @throws IllegalArgumentException if the value is negative. - */ - public PoolingOptions setMaxQueueSize(int maxQueueSize) { - if (maxQueueSize < 0) - throw new IllegalArgumentException("Max queue size must be positive"); - this.maxQueueSize = maxQueueSize; - return this; + /** + * The value returned for connection options when they have not been set by the client, and the + * protocol version is not known yet. + * + *

Once a {@code PoolingOptions} object is associated to a {@link Cluster} and that cluster + * initializes, the protocol version will be detected, and connection options will take their + * default values for that protocol version. + * + *

The methods that may return this value are: {@link + * #getCoreConnectionsPerHost(HostDistance)}, {@link #getMaxConnectionsPerHost(HostDistance)}, + * {@link #getNewConnectionThreshold(HostDistance)}, {@link + * #getMaxRequestsPerConnection(HostDistance)}. + */ + public static final int UNSET = Integer.MIN_VALUE; + + public static final String CORE_POOL_LOCAL_KEY = "corePoolLocal"; + public static final String MAX_POOL_LOCAL_KEY = "maxPoolLocal"; + public static final String CORE_POOL_REMOTE_KEY = "corePoolRemote"; + public static final String MAX_POOL_REMOTE_KEY = "maxPoolRemote"; + public static final String NEW_CONNECTION_THRESHOLD_LOCAL_KEY = "newConnectionThresholdLocal"; + public static final String NEW_CONNECTION_THRESHOLD_REMOTE_KEY = "newConnectionThresholdRemote"; + public static final String MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY = + "maxRequestsPerConnectionLocal"; + public static final String MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY = + "maxRequestsPerConnectionRemote"; + + /** + * The default values for connection options, that depend on the native protocol version. + * + *

The map stores protocol versions in ascending order, and only the versions that introduced a + * change are present. To find the defaults for a particular version, look for the highest key + * that is less than or equal to that version, in other words: + * + *

+   * <pre>{@code
+   * ProtocolVersion referenceVersion = null;
+   * for (ProtocolVersion key : DEFAULTS.keySet()) {
+   *     if (key.compareTo(actualVersion) > 0)
+   *         break;
+   *     else
+   *         referenceVersion = key;
+   * }
+   * Map<String, Integer> defaults = DEFAULTS.get(referenceVersion);
+   * }</pre>
+ * + * Once you've extracted the underlying map, use the keys {@code CORE_POOL_LOCAL_KEY}, {@code + * MAX_POOL_LOCAL_KEY}, {@code CORE_POOL_REMOTE_KEY}, {@code MAX_POOL_REMOTE_KEY}, {@code + * NEW_CONNECTION_THRESHOLD_LOCAL_KEY}, {@code NEW_CONNECTION_THRESHOLD_REMOTE_KEY}, {@code + * MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY} and {@code MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY}. + * + * @see #UNSET + */ + public static final Map> DEFAULTS = + ImmutableMap.>of( + ProtocolVersion.V1, + ImmutableMap.builder() + .put(CORE_POOL_LOCAL_KEY, 2) + .put(MAX_POOL_LOCAL_KEY, 8) + .put(CORE_POOL_REMOTE_KEY, 1) + .put(MAX_POOL_REMOTE_KEY, 2) + .put(NEW_CONNECTION_THRESHOLD_LOCAL_KEY, 100) + .put(NEW_CONNECTION_THRESHOLD_REMOTE_KEY, 100) + .put(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY, 128) + .put(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY, 128) + .build(), + ProtocolVersion.V3, + ImmutableMap.builder() + .put(CORE_POOL_LOCAL_KEY, 1) + .put(MAX_POOL_LOCAL_KEY, 1) + .put(CORE_POOL_REMOTE_KEY, 1) + .put(MAX_POOL_REMOTE_KEY, 1) + .put(NEW_CONNECTION_THRESHOLD_LOCAL_KEY, 800) + .put(NEW_CONNECTION_THRESHOLD_REMOTE_KEY, 200) + .put(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY, 1024) + .put(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY, 256) + .build()); + + /** The default value for {@link #getIdleTimeoutSeconds()} ({@value}). */ + public static final int DEFAULT_IDLE_TIMEOUT_SECONDS = 120; + + /** The default value for {@link #getPoolTimeoutMillis()} ({@value}). */ + public static final int DEFAULT_POOL_TIMEOUT_MILLIS = 5000; + + /** The default value for {@link #getMaxQueueSize()} ({@value}). */ + public static final int DEFAULT_MAX_QUEUE_SIZE = 256; + + /** The default value for {@link #getHeartbeatIntervalSeconds()} ({@value}). */ + public static final int DEFAULT_HEARTBEAT_INTERVAL_SECONDS = 30; + + private static final Executor DEFAULT_INITIALIZATION_EXECUTOR = + GuavaCompatibility.INSTANCE.sameThreadExecutor(); + + private volatile Cluster.Manager manager; + private volatile ProtocolVersion protocolVersion; + + // The defaults for these fields depend on the protocol version, which is only known after control + // connection initialization. + // Yet if the user set them before initialization, we want to keep their values. So we use -1 to + // mean "uninitialized". + private final int[] coreConnections = new int[] {UNSET, UNSET, 0}; + private final int[] maxConnections = new int[] {UNSET, UNSET, 0}; + private final int[] newConnectionThreshold = new int[] {UNSET, UNSET, 0}; + private volatile int maxRequestsPerConnectionLocal = UNSET; + private volatile int maxRequestsPerConnectionRemote = UNSET; + + private volatile int idleTimeoutSeconds = DEFAULT_IDLE_TIMEOUT_SECONDS; + private volatile int poolTimeoutMillis = DEFAULT_POOL_TIMEOUT_MILLIS; + private volatile int maxQueueSize = DEFAULT_MAX_QUEUE_SIZE; + private volatile int heartbeatIntervalSeconds = DEFAULT_HEARTBEAT_INTERVAL_SECONDS; + + private volatile Executor initializationExecutor = DEFAULT_INITIALIZATION_EXECUTOR; + + public PoolingOptions() {} + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Returns the core number of connections per host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the core number of connections per host at distance {@code distance}. + */ + public int getCoreConnectionsPerHost(HostDistance distance) { + return coreConnections[distance.ordinal()]; + } + + /** + * Sets the core number of connections per host. + * + *
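For illustration, a minimal, self-contained sketch of how these protocol-dependent defaults behave; the contact point is made up and a reachable Cassandra node is assumed. Until the `Cluster` initializes and negotiates a protocol version, the getters return `UNSET`; afterwards they reflect the matching `DEFAULTS` entry.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;

public class PoolingDefaultsDemo {
  public static void main(String[] args) {
    PoolingOptions pooling = new PoolingOptions();
    // Not yet attached to an initialized Cluster: the value is still UNSET.
    System.out.println(
        pooling.getCoreConnectionsPerHost(HostDistance.LOCAL) == PoolingOptions.UNSET);

    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")  // illustrative contact point
        .withPoolingOptions(pooling)
        .build();
    cluster.init();                    // negotiates the protocol version
    // Now the getter returns the default for the negotiated version (e.g. 1 with V3 or above).
    System.out.println(pooling.getCoreConnectionsPerHost(HostDistance.LOCAL));
    cluster.close();
  }
}
```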

For the provided {@code distance}, this corresponds to the number of connections initially + * created and kept open to each host of that distance. + * + *

The default value is: + * + *

    + *
  • with {@code ProtocolVersion#V2} or below: 2 for {@code LOCAL} hosts and 1 for {@code + * REMOTE} hosts. + *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts. + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newCoreConnections the value to set + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code + * newCoreConnections} is greater than the maximum value for this distance. + * @see #setConnectionsPerHost(HostDistance, int, int) + */ + public synchronized PoolingOptions setCoreConnectionsPerHost( + HostDistance distance, int newCoreConnections) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException( + "Cannot set core connections per host for " + distance + " hosts"); + Preconditions.checkArgument( + newCoreConnections >= 0, "core number of connections must be positive"); + + if (maxConnections[distance.ordinal()] != UNSET) + checkConnectionsPerHostOrder( + newCoreConnections, maxConnections[distance.ordinal()], distance); + + int oldCore = coreConnections[distance.ordinal()]; + coreConnections[distance.ordinal()] = newCoreConnections; + if (oldCore < newCoreConnections && manager != null) manager.ensurePoolsSizing(); + return this; + } + + /** + * Returns the maximum number of connections per host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the maximum number of connections per host at distance {@code distance}. + */ + public int getMaxConnectionsPerHost(HostDistance distance) { + return maxConnections[distance.ordinal()]; + } + + /** + * Sets the maximum number of connections per host. + * + *

For the provided {@code distance}, this corresponds to the maximum number of connections + * that can be created per host at that distance. + * + *

The default value is: + * + *

    + *
  • with {@code ProtocolVersion#V2} or below: 8 for {@code LOCAL} hosts and 2 for {@code + * REMOTE} hosts. + *
  • with {@code ProtocolVersion#V3} or above: 1 for all hosts. + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newMaxConnections the value to set + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code + * newMaxConnections} is less than the core value for this distance. + * @see #setConnectionsPerHost(HostDistance, int, int) + */ + public synchronized PoolingOptions setMaxConnectionsPerHost( + HostDistance distance, int newMaxConnections) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException( + "Cannot set max connections per host for " + distance + " hosts"); + Preconditions.checkArgument( + newMaxConnections >= 0, "max number of connections must be positive"); + + if (coreConnections[distance.ordinal()] != UNSET) + checkConnectionsPerHostOrder( + coreConnections[distance.ordinal()], newMaxConnections, distance); + + maxConnections[distance.ordinal()] = newMaxConnections; + return this; + } + + /** + * Sets the core and maximum number of connections per host in one call. + * + *

This is a convenience method that is equivalent to calling {@link + * #setCoreConnectionsPerHost(HostDistance, int)} and {@link + * #setMaxConnectionsPerHost(HostDistance, int)}. + * + * @param distance the {@code HostDistance} for which to set these threshold. + * @param core the core number of connections. + * @param max the max number of connections. + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code + * core} > {@code max}. + */ + public synchronized PoolingOptions setConnectionsPerHost( + HostDistance distance, int core, int max) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException( + "Cannot set connections per host for " + distance + " hosts"); + Preconditions.checkArgument(core >= 0, "core number of connections must be positive"); + Preconditions.checkArgument(max >= 0, "max number of connections must be positive"); + + checkConnectionsPerHostOrder(core, max, distance); + coreConnections[distance.ordinal()] = core; + maxConnections[distance.ordinal()] = max; + return this; + } + + /** + * Returns the threshold that triggers the creation of a new connection to a host. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the configured threshold, or the default one if none have been set. + * @see #setNewConnectionThreshold(HostDistance, int) + */ + public int getNewConnectionThreshold(HostDistance distance) { + return newConnectionThreshold[distance.ordinal()]; + } + + /** + * Sets the threshold that triggers the creation of a new connection to a host. + * + *
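A short usage sketch of the convenience setter, assuming a single local data center and an illustrative contact point; the actual sizes should be tuned to the workload and protocol version.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;

public class PoolSizingExample {
  public static void main(String[] args) {
    // Protocol V3 and above typically needs very few connections per host.
    PoolingOptions pooling = new PoolingOptions()
        .setConnectionsPerHost(HostDistance.LOCAL, 1, 2)   // core = 1, max = 2 for local hosts
        .setConnectionsPerHost(HostDistance.REMOTE, 1, 1); // keep remote pools minimal

    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")  // illustrative contact point
        .withPoolingOptions(pooling)
        .build();
    try {
      cluster.connect();
    } finally {
      cluster.close();
    }
  }
}
```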

A new connection gets created if: + * + *

    + *
  • N connections are open + *
  • N < {@link #getMaxConnectionsPerHost(HostDistance)} + *
  • the number of active requests is more than (N - 1) * {@link + * #getMaxRequestsPerConnection(HostDistance)} + {@link + * #getNewConnectionThreshold(HostDistance)} + *
+ * + * In other words, if all but the last connection are full, and the last connection is above this + * threshold. + * + *
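As a concrete illustration of that rule, here is a small standalone helper (not driver API) that evaluates the trigger condition with the V3 local defaults; the pool maximum is assumed to have been raised to 2 so that a second connection can actually be opened.

```java
public class NewConnectionThresholdDemo {
  /** Returns true if the pool should open connection N+1, per the rule described above. */
  static boolean shouldOpenNewConnection(
      int openConnections, int maxConnections, int maxRequestsPerConnection,
      int newConnectionThreshold, int activeRequests) {
    return openConnections < maxConnections
        && activeRequests
            > (openConnections - 1) * maxRequestsPerConnection + newConnectionThreshold;
  }

  public static void main(String[] args) {
    // V3 LOCAL defaults: maxRequestsPerConnection = 1024, newConnectionThreshold = 800.
    // With 1 of 2 allowed connections open, the 801st concurrent request triggers a second one.
    System.out.println(shouldOpenNewConnection(1, 2, 1024, 800, 801)); // true
    System.out.println(shouldOpenNewConnection(1, 2, 1024, 800, 800)); // false
  }
}
```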

The default value is: + * + *

    + *
  • with {@code ProtocolVersion#V2} or below: 100 for all hosts. + *
  • with {@code ProtocolVersion#V3} or above: 800 for {@code LOCAL} hosts and 200 for {@code + * REMOTE} hosts. + *
+ * + * @param distance the {@code HostDistance} for which to configure this threshold. + * @param newValue the value to set (between 0 and 128). + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code + * maxSimultaneousRequests} is not in range, or if {@code newValue} is less than the minimum + * value for this distance. + */ + public synchronized PoolingOptions setNewConnectionThreshold( + HostDistance distance, int newValue) { + if (distance == HostDistance.IGNORED) + throw new IllegalArgumentException( + "Cannot set new connection threshold for " + distance + " hosts"); + + checkRequestsPerConnectionRange(newValue, "New connection threshold", distance); + newConnectionThreshold[distance.ordinal()] = newValue; + return this; + } + + /** + * Returns the maximum number of requests per connection. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the maximum number of requests per connection at distance {@code distance}. + * @see #setMaxRequestsPerConnection(HostDistance, int) + */ + public int getMaxRequestsPerConnection(HostDistance distance) { + switch (distance) { + case LOCAL: + return maxRequestsPerConnectionLocal; + case REMOTE: + return maxRequestsPerConnectionRemote; + default: + return 0; } - - /** - * Returns the heart beat interval, after which a message is sent on an idle connection to make sure it's still alive. - * - * @return the interval. - */ - public int getHeartbeatIntervalSeconds() { - return heartbeatIntervalSeconds; + } + + /** + * Sets the maximum number of requests per connection. + * + *

The default value is: + * + *

    + *
  • with {@code ProtocolVersion#V2} or below: 128 for all hosts (there should not be any + * reason to change this). + *
  • with {@code ProtocolVersion#V3} or above: 1024 for {@code LOCAL} hosts and 256 for {@code + * REMOTE} hosts. These values were chosen so that the default V2 and V3 configurations + * generate the same load on a Cassandra cluster. Protocol V3 can go much higher (up to + * 32768), so if your number of clients is low, don't hesitate to experiment with higher + * values. If you have more than one connection per host, consider also adjusting {@link + * #setNewConnectionThreshold(HostDistance, int)}. + *
+ * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param newMaxRequests the value to set. + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}, or if {@code + * newMaxConnections} is not within the allowed range. + */ + public PoolingOptions setMaxRequestsPerConnection(HostDistance distance, int newMaxRequests) { + checkRequestsPerConnectionRange(newMaxRequests, "Max requests per connection", distance); + + switch (distance) { + case LOCAL: + maxRequestsPerConnectionLocal = newMaxRequests; + break; + case REMOTE: + maxRequestsPerConnectionRemote = newMaxRequests; + break; + default: + throw new IllegalArgumentException( + "Cannot set max requests per host for " + distance + " hosts"); } - - /** - * Sets the heart beat interval, after which a message is sent on an idle connection to make sure it's still alive. - *
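For a low number of client instances on protocol V3 or above, a sketch of raising the per-connection request limit; the contact point is illustrative and the chosen values are only an example of the tuning described above.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;

public class MaxRequestsTuningExample {
  public static void main(String[] args) {
    PoolingOptions pooling = new PoolingOptions()
        .setMaxRequestsPerConnection(HostDistance.LOCAL, 2048)  // V3 allows up to 32768
        .setMaxRequestsPerConnection(HostDistance.REMOTE, 512);

    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")  // illustrative contact point
        .withPoolingOptions(pooling)
        .build();
    try {
      cluster.connect();
    } finally {
      cluster.close();
    }
  }
}
```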

- * This is an application-level keep-alive, provided for convenience since adjusting the TCP keep-alive might not be - * practical in all environments. - *

- * This option should be set higher than {@link SocketOptions#getReadTimeoutMillis()}. - *

- * The default value for this option is 30 seconds. - * - * @param heartbeatIntervalSeconds the new value in seconds. If set to 0, it will disable the feature. - * @return this {@code PoolingOptions} - * @throws IllegalArgumentException if the interval is negative. - */ - public PoolingOptions setHeartbeatIntervalSeconds(int heartbeatIntervalSeconds) { - if (heartbeatIntervalSeconds < 0) - throw new IllegalArgumentException("Heartbeat interval must be positive"); - - this.heartbeatIntervalSeconds = heartbeatIntervalSeconds; - return this; + return this; + } + + /** + * Returns the timeout before an idle connection is removed. + * + * @return the timeout. + */ + public int getIdleTimeoutSeconds() { + return idleTimeoutSeconds; + } + + /** + * Sets the timeout before an idle connection is removed. + * + *

The order of magnitude should be a few minutes (the default is 120 seconds). The timeout + * that triggers the removal has a granularity of 10 seconds. + * + * @param idleTimeoutSeconds the new timeout in seconds. + * @return this {@code PoolingOptions}. + * @throws IllegalArgumentException if the timeout is negative. + */ + public PoolingOptions setIdleTimeoutSeconds(int idleTimeoutSeconds) { + if (idleTimeoutSeconds < 0) throw new IllegalArgumentException("Idle timeout must be positive"); + this.idleTimeoutSeconds = idleTimeoutSeconds; + return this; + } + + /** + * Returns the timeout when trying to acquire a connection from a host's pool. + * + * @return the timeout. + */ + public int getPoolTimeoutMillis() { + return poolTimeoutMillis; + } + + /** + * Sets the timeout when trying to acquire a connection from a host's pool. + * + *

This option works in concert with {@link #setMaxQueueSize(int)} to determine what happens if + * the driver tries to borrow a connection from the pool but none is available: + * + *

    + *
  • if either option is set to zero, the attempt is rejected immediately; + *
  • else if more than {@code maxQueueSize} requests are already waiting for a connection, the + * attempt is also rejected; + *
  • otherwise, the attempt is enqueued; if a connection becomes available before {@code + * poolTimeoutMillis} has elapsed, then the attempt succeeds, otherwise it is rejected. + *
+ * + * If the attempt is rejected, the driver will move to the next host in the {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement)} query + * plan}. + * + *

The default is 5 seconds. If this option is set to zero, the driver won't wait at all. + * + * @param poolTimeoutMillis the new value in milliseconds. + * @return this {@code PoolingOptions} + * @throws IllegalArgumentException if the timeout is negative. + */ + public PoolingOptions setPoolTimeoutMillis(int poolTimeoutMillis) { + if (poolTimeoutMillis < 0) throw new IllegalArgumentException("Pool timeout must be positive"); + this.poolTimeoutMillis = poolTimeoutMillis; + return this; + } + + /** + * Returns the maximum number of requests that get enqueued if no connection is available. + * + * @return the maximum queue size. + */ + public int getMaxQueueSize() { + return maxQueueSize; + } + + /** + * Sets the maximum number of requests that get enqueued if no connection is available. + * + *

This option works in concert with {@link #setPoolTimeoutMillis(int)} to determine what + * happens if the driver tries to borrow a connection from the pool but none is available: + * + *

    + *
  • if either option is set to zero, the attempt is rejected immediately; + *
  • else if more than {@code maxQueueSize} requests are already waiting for a connection, the + * attempt is also rejected; + *
  • otherwise, the attempt is enqueued; if a connection becomes available before {@code + * poolTimeoutMillis} has elapsed, then the attempt succeeds, otherwise it is rejected. + *
+ * + * If the attempt is rejected, the driver will move to the next host in the {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement)} query + * plan}. + * + *

The default value is {@value DEFAULT_MAX_QUEUE_SIZE}. If this option is set to zero, the + * driver will never enqueue requests. + * + * @param maxQueueSize the new value. + * @return this {@code PoolingOptions} + * @throws IllegalArgumentException if the value is negative. + */ + public PoolingOptions setMaxQueueSize(int maxQueueSize) { + if (maxQueueSize < 0) throw new IllegalArgumentException("Max queue size must be positive"); + this.maxQueueSize = maxQueueSize; + return this; + } + + /** + * Returns the heart beat interval, after which a message is sent on an idle connection to make + * sure it's still alive. + * + * @return the interval. + */ + public int getHeartbeatIntervalSeconds() { + return heartbeatIntervalSeconds; + } + + /** + * Sets the heart beat interval, after which a message is sent on an idle connection to make sure + * it's still alive. + * + *
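A small sketch of the two borrowing policies these options describe: fail fast with no queueing at all, or allow a bounded wait. The numbers are illustrative only.

```java
import com.datastax.driver.core.PoolingOptions;

public class BorrowPolicyExample {
  public static void main(String[] args) {
    // Fail fast: never queue a request waiting for a connection.
    PoolingOptions failFast = new PoolingOptions()
        .setPoolTimeoutMillis(0)
        .setMaxQueueSize(0);

    // Bounded wait: up to 512 queued requests, each waiting at most 2 seconds.
    PoolingOptions boundedWait = new PoolingOptions()
        .setMaxQueueSize(512)
        .setPoolTimeoutMillis(2000);

    System.out.println(failFast.getMaxQueueSize());          // 0
    System.out.println(boundedWait.getPoolTimeoutMillis());  // 2000
  }
}
```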

This is an application-level keep-alive, provided for convenience since adjusting the TCP + * keep-alive might not be practical in all environments. + * + *

This option should be set higher than {@link SocketOptions#getReadTimeoutMillis()}. + * + *

The default value for this option is 30 seconds. + * + * @param heartbeatIntervalSeconds the new value in seconds. If set to 0, it will disable the + * feature. + * @return this {@code PoolingOptions} + * @throws IllegalArgumentException if the interval is negative. + */ + public PoolingOptions setHeartbeatIntervalSeconds(int heartbeatIntervalSeconds) { + if (heartbeatIntervalSeconds < 0) + throw new IllegalArgumentException("Heartbeat interval must be positive"); + + this.heartbeatIntervalSeconds = heartbeatIntervalSeconds; + return this; + } + + /** + * Returns the executor to use for connection initialization. + * + * @return the executor. + * @see #setInitializationExecutor(java.util.concurrent.Executor) + */ + public Executor getInitializationExecutor() { + return initializationExecutor; + } + + /** + * Sets the executor to use for connection initialization. + * + *
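A sketch of keeping the heartbeat interval above the socket read timeout, as recommended above; the contact point and the specific timeouts are only examples.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.SocketOptions;

public class HeartbeatExample {
  public static void main(String[] args) {
    SocketOptions socketOptions = new SocketOptions().setReadTimeoutMillis(12000);
    // Keep the heartbeat interval above the read timeout (60 s > 12 s here).
    PoolingOptions poolingOptions = new PoolingOptions().setHeartbeatIntervalSeconds(60);

    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")  // illustrative contact point
        .withSocketOptions(socketOptions)
        .withPoolingOptions(poolingOptions)
        .build();
    cluster.close();
  }
}
```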

Connections are open in a completely asynchronous manner. Since initializing the transport + * requires separate CQL queries, the futures representing the completion of these queries are + * transformed and chained. This executor is where these transformations happen. + * + *

This is an advanced option, which should be rarely needed in practice. It defaults to + * Guava's {@code MoreExecutors.sameThreadExecutor()}, which results in running the + * transformations on the network I/O threads; this is fine if the transformations are fast and + * not I/O bound (which is the case by default). One reason why you might want to provide a custom + * executor is if you use authentication with a custom {@link + * com.datastax.driver.core.Authenticator} implementation that performs blocking calls. + * + * @param initializationExecutor the executor to use + * @return this {@code PoolingOptions} + * @throws java.lang.NullPointerException if the executor is null + */ + public PoolingOptions setInitializationExecutor(Executor initializationExecutor) { + Preconditions.checkNotNull(initializationExecutor); + this.initializationExecutor = initializationExecutor; + return this; + } + + synchronized void setProtocolVersion(ProtocolVersion actualVersion) { + this.protocolVersion = actualVersion; + + ProtocolVersion referenceVersion = null; + for (ProtocolVersion key : DEFAULTS.keySet()) { + if (key.compareTo(actualVersion) > 0) break; + else referenceVersion = key; } - - /** - * Returns the executor to use for connection initialization. - * - * @return the executor. - * @see #setInitializationExecutor(java.util.concurrent.Executor) - */ - public Executor getInitializationExecutor() { - return initializationExecutor; - } - - /** - * Sets the executor to use for connection initialization. - *
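A minimal sketch of supplying a custom initialization executor, for instance when a custom Authenticator performs blocking calls; the pool size is arbitrary and lifecycle management is simplified.

```java
import com.datastax.driver.core.PoolingOptions;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class InitializationExecutorExample {
  public static void main(String[] args) {
    // Offload connection-initialization transformations from the network I/O threads.
    ExecutorService initExecutor = Executors.newFixedThreadPool(2);
    PoolingOptions poolingOptions = new PoolingOptions()
        .setInitializationExecutor(initExecutor);

    // Pass poolingOptions to Cluster.builder().withPoolingOptions(...) as usual,
    // and shut the executor down once the Cluster is closed.
    initExecutor.shutdown();
  }
}
```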

- * Connections are open in a completely asynchronous manner. Since initializing the transport - * requires separate CQL queries, the futures representing the completion of these queries are - * transformed and chained. This executor is where these transformations happen. - *

- * This is an advanced option, which should be rarely needed in practice. It defaults to - * Guava's {@code MoreExecutors.sameThreadExecutor()}, which results in running the transformations - * on the network I/O threads; this is fine if the transformations are fast and not I/O bound - * (which is the case by default). - * One reason why you might want to provide a custom executor is if you use authentication with - * a custom {@link com.datastax.driver.core.Authenticator} implementation that performs blocking - * calls. - * - * @param initializationExecutor the executor to use - * @return this {@code PoolingOptions} - * @throws java.lang.NullPointerException if the executor is null - */ - public PoolingOptions setInitializationExecutor(Executor initializationExecutor) { - Preconditions.checkNotNull(initializationExecutor); - this.initializationExecutor = initializationExecutor; - return this; - } - - synchronized void setProtocolVersion(ProtocolVersion actualVersion) { - this.protocolVersion = actualVersion; - - ProtocolVersion referenceVersion = null; - for (ProtocolVersion key : DEFAULTS.keySet()) { - if (key.compareTo(actualVersion) > 0) - break; - else - referenceVersion = key; - } - assert referenceVersion != null; // will not happen since V1 is a key - - Map defaults = DEFAULTS.get(referenceVersion); - - if (coreConnections[LOCAL.ordinal()] == UNSET) - coreConnections[LOCAL.ordinal()] = defaults.get(CORE_POOL_LOCAL_KEY); - if (maxConnections[LOCAL.ordinal()] == UNSET) - maxConnections[LOCAL.ordinal()] = defaults.get(MAX_POOL_LOCAL_KEY); - checkConnectionsPerHostOrder(coreConnections[LOCAL.ordinal()], maxConnections[LOCAL.ordinal()], LOCAL); - - if (coreConnections[REMOTE.ordinal()] == UNSET) - coreConnections[REMOTE.ordinal()] = defaults.get(CORE_POOL_REMOTE_KEY); - if (maxConnections[REMOTE.ordinal()] == UNSET) - maxConnections[REMOTE.ordinal()] = defaults.get(MAX_POOL_REMOTE_KEY); - checkConnectionsPerHostOrder(coreConnections[REMOTE.ordinal()], maxConnections[REMOTE.ordinal()], REMOTE); - - if (newConnectionThreshold[LOCAL.ordinal()] == UNSET) - newConnectionThreshold[LOCAL.ordinal()] = defaults.get(NEW_CONNECTION_THRESHOLD_LOCAL_KEY); - checkRequestsPerConnectionRange(newConnectionThreshold[LOCAL.ordinal()], "New connection threshold", LOCAL); - - if (newConnectionThreshold[REMOTE.ordinal()] == UNSET) - newConnectionThreshold[REMOTE.ordinal()] = defaults.get(NEW_CONNECTION_THRESHOLD_REMOTE_KEY); - checkRequestsPerConnectionRange(newConnectionThreshold[REMOTE.ordinal()], "New connection threshold", REMOTE); - - if (maxRequestsPerConnectionLocal == UNSET) - maxRequestsPerConnectionLocal = defaults.get(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY); - checkRequestsPerConnectionRange(maxRequestsPerConnectionLocal, "Max requests per connection", LOCAL); - - if (maxRequestsPerConnectionRemote == UNSET) - maxRequestsPerConnectionRemote = defaults.get(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY); - checkRequestsPerConnectionRange(maxRequestsPerConnectionRemote, "Max requests per connection", REMOTE); - } - - /** - * Requests the driver to re-evaluate the {@link HostDistance} (through the configured - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy#distance}) for every known - * hosts and to drop/add connections to each hosts according to the computed distance. - *

- * Note that, due to backward compatibility issues, this method is not interruptible. If the - * caller thread gets interrupted, the method will complete and only then re-interrupt the - * thread (which you can check with {@code Thread.currentThread().isInterrupted()}). - */ - public void refreshConnectedHosts() { - manager.refreshConnectedHosts(); - } - - /** - * Requests the driver to re-evaluate the {@link HostDistance} for a given node. - * - * @param host the host to refresh. - * @see #refreshConnectedHosts() - */ - public void refreshConnectedHost(Host host) { - manager.refreshConnectedHost(host); - } - - private void checkRequestsPerConnectionRange(int value, String description, HostDistance distance) { - // If we don't know the protocol version yet, use the highest possible upper bound, this will get checked again when possible - int max = (protocolVersion == null || protocolVersion.compareTo(ProtocolVersion.V3) >= 0) - ? StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V3 - : StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V2; - - if (value < 0 || value > max) - throw new IllegalArgumentException(String.format("%s for %s hosts must be in the range (0, %d)", - description, distance, max)); - } - - private static void checkConnectionsPerHostOrder(int core, int max, HostDistance distance) { - if (core > max) - throw new IllegalArgumentException(String.format("Core connections for %s hosts must be less than max (%d > %d)", - distance, core, max)); - } - + assert referenceVersion != null; // will not happen since V1 is a key + + Map defaults = DEFAULTS.get(referenceVersion); + + if (coreConnections[LOCAL.ordinal()] == UNSET) + coreConnections[LOCAL.ordinal()] = defaults.get(CORE_POOL_LOCAL_KEY); + if (maxConnections[LOCAL.ordinal()] == UNSET) + maxConnections[LOCAL.ordinal()] = defaults.get(MAX_POOL_LOCAL_KEY); + checkConnectionsPerHostOrder( + coreConnections[LOCAL.ordinal()], maxConnections[LOCAL.ordinal()], LOCAL); + + if (coreConnections[REMOTE.ordinal()] == UNSET) + coreConnections[REMOTE.ordinal()] = defaults.get(CORE_POOL_REMOTE_KEY); + if (maxConnections[REMOTE.ordinal()] == UNSET) + maxConnections[REMOTE.ordinal()] = defaults.get(MAX_POOL_REMOTE_KEY); + checkConnectionsPerHostOrder( + coreConnections[REMOTE.ordinal()], maxConnections[REMOTE.ordinal()], REMOTE); + + if (newConnectionThreshold[LOCAL.ordinal()] == UNSET) + newConnectionThreshold[LOCAL.ordinal()] = defaults.get(NEW_CONNECTION_THRESHOLD_LOCAL_KEY); + checkRequestsPerConnectionRange( + newConnectionThreshold[LOCAL.ordinal()], "New connection threshold", LOCAL); + + if (newConnectionThreshold[REMOTE.ordinal()] == UNSET) + newConnectionThreshold[REMOTE.ordinal()] = defaults.get(NEW_CONNECTION_THRESHOLD_REMOTE_KEY); + checkRequestsPerConnectionRange( + newConnectionThreshold[REMOTE.ordinal()], "New connection threshold", REMOTE); + + if (maxRequestsPerConnectionLocal == UNSET) + maxRequestsPerConnectionLocal = defaults.get(MAX_REQUESTS_PER_CONNECTION_LOCAL_KEY); + checkRequestsPerConnectionRange( + maxRequestsPerConnectionLocal, "Max requests per connection", LOCAL); + + if (maxRequestsPerConnectionRemote == UNSET) + maxRequestsPerConnectionRemote = defaults.get(MAX_REQUESTS_PER_CONNECTION_REMOTE_KEY); + checkRequestsPerConnectionRange( + maxRequestsPerConnectionRemote, "Max requests per connection", REMOTE); + } + + /** + * Requests the driver to re-evaluate the {@link HostDistance} (through the configured {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy#distance}) for every known hosts and to + * drop/add 
connections to each host according to the computed distance. + * + *

Note that, due to backward compatibility issues, this method is not interruptible. If the + * caller thread gets interrupted, the method will complete and only then re-interrupt the thread + * (which you can check with {@code Thread.currentThread().isInterrupted()}). + */ + public void refreshConnectedHosts() { + manager.refreshConnectedHosts(); + } + + /** + * Requests the driver to re-evaluate the {@link HostDistance} for a given node. + * + * @param host the host to refresh. + * @see #refreshConnectedHosts() + */ + public void refreshConnectedHost(Host host) { + manager.refreshConnectedHost(host); + } + + private void checkRequestsPerConnectionRange( + int value, String description, HostDistance distance) { + // If we don't know the protocol version yet, use the highest possible upper bound, this will + // get checked again when possible + int max = + (protocolVersion == null || protocolVersion.compareTo(ProtocolVersion.V3) >= 0) + ? StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V3 + : StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V2; + + if (value < 0 || value > max) + throw new IllegalArgumentException( + String.format( + "%s for %s hosts must be in the range (0, %d)", description, distance, max)); + } + + private static void checkConnectionsPerHostOrder(int core, int max, HostDistance distance) { + if (core > max) + throw new IllegalArgumentException( + String.format( + "Core connections for %s hosts must be less than max (%d > %d)", + distance, core, max)); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java index 9a0e255f380..28fef17cf92 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedId.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +17,42 @@ */ package com.datastax.driver.core; -/** - * Identifies a PreparedStatement. - */ +/** Identifies a PreparedStatement. 
*/ public class PreparedId { - // This class is mostly here to group PreparedStatement data that are need for - // execution but that we don't want to expose publicly (see JAVA-195) - final MD5Digest id; - final ColumnDefinitions metadata; - final ColumnDefinitions resultSetMetadata; + // This class is mostly here to group PreparedStatement data that are needed for + // execution but that we don't want to expose publicly (see JAVA-195) + + final int[] routingKeyIndexes; + + final ProtocolVersion protocolVersion; - final int[] routingKeyIndexes; - final ProtocolVersion protocolVersion; + final PreparedMetadata boundValuesMetadata; + + // can change over time, see JAVA-1196, JAVA-420 + volatile PreparedMetadata resultSetMetadata; + + PreparedId( + PreparedMetadata boundValuesMetadata, + PreparedMetadata resultSetMetadata, + int[] routingKeyIndexes, + ProtocolVersion protocolVersion) { + assert boundValuesMetadata != null; + assert resultSetMetadata != null; + this.boundValuesMetadata = boundValuesMetadata; + this.resultSetMetadata = resultSetMetadata; + this.routingKeyIndexes = routingKeyIndexes; + this.protocolVersion = protocolVersion; + } + + static class PreparedMetadata { + + final MD5Digest id; + final ColumnDefinitions variables; - PreparedId(MD5Digest id, ColumnDefinitions metadata, ColumnDefinitions resultSetMetadata, int[] routingKeyIndexes, ProtocolVersion protocolVersion) { - this.id = id; - this.metadata = metadata; - this.resultSetMetadata = resultSetMetadata; - this.routingKeyIndexes = routingKeyIndexes; - this.protocolVersion = protocolVersion; + PreparedMetadata(MD5Digest id, ColumnDefinitions variables) { + this.id = id; + this.variables = variables; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index acf0dd3530c..4d324560db0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,353 +19,331 @@ import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.policies.RetryPolicy; - import java.nio.ByteBuffer; import java.util.Map; /** - * Represents a prepared statement, a query with bound variables that has been - * prepared (pre-parsed) by the database. - *

- * A prepared statement can be executed once concrete values have been provided - * for the bound variables. A prepared statement and the values for its - * bound variables constitute a BoundStatement and can be executed (by - * {@link Session#execute}). - *

- * A {@code PreparedStatement} object allows you to define specific defaults - * for the different properties of a {@link Statement} (Consistency level, tracing, ...), - * in which case those properties will be inherited as default by every - * BoundedStatement created from the {PreparedStatement}. The default for those - * {@code PreparedStatement} properties is the same that in {@link Statement} if the - * PreparedStatement is created by {@link Session#prepare(String)} but will inherit - * of the properties of the {@link RegularStatement} used for the preparation if - * {@link Session#prepare(RegularStatement)} is used. + * Represents a prepared statement, a query with bound variables that has been prepared (pre-parsed) + * by the database. + * + *

A prepared statement can be executed once concrete values have been provided for the bound + * variables. A prepared statement and the values for its bound variables constitute a + * BoundStatement and can be executed (by {@link Session#execute}). + * + *

A {@code PreparedStatement} object allows you to define specific defaults for the different + * properties of a {@link Statement} (consistency level, tracing, ...), in which case those + * properties will be inherited as defaults by every BoundStatement created from the + * {@code PreparedStatement}. The defaults for those {@code PreparedStatement} properties are the same as + * in {@link Statement} if the PreparedStatement is created by {@link Session#prepare(String)}, but + * they will be inherited from the {@link RegularStatement} used for the preparation if {@link + * Session#prepare(RegularStatement)} is used. */ public interface PreparedStatement { - /** - * Returns metadata on the bounded variables of this prepared statement. - * - * @return the variables bounded in this prepared statement. - */ - public ColumnDefinitions getVariables(); - - /** - * Creates a new BoundStatement object and bind its variables to the - * provided values. - *

- * While the number of {@code values} cannot be greater than the number of bound - * variables, the number of {@code values} may be fewer than the number of bound - * variables. In that case, the remaining variables will have to be bound - * to values by another mean because the resulting {@code BoundStatement} - * being executable. - *

- * This method is a convenience for {@code new BoundStatement(this).bind(...)}. - * - * @param values the values to bind to the variables of the newly created - * BoundStatement. - * @return the newly created {@code BoundStatement} with its variables - * bound to {@code values}. - * @throws IllegalArgumentException if more {@code values} are provided - * than there is of bound variables in this statement. - * @throws InvalidTypeException if any of the provided value is not of - * correct type to be bound to the corresponding bind variable. - * @throws NullPointerException if one of {@code values} is a collection - * (List, Set or Map) containing a null value. Nulls are not supported in - * collections by CQL. - * @see BoundStatement#bind - */ - public BoundStatement bind(Object... values); + /** + * Returns metadata on the bounded variables of this prepared statement. + * + * @return the variables bounded in this prepared statement. + */ + public ColumnDefinitions getVariables(); - /** - * Creates a new BoundStatement object for this prepared statement. - *

- * This method do not bind any values to any of the prepared variables. Said - * values need to be bound on the resulting statement using BoundStatement's - * setters methods ({@link BoundStatement#setInt}, {@link BoundStatement#setLong}, ...). - * - * @return the newly created {@code BoundStatement}. - */ - public BoundStatement bind(); + /** + * Creates a new BoundStatement object and bind its variables to the provided values. + * + *

While the number of {@code values} cannot be greater than the number of bound variables, the + * number of {@code values} may be fewer than the number of bound variables. In that case, the + * remaining variables will have to be bound to values by other means before the resulting + * {@code BoundStatement} can be executed. + * + *

This method is a convenience for {@code new BoundStatement(this).bind(...)}. + * + * @param values the values to bind to the variables of the newly created BoundStatement. + * @return the newly created {@code BoundStatement} with its variables bound to {@code values}. + * @throws IllegalArgumentException if more {@code values} are provided than there is of bound + * variables in this statement. + * @throws InvalidTypeException if any of the provided value is not of correct type to be bound to + * the corresponding bind variable. + * @throws NullPointerException if one of {@code values} is a collection (List, Set or Map) + * containing a null value. Nulls are not supported in collections by CQL. + * @see BoundStatement#bind + */ + public BoundStatement bind(Object... values); - /** - * Sets the routing key for this prepared statement. - *
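A short prepare-and-bind sketch; the contact point, keyspace and table are made up, and a schema `users (id int PRIMARY KEY, name text)` is assumed.

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

public class PrepareAndBindExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Session session = cluster.connect("my_ks");  // keyspace name is illustrative
      PreparedStatement ps =
          session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");

      // Bind all variables at once...
      session.execute(ps.bind(42, "alice"));

      // ...or bind only some and set the rest through the BoundStatement setters.
      BoundStatement bs = ps.bind(43);
      bs.setString("name", "bob");
      session.execute(bs);
    } finally {
      cluster.close();
    }
  }
}
```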

- * While you can provide a fixed routing key for all executions of this prepared - * statement with this method, it is not mandatory to provide - * one through this method. This method should only be used - * if the partition key of the prepared query is not part of the prepared - * variables (that is if the partition key is fixed). - *

- * Note that if the partition key is part of the prepared variables, the - * routing key will be automatically computed once those variables are bound. - *

- * If the partition key is neither fixed nor part of the prepared variables (e.g. - * a composite partition key where only some of the components are bound), the - * routing key can also be set on each bound statement. - * - * @param routingKey the raw (binary) value to use as routing key. - * @return this {@code PreparedStatement} object. - * @see Statement#getRoutingKey - * @see BoundStatement#getRoutingKey - */ - public PreparedStatement setRoutingKey(ByteBuffer routingKey); + /** + * Creates a new BoundStatement object for this prepared statement. + * + *

This method does not bind any values to any of the prepared variables. Said values need to be + * bound on the resulting statement using BoundStatement's setter methods ({@link + * BoundStatement#setInt}, {@link BoundStatement#setLong}, ...). + * + * @return the newly created {@code BoundStatement}. + */ + public BoundStatement bind(); - /** - * Sets the routing key for this query. - *

- * See {@link #setRoutingKey(ByteBuffer)} for more information. This - * method is a variant for when the query partition key is composite and - * the routing key must be built from multiple values. - * - * @param routingKeyComponents the raw (binary) values to compose to obtain - * the routing key. - * @return this {@code PreparedStatement} object. - * @see Statement#getRoutingKey - */ - public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents); + /** + * Sets the routing key for this prepared statement. + * + *

While you can provide a fixed routing key for all executions of this prepared statement with + * this method, it is not mandatory to provide one through this method. This method should only be + * used if the partition key of the prepared query is not part of the prepared variables (that is + * if the partition key is fixed). + * + *

Note that if the partition key is part of the prepared variables, the routing key will be + * automatically computed once those variables are bound. + * + *

If the partition key is neither fixed nor part of the prepared variables (e.g. a composite + * partition key where only some of the components are bound), the routing key can also be set on + * each bound statement. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code PreparedStatement} object. + * @see Statement#getRoutingKey + * @see BoundStatement#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer routingKey); - /** - * Returns the routing key set for this query. - * - * @return the routing key for this query or {@code null} if none has been - * explicitly set on this PreparedStatement. - */ - public ByteBuffer getRoutingKey(); + /** + * Sets the routing key for this query. + * + *
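A sketch of the fixed-routing-key case described above, assuming a hypothetical `events` table whose single partition key is the text column `tenant` and whose value is hard-coded in the query; a text partition key is serialized as its UTF-8 bytes.

```java
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FixedRoutingKeyExample {
  static PreparedStatement prepareForFixedTenant(Session session) {
    // The partition key 'acme' is fixed, so it is not among the bound variables.
    PreparedStatement ps =
        session.prepare("SELECT * FROM events WHERE tenant = 'acme' AND day = ?");
    // Provide the serialized partition key so the driver can still route to a replica.
    ps.setRoutingKey(ByteBuffer.wrap("acme".getBytes(StandardCharsets.UTF_8)));
    return ps;
  }
}
```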

See {@link #setRoutingKey(ByteBuffer)} for more information. This method is a variant for + * when the query partition key is composite and the routing key must be built from multiple + * values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain the routing key. + * @return this {@code PreparedStatement} object. + * @see Statement#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents); - /** - * Sets a default consistency level for all bound statements - * created from this prepared statement. - *
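To illustrate the routing-key methods above, a sketch only: the partition-key value is hypothetical, and `ps` is assumed to be a statement prepared on a text partition key that is fixed in the query string rather than bound:

```
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import java.nio.ByteBuffer;

class RoutingKeySketch {
  static void fixRoutingKey(PreparedStatement ps) {
    // The partition key does not appear among the bound variables
    // (e.g. "... WHERE user_id = 'user-123'"), so the driver cannot compute
    // the routing key itself; supply its serialized form explicitly.
    ByteBuffer key = TypeCodec.varchar().serialize("user-123", ProtocolVersion.V4);
    ps.setRoutingKey(key);

    // For a composite partition key, pass one serialized buffer per component:
    // ps.setRoutingKey(component1, component2);
  }
}
```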

- * If no consistency level is set through this method, the bound statement - * created from this object will use the default consistency level (ONE). - *

- * Changing the default consistency level is not retroactive, it only - * applies to BoundStatement created after the change. - * - * @param consistency the default consistency level to set. - * @return this {@code PreparedStatement} object. - */ - public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency); + /** + * Returns the routing key set for this query. + * + * @return the routing key for this query or {@code null} if none has been explicitly set on this + * PreparedStatement. + */ + public ByteBuffer getRoutingKey(); - /** - * Returns the default consistency level set through {@link #setConsistencyLevel}. - * - * @return the default consistency level. Returns {@code null} if no - * consistency level has been set through this object {@code setConsistencyLevel} - * method. - */ - public ConsistencyLevel getConsistencyLevel(); + /** + * Sets a default consistency level for all bound statements created from this prepared statement. + * + *

If no consistency level is set through this method, the bound statement created from this + * object will use the default consistency level (LOCAL_ONE). + * + *

Changing the default consistency level is not retroactive, it only applies to BoundStatement + * created after the change. + * + * @param consistency the default consistency level to set. + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency); - /** - * Sets a default serial consistency level for all bound statements - * created from this prepared statement. - *

- * If no serial consistency level is set through this method, the bound statement - * created from this object will use the default serial consistency level (SERIAL). - *

- * Changing the default serial consistency level is not retroactive, it only - * applies to BoundStatement created after the change. - * - * @param serialConsistency the default serial consistency level to set. - * @return this {@code PreparedStatement} object. - * @throws IllegalArgumentException if {@code serialConsistency} is not one of - * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. - */ - public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency); + /** + * Returns the default consistency level set through {@link #setConsistencyLevel}. + * + * @return the default consistency level. Returns {@code null} if no consistency level has been + * set through this object {@code setConsistencyLevel} method. + */ + public ConsistencyLevel getConsistencyLevel(); - /** - * Returns the default serial consistency level set through {@link #setSerialConsistencyLevel}. - * - * @return the default serial consistency level. Returns {@code null} if no - * consistency level has been set through this object {@code setSerialConsistencyLevel} - * method. - */ - public ConsistencyLevel getSerialConsistencyLevel(); + /** + * Sets a default serial consistency level for all bound statements created from this prepared + * statement. + * + *

If no serial consistency level is set through this method, the bound statement created from + * this object will use the default serial consistency level (SERIAL). + * + *

Changing the default serial consistency level is not retroactive, it only applies to + * BoundStatement created after the change. + * + * @param serialConsistency the default serial consistency level to set. + * @return this {@code PreparedStatement} object. + * @throws IllegalArgumentException if {@code serialConsistency} is not one of {@code + * ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + */ + public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency); - /** - * Returns the string of the query that was prepared to yield this {@code - * PreparedStatement}. - *
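A short sketch of the consistency defaults described above, with `ps` assumed to be prepared elsewhere; as the Javadoc notes, the values only apply to bound statements created after the calls:

```
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.PreparedStatement;

class ConsistencyDefaultsSketch {
  static BoundStatement bindWithDefaults(PreparedStatement ps) {
    ps.setConsistencyLevel(ConsistencyLevel.QUORUM);
    ps.setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL);

    // Inherits QUORUM / LOCAL_SERIAL unless overridden on the bound statement.
    return ps.bind();
  }
}
```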

- * Note that a CQL3 query may be implicitly applied to the current keyspace - * (that is, if the keyspace is not explicitly qualified in the query - * itself). For prepared queries, the current keyspace used is the one at - * the time of the preparation, not the one at execution time. The current - * keyspace at the time of the preparation can be retrieved through - * {@link #getQueryKeyspace}. - * - * @return the query that was prepared to yield this - * {@code PreparedStatement}. - */ - public String getQueryString(); + /** + * Returns the default serial consistency level set through {@link #setSerialConsistencyLevel}. + * + * @return the default serial consistency level. Returns {@code null} if no consistency level has + * been set through this object {@code setSerialConsistencyLevel} method. + */ + public ConsistencyLevel getSerialConsistencyLevel(); - /** - * Returns the keyspace at the time that this prepared statement was prepared, - * (that is the one on which this statement applies unless it specified a - * keyspace explicitly). - * - * @return the keyspace at the time that this statement was prepared or - * {@code null} if no keyspace was set when the query was prepared (which - * is possible since keyspaces can be explicitly qualified in queries and - * so may not require a current keyspace to be set). - */ - public String getQueryKeyspace(); + /** + * Returns the string of the query that was prepared to yield this {@code PreparedStatement}. + * + *

Note that a CQL3 query may be implicitly applied to the current keyspace (that is, if the + * keyspace is not explicitly qualified in the query itself). For prepared queries, the current + * keyspace used is the one at the time of the preparation, not the one at execution time. The + * current keyspace at the time of the preparation can be retrieved through {@link + * #getQueryKeyspace}. + * + * @return the query that was prepared to yield this {@code PreparedStatement}. + */ + public String getQueryString(); - /** - * Convenience method to enables tracing for all bound statements created - * from this prepared statement. - * - * @return this {@code Query} object. - */ - public PreparedStatement enableTracing(); + /** + * Returns the keyspace at the time that this prepared statement was prepared, (that is the one on + * which this statement applies unless it specified a keyspace explicitly). + * + * @return the keyspace at the time that this statement was prepared or {@code null} if no + * keyspace was set when the query was prepared (which is possible since keyspaces can be + * explicitly qualified in queries and so may not require a current keyspace to be set). + */ + public String getQueryKeyspace(); - /** - * Convenience method to disable tracing for all bound statements created - * from this prepared statement. - * - * @return this {@code PreparedStatement} object. - */ - public PreparedStatement disableTracing(); + /** + * Convenience method to enables tracing for all bound statements created from this prepared + * statement. + * + * @return this {@code Query} object. + */ + public PreparedStatement enableTracing(); - /** - * Returns whether tracing is enabled for this prepared statement, i.e. if - * BoundStatement created from it will use tracing by default. - * - * @return {@code true} if this prepared statement has tracing enabled, - * {@code false} otherwise. - */ - public boolean isTracing(); + /** + * Convenience method to disable tracing for all bound statements created from this prepared + * statement. + * + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement disableTracing(); - /** - * Convenience method to set a default retry policy for the {@code BoundStatement} - * created from this prepared statement. - *

- * Note that this method is completely optional. By default, the retry policy - * used is the one returned {@link com.datastax.driver.core.policies.Policies#getRetryPolicy} - * in the cluster configuration. This method is only useful if you want - * to override this default policy for the {@code BoundStatement} created from - * this {@code PreparedStatement}. - * to punctually override the default policy for this request. - * - * @param policy the retry policy to use for this prepared statement. - * @return this {@code PreparedStatement} object. - */ - public PreparedStatement setRetryPolicy(RetryPolicy policy); + /** + * Returns whether tracing is enabled for this prepared statement, i.e. if BoundStatement created + * from it will use tracing by default. + * + * @return {@code true} if this prepared statement has tracing enabled, {@code false} otherwise. + */ + public boolean isTracing(); - /** - * Returns the retry policy sets for this prepared statement, if any. - * - * @return the retry policy sets specifically for this prepared statement or - * {@code null} if none have been set. - */ - public RetryPolicy getRetryPolicy(); + /** + * Convenience method to set a default retry policy for the {@code BoundStatement} created from + * this prepared statement. + * + *

Note that this method is completely optional. By default, the retry policy used is the one + * returned by {@link com.datastax.driver.core.policies.Policies#getRetryPolicy} in the cluster + * configuration. This method is only useful if you want to override this default policy for the + * {@code BoundStatement} created from this {@code PreparedStatement}. + * + * @param policy the retry policy to use for this prepared statement. + * @return this {@code PreparedStatement} object. + */ + public PreparedStatement setRetryPolicy(RetryPolicy policy); - /** - * Returns the prepared Id for this statement. - * - * @return the PreparedId corresponding to this statement. - */ - public PreparedId getPreparedId(); + /** + * Returns the retry policy set for this prepared statement, if any. + * + * @return the retry policy set specifically for this prepared statement or {@code null} if none + * have been set. + */ + public RetryPolicy getRetryPolicy(); - /** - * Return the incoming payload, that is, the payload that the server - * sent back with its {@code PREPARED} response, if any, - * or {@code null}, if the server did not include any custom payload. - *
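As a quick illustration of the per-statement retry policy override documented above (a sketch; `ps` is assumed to be prepared elsewhere):

```
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.policies.FallthroughRetryPolicy;

class RetryPolicySketch {
  static void neverRetry(PreparedStatement ps) {
    // Bound statements created from ps will use this policy instead of the
    // cluster-wide default returned by Policies#getRetryPolicy().
    ps.setRetryPolicy(FallthroughRetryPolicy.INSTANCE);
  }
}
```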

- * Note that if an incoming payload is present, - * and if no outgoing payload has been {@link #setOutgoingPayload(Map) explicitly set}, then each time - * a {@link BoundStatement} is created (with either {@link #bind()} or {@link #bind(Object...)}), - * the resulting {@link BoundStatement} will inherit from this value - * as its default outgoing payload. - *

- * Implementors should return a read-only view of the original map, but even though, - * its values would remain inherently mutable. - * Callers should take care not to modify the returned map in any way. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above; with lower - * versions, this method will always return {@code null}. - * - * @return the custom payload that the server sent back with its response, if any, - * or {@code null}, if the server did not include any custom payload. - * @since 2.2 - */ - public Map getIncomingPayload(); + /** + * Returns the prepared Id for this statement. + * + * @return the PreparedId corresponding to this statement. + */ + public PreparedId getPreparedId(); - /** - * Return the outgoing payload currently associated with this statement. - *

- * If this is set to a non-null value, each time a {@link BoundStatement} is - * created (with either {@link #bind()} or {@link #bind(Object...)}), - * the resulting {@link BoundStatement} will inherit from this value - * as its default outgoing payload. - *

- * Implementors should return a read-only view of the original map, but even though, - * its values would remain inherently mutable. - * Callers should take care not to modify the returned map in any way. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above. - * - * @return this statement's outgoing payload, if any, or {@code null} if no outgoing payload is set - * @since 2.2 - */ - public Map getOutgoingPayload(); + /** + * Return the incoming payload, that is, the payload that the server sent back with its {@code + * PREPARED} response, if any, or {@code null}, if the server did not include any custom payload. + * + *

Note that if an incoming payload is present, and if no outgoing payload has been {@link + * #setOutgoingPayload(Map) explicitly set}, then each time a {@link BoundStatement} is created + * (with either {@link #bind()} or {@link #bind(Object...)}), the resulting {@link BoundStatement} + * will inherit from this value as its default outgoing payload. + * + *

Implementors should return a read-only view of the original map, but even so, its values + * remain inherently mutable. Callers should take care not to modify the returned map in any + * way. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above; with lower + * versions, this method will always return {@code null}. + * + * @return the custom payload that the server sent back with its response, if any, or {@code + * null}, if the server did not include any custom payload. + * @since 2.2 + */ + public Map getIncomingPayload(); - /** - * Associate the given payload with this prepared statement. - *

- * If this is set to a non-null value, each time a {@link BoundStatement} is - * created (with either {@link #bind()} or {@link #bind(Object...)}), - * the resulting {@link BoundStatement} will inherit from this value - * as its default outgoing payload. - *

- * Implementors should make a defensive, thread-safe copy of the given map, but even though, - * its values would remain inherently mutable. - * Callers should take care not to modify the original map once it is passed to this method. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above. - * Trying to include custom payloads in requests sent by the driver - * under lower protocol versions will result in an - * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} - * (wrapped in a {@link com.datastax.driver.core.exceptions.NoHostAvailableException}). - * - * @param payload the outgoing payload to associate with this statement, - * or {@code null} to clear any previously associated payload. - * @return this {@link Statement} object. - * @since 2.2 - */ - public PreparedStatement setOutgoingPayload(Map payload); + /** + * Return the outgoing payload currently associated with this statement. + * + *

If this is set to a non-null value, each time a {@link BoundStatement} is created (with + * either {@link #bind()} or {@link #bind(Object...)}), the resulting {@link BoundStatement} will + * inherit from this value as its default outgoing payload. + * + *

Implementors should return a read-only view of the original map, but even so, its values + * remain inherently mutable. Callers should take care not to modify the returned map in any + * way. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above. + * + * @return this statement's outgoing payload, if any, or {@code null} if no outgoing payload is + * set + * @since 2.2 + */ + public Map getOutgoingPayload(); - /** - * Return the {@link CodecRegistry} instance associated with this prepared statement. - * Implementations should never return {@code null}; instead, they should always return - * the {@link CodecRegistry} instance registered with the {@link Cluster} instance - * this prepared statement belongs to. - * - * @return the {@link CodecRegistry} instance associated with this prepared statement. - */ - public CodecRegistry getCodecRegistry(); + /** + * Associate the given payload with this prepared statement. + * + *

If this is set to a non-null value, each time a {@link BoundStatement} is created (with + * either {@link #bind()} or {@link #bind(Object...)}), the resulting {@link BoundStatement} will + * inherit from this value as its default outgoing payload. + * + *

Implementors should make a defensive, thread-safe copy of the given map, but even so, + * its values remain inherently mutable. Callers should take care not to modify the original + * map once it is passed to this method. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above. Trying to include + * custom payloads in requests sent by the driver under lower protocol versions will result in an + * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} (wrapped in a {@link + * com.datastax.driver.core.exceptions.NoHostAvailableException}). + * + * @param payload the outgoing payload to associate with this statement, or {@code null} to clear + * any previously associated payload. + * @return this {@link Statement} object. + * @since 2.2 + */ + public PreparedStatement setOutgoingPayload(Map payload); - /** - * Sets whether this statement is idempotent. - *
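The payload accessors above can be exercised as in the following sketch (protocol v4 or above). The "trace-id" key is purely illustrative; whether the server does anything with it depends on a custom server-side QueryHandler:

```
import com.datastax.driver.core.PreparedStatement;
import com.google.common.collect.ImmutableMap;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;

class PayloadSketch {
  static Map<String, ByteBuffer> attachPayload(PreparedStatement ps) {
    Map<String, ByteBuffer> payload =
        ImmutableMap.of(
            "trace-id", ByteBuffer.wrap("abc123".getBytes(StandardCharsets.UTF_8)));

    // Every BoundStatement created from ps afterwards inherits this payload.
    ps.setOutgoingPayload(payload);

    // Payload attached by the server to the PREPARED response, or null.
    return ps.getIncomingPayload();
  }
}
```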

- * See {@link com.datastax.driver.core.Statement#isIdempotent} for more explanations about this property. - * - * @param idempotent the new value. - * @return this {@code IdempotenceAwarePreparedStatement} object. - */ - public PreparedStatement setIdempotent(Boolean idempotent); + /** + * Return the {@link CodecRegistry} instance associated with this prepared statement. + * Implementations should never return {@code null}; instead, they should always return the {@link + * CodecRegistry} instance registered with the {@link Cluster} instance this prepared statement + * belongs to. + * + * @return the {@link CodecRegistry} instance associated with this prepared statement. + */ + public CodecRegistry getCodecRegistry(); - /** - * Whether this statement is idempotent, i.e. whether it can be applied multiple times - * without changing the result beyond the initial application. - *

- * See {@link com.datastax.driver.core.Statement#isIdempotent} for more explanations about this property. - *

- * Please note that idempotence will be propagated to all {@link BoundStatement}s created from this prepared statement. - * - * @return whether this statement is idempotent, or {@code null} to use - * {@link QueryOptions#getDefaultIdempotence()}. - */ - public Boolean isIdempotent(); + /** + * Sets whether this statement is idempotent. + * + *

See {@link com.datastax.driver.core.Statement#isIdempotent} for more explanations about this + * property. + * + * @param idempotent the new value. + * @return this {@code IdempotenceAwarePreparedStatement} object. + */ + public PreparedStatement setIdempotent(Boolean idempotent); + /** + * Whether this statement is idempotent, i.e. whether it can be applied multiple times without + * changing the result beyond the initial application. + * + *

See {@link com.datastax.driver.core.Statement#isIdempotent} for more explanations about this + * property. + * + *
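A one-line sketch of the idempotence flag, with `ps` assumed to be prepared elsewhere:

```
import com.datastax.driver.core.PreparedStatement;

class IdempotenceSketch {
  static void markIdempotent(PreparedStatement ps) {
    // Safe to retry or speculatively execute; propagates to every
    // BoundStatement subsequently created from ps. A null value falls back
    // to QueryOptions#getDefaultIdempotence().
    ps.setIdempotent(true);
  }
}
```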

Please note that idempotence will be propagated to all {@link BoundStatement}s created from + * this prepared statement. + * + * @return whether this statement is idempotent, or {@code null} to use {@link + * QueryOptions#getDefaultIdempotence()}. + */ + public Boolean isIdempotent(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java index 863c0561c78..a3bf0c244c6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,141 +17,169 @@ */ package com.datastax.driver.core; -import io.netty.buffer.ByteBuf; +import static com.datastax.driver.core.SchemaElement.AGGREGATE; +import static com.datastax.driver.core.SchemaElement.FUNCTION; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; +import io.netty.buffer.ByteBuf; import java.net.InetSocketAddress; import java.util.Collections; import java.util.List; -import static com.datastax.driver.core.SchemaElement.*; - class ProtocolEvent { - enum Type {TOPOLOGY_CHANGE, STATUS_CHANGE, SCHEMA_CHANGE} + enum Type { + TOPOLOGY_CHANGE, + STATUS_CHANGE, + SCHEMA_CHANGE + } + + final Type type; + + private ProtocolEvent(Type type) { + this.type = type; + } + + static ProtocolEvent deserialize(ByteBuf bb, ProtocolVersion version) { + switch (CBUtil.readEnumValue(Type.class, bb)) { + case TOPOLOGY_CHANGE: + return TopologyChange.deserializeEvent(bb); + case STATUS_CHANGE: + return StatusChange.deserializeEvent(bb); + case SCHEMA_CHANGE: + return SchemaChange.deserializeEvent(bb, version); + } + throw new AssertionError(); + } + + static class TopologyChange extends ProtocolEvent { + enum Change { + NEW_NODE, + REMOVED_NODE, + MOVED_NODE + } - final Type type; + final Change change; + final InetSocketAddress node; - private ProtocolEvent(Type type) { - this.type = type; + private TopologyChange(Change change, InetSocketAddress node) { + super(Type.TOPOLOGY_CHANGE); + this.change = change; + this.node = node; } - static ProtocolEvent deserialize(ByteBuf bb, ProtocolVersion version) { - switch (CBUtil.readEnumValue(Type.class, bb)) { - case TOPOLOGY_CHANGE: - return TopologyChange.deserializeEvent(bb); - case STATUS_CHANGE: - return StatusChange.deserializeEvent(bb); - case SCHEMA_CHANGE: - return SchemaChange.deserializeEvent(bb, version); - } - throw new AssertionError(); + // Assumes the type has already been deserialized + private static TopologyChange deserializeEvent(ByteBuf bb) { 
+ Change change = CBUtil.readEnumValue(Change.class, bb); + InetSocketAddress node = CBUtil.readInet(bb); + return new TopologyChange(change, node); } - static class TopologyChange extends ProtocolEvent { - enum Change {NEW_NODE, REMOVED_NODE, MOVED_NODE} - - final Change change; - final InetSocketAddress node; - - private TopologyChange(Change change, InetSocketAddress node) { - super(Type.TOPOLOGY_CHANGE); - this.change = change; - this.node = node; - } - - // Assumes the type has already been deserialized - private static TopologyChange deserializeEvent(ByteBuf bb) { - Change change = CBUtil.readEnumValue(Change.class, bb); - InetSocketAddress node = CBUtil.readInet(bb); - return new TopologyChange(change, node); - } - - @Override - public String toString() { - return change + " " + node; - } + @Override + public String toString() { + return change + " " + node; } + } - static class StatusChange extends ProtocolEvent { + static class StatusChange extends ProtocolEvent { - enum Status {UP, DOWN} + enum Status { + UP, + DOWN + } - final Status status; - final InetSocketAddress node; + final Status status; + final InetSocketAddress node; - private StatusChange(Status status, InetSocketAddress node) { - super(Type.STATUS_CHANGE); - this.status = status; - this.node = node; - } + private StatusChange(Status status, InetSocketAddress node) { + super(Type.STATUS_CHANGE); + this.status = status; + this.node = node; + } - // Assumes the type has already been deserialized - private static StatusChange deserializeEvent(ByteBuf bb) { - Status status = CBUtil.readEnumValue(Status.class, bb); - InetSocketAddress node = CBUtil.readInet(bb); - return new StatusChange(status, node); - } + // Assumes the type has already been deserialized + private static StatusChange deserializeEvent(ByteBuf bb) { + Status status = CBUtil.readEnumValue(Status.class, bb); + InetSocketAddress node = CBUtil.readInet(bb); + return new StatusChange(status, node); + } + + @Override + public String toString() { + return status + " " + node; + } + } + + static class SchemaChange extends ProtocolEvent { - @Override - public String toString() { - return status + " " + node; - } + enum Change { + CREATED, + UPDATED, + DROPPED } - static class SchemaChange extends ProtocolEvent { - - enum Change {CREATED, UPDATED, DROPPED} - - final Change change; - final SchemaElement targetType; - final String targetKeyspace; - final String targetName; - final List targetSignature; - - SchemaChange(Change change, SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature) { - super(Type.SCHEMA_CHANGE); - this.change = change; - this.targetType = targetType; - this.targetKeyspace = targetKeyspace; - this.targetName = targetName; - this.targetSignature = targetSignature; - } - - // Assumes the type has already been deserialized - static SchemaChange deserializeEvent(ByteBuf bb, ProtocolVersion version) { - Change change; - SchemaElement targetType; - String targetKeyspace, targetName; - List targetSignature; - switch (version) { - case V1: - case V2: - change = CBUtil.readEnumValue(Change.class, bb); - targetKeyspace = CBUtil.readString(bb); - targetName = CBUtil.readString(bb); - targetType = targetName.isEmpty() ? 
KEYSPACE : TABLE; - targetSignature = Collections.emptyList(); - return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); - case V3: - case V4: - case V5: - change = CBUtil.readEnumValue(Change.class, bb); - targetType = CBUtil.readEnumValue(SchemaElement.class, bb); - targetKeyspace = CBUtil.readString(bb); - targetName = (targetType == KEYSPACE) ? "" : CBUtil.readString(bb); - targetSignature = (targetType == FUNCTION || targetType == AGGREGATE) - ? CBUtil.readStringList(bb) - : Collections.emptyList(); - return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); - default: - throw version.unsupported(); - } - } - - @Override - public String toString() { - return change.toString() + ' ' + targetType + ' ' + targetKeyspace + (targetName.isEmpty() ? "" : '.' + targetName); - } + final Change change; + final SchemaElement targetType; + final String targetKeyspace; + final String targetName; + final List targetSignature; + + SchemaChange( + Change change, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature) { + super(Type.SCHEMA_CHANGE); + this.change = change; + this.targetType = targetType; + this.targetKeyspace = targetKeyspace; + this.targetName = targetName; + this.targetSignature = targetSignature; } + // Assumes the type has already been deserialized + static SchemaChange deserializeEvent(ByteBuf bb, ProtocolVersion version) { + Change change; + SchemaElement targetType; + String targetKeyspace, targetName; + List targetSignature; + switch (version) { + case V1: + case V2: + change = CBUtil.readEnumValue(Change.class, bb); + targetKeyspace = CBUtil.readString(bb); + targetName = CBUtil.readString(bb); + targetType = targetName.isEmpty() ? KEYSPACE : TABLE; + targetSignature = Collections.emptyList(); + return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); + case V3: + case V4: + case V5: + case V6: + change = CBUtil.readEnumValue(Change.class, bb); + targetType = CBUtil.readEnumValue(SchemaElement.class, bb); + targetKeyspace = CBUtil.readString(bb); + targetName = (targetType == KEYSPACE) ? "" : CBUtil.readString(bb); + targetSignature = + (targetType == FUNCTION || targetType == AGGREGATE) + ? CBUtil.readStringList(bb) + : Collections.emptyList(); + return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); + default: + throw version.unsupported(); + } + } + + @Override + public String toString() { + return change.toString() + + ' ' + + targetType + + ' ' + + targetKeyspace + + (targetName.isEmpty() ? "" : '.' + targetName); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java new file mode 100644 index 00000000000..b31a4977637 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +/** A listing of features that may or not apply to a given {@link ProtocolVersion}. */ +enum ProtocolFeature { + + /** + * The capability of updating a prepared statement if the result's metadata changes at runtime + * (for example, if the query is a {@code SELECT *} and the table is altered). + */ + PREPARED_METADATA_CHANGES, + + /** The capability of sending or receiving custom payloads. */ + CUSTOM_PAYLOADS, + + /** The capability of assigning client-generated timestamps to write requests. */ + CLIENT_TIMESTAMPS, + +// +; + + /** + * Determines whether or not the input version supports ths feature. + * + * @param version the version to test against. + * @return true if supported, false otherwise. + */ + boolean isSupportedBy(ProtocolVersion version) { + switch (this) { + case PREPARED_METADATA_CHANGES: + return version.compareTo(ProtocolVersion.V5) >= 0; + case CUSTOM_PAYLOADS: + return version.compareTo(ProtocolVersion.V4) >= 0; + case CLIENT_TIMESTAMPS: + return version.compareTo(ProtocolVersion.V3) >= 0; + default: + return false; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java index 77848c0fc99..853b3a58b14 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,218 +19,234 @@ import com.google.common.annotations.VisibleForTesting; -/** - * Options of the Cassandra native binary protocol. - */ +/** Options of the Cassandra native binary protocol. */ public class ProtocolOptions { - /** - * Compression supported by the Cassandra binary protocol. 
- */ - public enum Compression { - /** - * No compression - */ - NONE("") { - @Override - FrameCompressor compressor() { - return null; - } - }, - /** - * Snappy compression - */ - SNAPPY("snappy") { - @Override - FrameCompressor compressor() { - return SnappyCompressor.instance; - } - }, - /** - * LZ4 compression - */ - LZ4("lz4") { - @Override - FrameCompressor compressor() { - return LZ4Compressor.instance; - } - }; - - final String protocolName; - - private Compression(String protocolName) { - this.protocolName = protocolName; - } - - abstract FrameCompressor compressor(); - - static Compression fromString(String str) { - for (Compression c : values()) { - if (c.protocolName.equalsIgnoreCase(str)) - return c; - } - return null; - } - - @Override - public String toString() { - return protocolName; - } - } - - ; - - /** - * The default port for Cassandra native binary protocol: 9042. - */ - public static final int DEFAULT_PORT = 9042; - - /** - * The default value for {@link #getMaxSchemaAgreementWaitSeconds()}: 10. - */ - public static final int DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS = 10; - - private volatile Cluster.Manager manager; - - private final int port; - final ProtocolVersion initialProtocolVersion; // What the user asked us. Will be null by default. - - @VisibleForTesting - volatile int maxSchemaAgreementWaitSeconds; - - private final SSLOptions sslOptions; // null if no SSL - private final AuthProvider authProvider; - - private volatile Compression compression = Compression.NONE; - - /** - * Creates a new {@code ProtocolOptions} instance using the {@code DEFAULT_PORT} - * (and without SSL). - */ - public ProtocolOptions() { - this(DEFAULT_PORT); - } - - /** - * Creates a new {@code ProtocolOptions} instance using the provided port - * (without SSL nor authentication). - *

- * This is a shortcut for {@code new ProtocolOptions(port, null, AuthProvider.NONE)}. - * - * @param port the port to use for the binary protocol. - */ - public ProtocolOptions(int port) { - this(port, null, DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS, null, AuthProvider.NONE); + /** Compression supported by the Cassandra binary protocol. */ + public enum Compression { + /** No compression */ + NONE("") { + @Override + FrameCompressor compressor() { + return null; + } + }, + /** Snappy compression */ + SNAPPY("snappy") { + @Override + FrameCompressor compressor() { + return SnappyCompressor.instance; + } + }, + /** LZ4 compression */ + LZ4("lz4") { + @Override + FrameCompressor compressor() { + return LZ4Compressor.instance; + } + }; + + final String protocolName; + + private Compression(String protocolName) { + this.protocolName = protocolName; } - /** - * Creates a new {@code ProtocolOptions} instance using the provided port - * and SSL context. - * - * @param port the port to use for the binary protocol. - * @param protocolVersion the protocol version to use. This can be {@code null}, in which case the - * version used will be the biggest version supported by the first node the driver connects to. - * See {@link Cluster.Builder#withProtocolVersion} for more details. - * @param sslOptions the SSL options to use. Use {@code null} if SSL is not - * to be used. - * @param authProvider the {@code AuthProvider} to use for authentication against - * the Cassandra nodes. - */ - public ProtocolOptions(int port, ProtocolVersion protocolVersion, int maxSchemaAgreementWaitSeconds, SSLOptions sslOptions, AuthProvider authProvider) { - this.port = port; - this.initialProtocolVersion = protocolVersion; - this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; - this.sslOptions = sslOptions; - this.authProvider = authProvider; - } - - void register(Cluster.Manager manager) { - this.manager = manager; - } - - /** - * Returns the port used to connect to the Cassandra hosts. - * - * @return the port used to connect to the Cassandra hosts. - */ - public int getPort() { - return port; - } - - /** - * The protocol version used by the Cluster instance. - * - * @return the protocol version in use. This might return {@code null} if a particular - * version hasn't been forced by the user (using say {Cluster.Builder#withProtocolVersion}) - * and this Cluster instance has not yet connected to any node (but as soon as the - * Cluster instance is connected, this is guaranteed to return a non-null value). Note that - * nodes that do not support this protocol version will be ignored. - */ - public ProtocolVersion getProtocolVersion() { - return manager == null || manager.connectionFactory == null ? null : manager.connectionFactory.protocolVersion; - } + abstract FrameCompressor compressor(); - /** - * Returns the compression used by the protocol. - *

- * By default, compression is not used. - * - * @return the compression used. - */ - public Compression getCompression() { - return compression; + static Compression fromString(String str) { + for (Compression c : values()) { + if (c.protocolName.equalsIgnoreCase(str)) return c; + } + return null; } - /** - * Sets the compression to use. - *

- * Note that while this setting can be changed at any time, it will - * only apply to newly created connections. - * - * @param compression the compression algorithm to use (or {@code - * Compression.NONE} to disable compression). - * @return this {@code ProtocolOptions} object. - * @throws IllegalStateException if the compression requested is not - * available. Most compression algorithms require that the relevant be - * present in the classpath. If not, the compression will be - * unavailable. - */ - public ProtocolOptions setCompression(Compression compression) { - if (compression != Compression.NONE && compression.compressor() == null) - throw new IllegalStateException("The requested compression is not available (some compression require a JAR to be found in the classpath)"); - - this.compression = compression; - return this; + @Override + public String toString() { + return protocolName; } - - /** - * Returns the maximum time to wait for schema agreement before returning from a DDL query. - * - * @return the time. - */ - public int getMaxSchemaAgreementWaitSeconds() { - return maxSchemaAgreementWaitSeconds; - } - - /** - * The {@code SSLOptions} used by this cluster. - * - * @return the {@code SSLOptions} used by this cluster (set at the cluster creation time) - * or {@code null} if SSL is not in use. - */ - public SSLOptions getSSLOptions() { - return sslOptions; - } - - /** - * The {@code AuthProvider} used by this cluster. - * - * @return the {@code AuthProvided} used by this cluster (set at the cluster creation - * time). If no authentication mechanism is in use (the default), {@code AuthProvided.NONE} - * will be returned. - */ - public AuthProvider getAuthProvider() { - return authProvider; - } - + }; + + /** The default port for Cassandra native binary protocol: 9042. */ + public static final int DEFAULT_PORT = 9042; + + /** The default value for {@link #getMaxSchemaAgreementWaitSeconds()}: 10. */ + public static final int DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS = 10; + + private volatile Cluster.Manager manager; + + private final int port; + final ProtocolVersion initialProtocolVersion; // What the user asked us. Will be null by default. + + @VisibleForTesting volatile int maxSchemaAgreementWaitSeconds; + + private final SSLOptions sslOptions; // null if no SSL + private final AuthProvider authProvider; + + private final boolean noCompact; + + private volatile Compression compression = Compression.NONE; + + /** + * Creates a new {@code ProtocolOptions} instance using the {@code DEFAULT_PORT} (and without + * SSL). + */ + public ProtocolOptions() { + this(DEFAULT_PORT); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port (without SSL nor + * authentication). + * + *

This is a shortcut for {@code new ProtocolOptions(port, null, AuthProvider.NONE)}. + * + * @param port the port to use for the binary protocol. + */ + public ProtocolOptions(int port) { + this(port, null, DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS, null, AuthProvider.NONE, false); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port and SSL context. + * + * @param port the port to use for the binary protocol. + * @param protocolVersion the protocol version to use. This can be {@code null}, in which case the + * version used will be the biggest version supported by the first node the driver + * connects to. See {@link Cluster.Builder#withProtocolVersion} for more details. + * @param sslOptions the SSL options to use. Use {@code null} if SSL is not to be used. + * @param authProvider the {@code AuthProvider} to use for authentication against the Cassandra + * nodes. + */ + public ProtocolOptions( + int port, + ProtocolVersion protocolVersion, + int maxSchemaAgreementWaitSeconds, + SSLOptions sslOptions, + AuthProvider authProvider) { + this(port, protocolVersion, maxSchemaAgreementWaitSeconds, sslOptions, authProvider, false); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port and SSL context. + * + * @param port the port to use for the binary protocol. + * @param protocolVersion the protocol version to use. This can be {@code null}, in which case the + * version used will be the biggest version supported by the first node the driver + * connects to. See {@link Cluster.Builder#withProtocolVersion} for more details. + * @param sslOptions the SSL options to use. Use {@code null} if SSL is not to be used. + * @param authProvider the {@code AuthProvider} to use for authentication against the Cassandra + * nodes. + * @param noCompact whether or not to include the NO_COMPACT startup option. + */ + public ProtocolOptions( + int port, + ProtocolVersion protocolVersion, + int maxSchemaAgreementWaitSeconds, + SSLOptions sslOptions, + AuthProvider authProvider, + boolean noCompact) { + this.port = port; + this.initialProtocolVersion = protocolVersion; + this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; + this.sslOptions = sslOptions; + this.authProvider = authProvider; + this.noCompact = noCompact; + } + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Returns the port used to connect to the Cassandra hosts. + * + * @return the port used to connect to the Cassandra hosts. + */ + public int getPort() { + return port; + } + + /** + * The protocol version used by the Cluster instance. + * + * @return the protocol version in use. This might return {@code null} if a particular version + * hasn't been forced by the user (using say {Cluster.Builder#withProtocolVersion}) + * and this Cluster instance has not yet connected to any node (but as soon as the + * Cluster instance is connected, this is guaranteed to return a non-null value). Note that + * nodes that do not support this protocol version will be ignored. + */ + public ProtocolVersion getProtocolVersion() { + return manager == null || manager.connectionFactory == null + ? null + : manager.connectionFactory.protocolVersion; + } + + /** + * Returns the compression used by the protocol. + * + *

By default, compression is not used. + * + * @return the compression used. + */ + public Compression getCompression() { + return compression; + } + + /** + * Sets the compression to use. + * + *
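The compression option documented in this hunk is normally selected when the Cluster is built; a minimal sketch follows (the contact point is a placeholder, and LZ4 additionally requires the lz4 library on the classpath):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolOptions;

class CompressionSketch {
  static Cluster build() {
    return Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withCompression(ProtocolOptions.Compression.LZ4)
        .build();
  }
}
```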

Note that while this setting can be changed at any time, it will only apply to newly created + * connections. + * + * @param compression the compression algorithm to use (or {@code Compression.NONE} to disable + * compression). + * @return this {@code ProtocolOptions} object. + * @throws IllegalStateException if the compression requested is not available. Most compression + * algorithms require that the relevant be present in the classpath. If not, the compression + * will be unavailable. + */ + public ProtocolOptions setCompression(Compression compression) { + if (compression != Compression.NONE && compression.compressor() == null) + throw new IllegalStateException( + "The requested compression is not available (some compression require a JAR to be found in the classpath)"); + + this.compression = compression; + return this; + } + + /** + * Returns the maximum time to wait for schema agreement before returning from a DDL query. + * + * @return the time. + */ + public int getMaxSchemaAgreementWaitSeconds() { + return maxSchemaAgreementWaitSeconds; + } + + /** + * The {@code SSLOptions} used by this cluster. + * + * @return the {@code SSLOptions} used by this cluster (set at the cluster creation time) or + * {@code null} if SSL is not in use. + */ + public SSLOptions getSSLOptions() { + return sslOptions; + } + + /** + * The {@code AuthProvider} used by this cluster. + * + * @return the {@code AuthProvided} used by this cluster (set at the cluster creation time). If no + * authentication mechanism is in use (the default), {@code AuthProvided.NONE} will be + * returned. + */ + public AuthProvider getAuthProvider() { + return authProvider; + } + + /** @return Whether or not to include the NO_COMPACT startup option. */ + public boolean isNoCompact() { + return noCompact; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java index 007fe14c2db..af774b07e12 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolV1Authenticator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +20,18 @@ import java.util.Map; /** - * Parent class for {@link Authenticator} implementations that support native protocol v1 authentication. - *

- * Protocol v1 uses simple, credentials-based authentication (as opposed to SASL for later protocol versions). - * In order to support protocol v1, an authenticator must extend this class. - *

- * We use an abstract class instead of an interface because we don't want to expose {@link #getCredentials()}. + * Parent class for {@link Authenticator} implementations that support native protocol v1 + * authentication. + * + *

Protocol v1 uses simple, credentials-based authentication (as opposed to SASL for later + * protocol versions). In order to support protocol v1, an authenticator must extend this class. + * + *

We use an abstract class instead of an interface because we don't want to expose {@link + * #getCredentials()}. * - * @see Native protocol v1 specification + * @see Native + * protocol v1 specification */ abstract class ProtocolV1Authenticator { - abstract Map getCredentials(); + abstract Map getCredentials(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java index e5836c5ca6d..919d666228c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,90 +20,83 @@ import com.datastax.driver.core.exceptions.DriverInternalError; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap.Builder; - import java.util.Map; -/** - * Versions of the native protocol supported by the driver. - */ +/** Versions of the native protocol supported by the driver. */ public enum ProtocolVersion { - - V1("1.2.0", 1, null), - V2("2.0.0", 2, V1), - V3("2.1.0", 3, V2), - V4("2.2.0", 4, V3), - V5("3.10.0", 5, V4); - - /** - * The most recent protocol version supported by the driver. - */ - public static final ProtocolVersion NEWEST_SUPPORTED = V4; - - /** - * The most recent beta protocol version supported by the driver. - */ - public static final ProtocolVersion NEWEST_BETA = V5; - - private final VersionNumber minCassandraVersion; - - private final int asInt; - - private final ProtocolVersion lowerSupported; - - private ProtocolVersion(String minCassandraVersion, int asInt, ProtocolVersion lowerSupported) { - this.minCassandraVersion = VersionNumber.parse(minCassandraVersion); - this.asInt = asInt; - this.lowerSupported = lowerSupported; - } - - VersionNumber minCassandraVersion() { - return minCassandraVersion; - } - - DriverInternalError unsupported() { - return new DriverInternalError("Unsupported protocol version " + this); - } - - /** - * Returns the version as an integer. - * - * @return the integer representation. - */ - public int toInt() { - return asInt; - } - - /** - * Returns the highest supported version that is lower than this version. - * Returns {@code null} if there isn't such a version. - * - * @return the highest supported version that is lower than this version. 
- */ - public ProtocolVersion getLowerSupported() { - return lowerSupported; - } - - private static final Map INT_TO_VERSION; - - static { - Builder builder = ImmutableMap.builder(); - for (ProtocolVersion version : values()) { - builder.put(version.asInt, version); - } - INT_TO_VERSION = builder.build(); - } - - /** - * Returns the value matching an integer version. - * - * @param i the version as an integer. - * @return the matching enum value. - * @throws IllegalArgumentException if the argument doesn't match any known version. - */ - public static ProtocolVersion fromInt(int i) { - ProtocolVersion version = INT_TO_VERSION.get(i); - if (version == null) - throw new IllegalArgumentException("No protocol version matching integer version " + i); - return version; + V1("1.2.0", 1, null), + V2("2.0.0", 2, V1), + V3("2.1.0", 3, V2), + V4("2.2.0", 4, V3), + V5("4.0.0", 5, V4), + V6("4.0.0", 6, V5); + + /** The most recent protocol version supported by the driver. */ + public static final ProtocolVersion NEWEST_SUPPORTED = V5; + + /** The most recent beta protocol version supported by the driver. */ + public static final ProtocolVersion NEWEST_BETA = V6; + + private final VersionNumber minCassandraVersion; + + private final int asInt; + + private final ProtocolVersion lowerSupported; + + private ProtocolVersion(String minCassandraVersion, int asInt, ProtocolVersion lowerSupported) { + this.minCassandraVersion = VersionNumber.parse(minCassandraVersion); + this.asInt = asInt; + this.lowerSupported = lowerSupported; + } + + VersionNumber minCassandraVersion() { + return minCassandraVersion; + } + + DriverInternalError unsupported() { + return new DriverInternalError("Unsupported protocol version " + this); + } + + /** + * Returns the version as an integer. + * + * @return the integer representation. + */ + public int toInt() { + return asInt; + } + + /** + * Returns the highest supported version that is lower than this version. Returns {@code null} if + * there isn't such a version. + * + * @return the highest supported version that is lower than this version. + */ + public ProtocolVersion getLowerSupported() { + return lowerSupported; + } + + private static final Map INT_TO_VERSION; + + static { + Builder builder = ImmutableMap.builder(); + for (ProtocolVersion version : values()) { + builder.put(version.asInt, version); } + INT_TO_VERSION = builder.build(); + } + + /** + * Returns the value matching an integer version. + * + * @param i the version as an integer. + * @return the matching enum value. + * @throws IllegalArgumentException if the argument doesn't match any known version. + */ + public static ProtocolVersion fromInt(int i) { + ProtocolVersion version = INT_TO_VERSION.get(i); + if (version == null) + throw new IllegalArgumentException("No protocol version matching integer version " + i); + return version; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java b/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java index 32963fbd3ce..5621d2e8159 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryLogger.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
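The reworked ProtocolVersion enum above adds V6 as the newest beta version and makes V5 the newest supported one. Forcing a specific version, and mapping the wire integer back to the enum, look like this sketch (the contact point is a placeholder):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ProtocolVersion;

class ProtocolVersionSketch {
  static Cluster buildWithV4() {
    // Pin the version instead of negotiating it with the first contact point.
    return Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withProtocolVersion(ProtocolVersion.V4)
        .build();
  }

  static ProtocolVersion fromWire(int raw) {
    // Throws IllegalArgumentException for unknown version numbers.
    return ProtocolVersion.fromInt(raw);
  }
}
```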
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,22 +17,24 @@ */ package com.datastax.driver.core; -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import com.datastax.driver.core.querybuilder.BuiltStatement; +import com.google.common.annotations.VisibleForTesting; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; - -import static java.util.concurrent.TimeUnit.NANOSECONDS; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A configurable {@link LatencyTracker} that logs all executed statements. - *

- * Typically, client applications would instantiate one single query logger (using its {@link Builder}), - * configure it and register it on the relevant {@link Cluster} instance, e.g.: - *

+ * + *

Typically, client applications would instantiate one single query logger (using its {@link + * Builder}), configure it and register it on the relevant {@link Cluster} instance, e.g.: + * + *

+ * *

  * Cluster cluster = ...
  * QueryLogger queryLogger = QueryLogger.builder()
@@ -39,855 +43,930 @@
  *     .build();
  * cluster.register(queryLogger);
  * 
- *

- * Refer to the {@link Builder} documentation for more information on - * configuration settings for the query logger. - *

- * Once registered, the query logger will log every {@link RegularStatement}, {@link BoundStatement} or {@link BatchStatement} - * executed by the driver; - * note that it will never log other types of statement, null statements nor any special statement used internally by the driver. - *

- * There is one log for each request to a Cassandra node; because the driver sometimes retries the same statement on multiple nodes, - * a single statement execution (for example, a single call to {@link Session#execute(Statement)}) can produce multiple logs on - * different nodes. - *

- * For more flexibility, the query logger uses 3 different {@link Logger} instances: - *

+ * + *

Refer to the {@link Builder} documentation for more information on configuration settings for + * the query logger. + * + *

Once registered, the query logger will log every {@link RegularStatement}, {@link + * BoundStatement} or {@link BatchStatement} executed by the driver; note that it will never log + * other types of statement, null statements nor any special statement used internally by the + * driver. + * + *

There is one log for each request to a Cassandra node; because the driver sometimes retries + * the same statement on multiple nodes, a single statement execution (for example, a single call to + * {@link Session#execute(Statement)}) can produce multiple logs on different nodes. + * + *

For more flexibility, the query logger uses 3 different {@link Logger} instances: + * + *

+ * *

    - *
- * <li>{@link #NORMAL_LOGGER}: used to log normal queries, i.e., queries that completed successfully
- * within a configurable threshold in milliseconds.</li>
- * <li>{@link #SLOW_LOGGER}: used to log slow queries, i.e., queries that completed successfully
- * but that took longer than a configurable threshold in milliseconds to complete.</li>
- * <li>{@link #ERROR_LOGGER}: used to log unsuccessful queries, i.e.,
- * queries that did not completed normally and threw an exception.
- * Note this this logger will also print the full stack trace of the reported exception.</li>
+ *   <li>{@link #NORMAL_LOGGER}: used to log normal queries, i.e., queries that completed
+ *       successfully within a configurable threshold in milliseconds.
+ *   <li>{@link #SLOW_LOGGER}: used to log slow queries, i.e., queries that completed successfully
+ *       but that took longer than a configurable threshold in milliseconds to complete.
+ *   <li>{@link #ERROR_LOGGER}: used to log unsuccessful queries, i.e., queries that did not
+ *       completed normally and threw an exception. Note this this logger will also print the full
+ *       stack trace of the reported exception.
  * </ol>
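Because the driver logs through SLF4J, these three loggers are switched on in whatever backend the application uses. A sketch of doing it programmatically, assuming Logback Classic is the SLF4J binding (the level choices are examples only):

```java
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import org.slf4j.LoggerFactory;

public class QueryLoggerLevels {
  public static void main(String[] args) {
    // DEBUG logs the query string and latency; TRACE additionally logs parameter values.
    setLevel("com.datastax.driver.core.QueryLogger.NORMAL", Level.DEBUG);
    setLevel("com.datastax.driver.core.QueryLogger.SLOW", Level.DEBUG);
    setLevel("com.datastax.driver.core.QueryLogger.ERROR", Level.TRACE);
  }

  private static void setLevel(String name, Level level) {
    // The cast is only valid when Logback Classic is the SLF4J implementation on the classpath.
    ((Logger) LoggerFactory.getLogger(name)).setLevel(level);
  }
}
```

The same effect is usually achieved declaratively in the backend's configuration file.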
- *

- *

- * The appropriate logger is chosen according to the following algorithm: + * + *

+ * + *

The appropriate logger is chosen according to the following algorithm: + * *

    - *
- * <li>if an exception has been thrown: use {@link #ERROR_LOGGER};</li>
- * <li>otherwise, if the reported latency is greater than the configured threshold in milliseconds: use {@link #SLOW_LOGGER};</li>
- * <li>otherwise, use {@link #NORMAL_LOGGER}.</li>
+ *   <li>if an exception has been thrown: use {@link #ERROR_LOGGER};
+ *   <li>otherwise, if the reported latency is greater than the configured threshold in
+ *       milliseconds: use {@link #SLOW_LOGGER};
+ *   <li>otherwise, use {@link #NORMAL_LOGGER}.
  * </ol>
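Expressed as code, the choice is a three-way branch; a simplified sketch of the rule above (the driver's actual implementation is in `update()` and the `maybeLog*()` methods later in this file):

```java
import com.datastax.driver.core.QueryLogger;
import org.slf4j.Logger;

final class LoggerSelection {
  /** Picks the logger according to the algorithm described above. */
  static Logger select(Exception error, long latencyMs, long slowThresholdMs) {
    if (error != null) {
      return QueryLogger.ERROR_LOGGER; // unsuccessful query
    } else if (latencyMs > slowThresholdMs) {
      return QueryLogger.SLOW_LOGGER; // completed, but above the threshold
    } else {
      return QueryLogger.NORMAL_LOGGER; // completed within the threshold
    }
  }
}
```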
- *

- *

- * All loggers are activated by setting their levels to {@code DEBUG} or {@code TRACE} (including {@link #ERROR_LOGGER}). - * If the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement}, - * then the query parameters (if any) will be logged as well (names and actual values). - *

- *

- * Constant thresholds vs. Dynamic thresholds - *

- * Currently the QueryLogger can track slow queries in two different ways: - * using a {@link Builder#withConstantThreshold(long)} constant threshold} in milliseconds (which is the default - * behavior), or using a {@link Builder#withDynamicThreshold(PercentileTracker, double) dynamic threshold} - * based on latency percentiles. - *

- * This class is thread-safe. + * + *

+ * + *

All loggers are activated by setting their levels to {@code DEBUG} or {@code TRACE} (including + * {@link #ERROR_LOGGER}). If the level is set to {@code TRACE} and the statement being logged is a + * {@link BoundStatement}, then the query parameters (if any) will be logged as well (names and + * actual values). + * + *

+ * + *

Constant thresholds vs. Dynamic thresholds + * + *

Currently the QueryLogger can track slow queries in two different ways: using a {@link + * Builder#withConstantThreshold(long)} constant threshold} in milliseconds (which is the default + * behavior), or using a {@link Builder#withDynamicThreshold(PercentileTracker, double) dynamic + * threshold} based on latency percentiles. + * + *
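For the dynamic mode, a percentile tracker supplies the per-host latency histogram that the threshold is computed from. A sketch, assuming the driver's `PerHostPercentileTracker` builder API (the 15-second ceiling and 99th percentile are illustrative):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PerHostPercentileTracker;
import com.datastax.driver.core.QueryLogger;

public class DynamicThresholdExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();

    // Histogram of per-host latencies; 15000 ms is the highest latency it can record.
    PerHostPercentileTracker tracker = PerHostPercentileTracker.builder(15000).build();

    // Flag a query as slow when its latency exceeds that host's 99th percentile.
    QueryLogger queryLogger =
        QueryLogger.builder().withDynamicThreshold(tracker, 99.0).build();

    // Registering the logger also registers the tracker with the cluster (see onRegister below).
    cluster.register(queryLogger);
  }
}
```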

This class is thread-safe. * * @since 2.0.10 */ public abstract class QueryLogger implements LatencyTracker { - /** - * The default latency threshold in milliseconds beyond which queries are considered 'slow' - * and logged as such by the driver. - */ - public static final long DEFAULT_SLOW_QUERY_THRESHOLD_MS = 5000; - - /** - * The default latency percentile beyond which queries are considered 'slow' - * and logged as such by the driver. - */ - public static final double DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE = 99.0; - - /** - * The default maximum length of a CQL query string that can be logged verbatim - * by the driver. Query strings longer than this value will be truncated - * when logged. - */ - public static final int DEFAULT_MAX_QUERY_STRING_LENGTH = 500; + /** + * The default latency threshold in milliseconds beyond which queries are considered 'slow' and + * logged as such by the driver. + */ + public static final long DEFAULT_SLOW_QUERY_THRESHOLD_MS = 5000; + + /** + * The default latency percentile beyond which queries are considered 'slow' and logged as such by + * the driver. + */ + public static final double DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE = 99.0; + + /** + * The default maximum length of a CQL query string that can be logged verbatim by the driver. + * Query strings longer than this value will be truncated when logged. + */ + public static final int DEFAULT_MAX_QUERY_STRING_LENGTH = 500; + + /** + * The default maximum length of a query parameter value that can be logged verbatim by the + * driver. Parameter values longer than this value will be truncated when logged. + */ + public static final int DEFAULT_MAX_PARAMETER_VALUE_LENGTH = 50; + + /** + * The default maximum number of query parameters that can be logged by the driver. Queries with a + * number of parameters higher than this value will not have all their parameters logged. + */ + public static final int DEFAULT_MAX_LOGGED_PARAMETERS = 50; + + // Loggers + + /** + * The logger used to log normal queries, i.e., queries that completed successfully within a + * configurable threshold in milliseconds. + * + *

This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link + * BoundStatement} or a {@link SimpleStatement}, then the query parameters (if any) will be + * logged. For a {@link BoundStatement} names and actual values are logged and for a {@link + * SimpleStatement} values are logged in positional order and named values are logged with names + * and value. + * + *

The name of this logger is {@code com.datastax.driver.core.QueryLogger.NORMAL}. + */ + public static final Logger NORMAL_LOGGER = + LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.NORMAL"); + + /** + * The logger used to log slow queries, i.e., queries that completed successfully but whose + * execution time exceeded a configurable threshold in milliseconds. + * + *

This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link + * BoundStatement} or a {@link SimpleStatement}, then the query parameters (if any) will be + * logged. For a {@link BoundStatement} names and actual values are logged and for a {@link + * SimpleStatement} values are logged in positional order and named values are logged with names + * and value. + * + *

The name of this logger is {@code com.datastax.driver.core.QueryLogger.SLOW}. + */ + public static final Logger SLOW_LOGGER = + LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.SLOW"); + + /** + * The logger used to log unsuccessful queries, i.e., queries that did not complete normally and + * threw an exception. + * + *

This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. + * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link + * BoundStatement} or a {@link SimpleStatement}, then the query parameters (if any) will be + * logged. For a {@link BoundStatement} names and actual values are logged and for a {@link + * SimpleStatement} values are logged in positional order and named values are logged with names + * and value. Note this this logger will also print the full stack trace of the reported + * exception. + * + *

The name of this logger is {@code com.datastax.driver.core.QueryLogger.ERROR}. + */ + public static final Logger ERROR_LOGGER = + LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.ERROR"); + + // Message templates + + private static final String NORMAL_TEMPLATE = + "[%s] [%s] Query completed normally, took %s ms: %s"; + + private static final String SLOW_TEMPLATE_MILLIS = "[%s] [%s] Query too slow, took %s ms: %s"; + + private static final String SLOW_TEMPLATE_PERCENTILE = + "[%s] [%s] Query too slow, took %s ms (%s percentile = %s ms): %s"; + + private static final String ERROR_TEMPLATE = "[%s] [%s] Query error after %s ms: %s"; + + @VisibleForTesting static final String TRUNCATED_OUTPUT = "... [truncated output]"; + + @VisibleForTesting static final String FURTHER_PARAMS_OMITTED = " [further parameters omitted]"; + + protected volatile Cluster cluster; + + private volatile ProtocolVersion protocolVersion; + + protected volatile int maxQueryStringLength; + + protected volatile int maxParameterValueLength; + + protected volatile int maxLoggedParameters; + + /** + * Private constructor. Instances of QueryLogger should be obtained via the {@link #builder()} + * method. + */ + private QueryLogger( + int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters) { + this.maxQueryStringLength = maxQueryStringLength; + this.maxParameterValueLength = maxParameterValueLength; + this.maxLoggedParameters = maxLoggedParameters; + } + + /** + * Creates a new {@link QueryLogger.Builder} instance. + * + *

This is a convenience method for {@code new QueryLogger.Builder()}. + * + * @return the new QueryLogger builder. + * @throws NullPointerException if {@code cluster} is {@code null}. + */ + public static QueryLogger.Builder builder() { + return new QueryLogger.Builder(); + } + + @Override + public void onRegister(Cluster cluster) { + this.cluster = cluster; + } + + @Override + public void onUnregister(Cluster cluster) { + // nothing to do + } + + /** + * A QueryLogger that uses a constant threshold in milliseconds to track slow queries. This + * implementation is the default and should be preferred to {@link DynamicThresholdQueryLogger} + * which is still in beta state. + */ + public static class ConstantThresholdQueryLogger extends QueryLogger { + + private volatile long slowQueryLatencyThresholdMillis; + + private ConstantThresholdQueryLogger( + int maxQueryStringLength, + int maxParameterValueLength, + int maxLoggedParameters, + long slowQueryLatencyThresholdMillis) { + super(maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); + this.setSlowQueryLatencyThresholdMillis(slowQueryLatencyThresholdMillis); + } /** - * The default maximum length of a query parameter value that can be logged verbatim - * by the driver. Parameter values longer than this value will be truncated - * when logged. + * Return the threshold in milliseconds beyond which queries are considered 'slow' and logged as + * such by the driver. The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_MS}. + * + * @return The threshold in milliseconds beyond which queries are considered 'slow' and logged + * as such by the driver. */ - public static final int DEFAULT_MAX_PARAMETER_VALUE_LENGTH = 50; + public long getSlowQueryLatencyThresholdMillis() { + return slowQueryLatencyThresholdMillis; + } /** - * The default maximum number of query parameters that can be logged - * by the driver. Queries with a number of parameters higher than this value - * will not have all their parameters logged. + * Set the threshold in milliseconds beyond which queries are considered 'slow' and logged as + * such by the driver. + * + * @param slowQueryLatencyThresholdMillis Slow queries threshold in milliseconds. It must be + * strictly positive. + * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdMillis <= 0}. */ - public static final int DEFAULT_MAX_LOGGED_PARAMETERS = 50; + public void setSlowQueryLatencyThresholdMillis(long slowQueryLatencyThresholdMillis) { + if (slowQueryLatencyThresholdMillis <= 0) + throw new IllegalArgumentException( + "Invalid slowQueryLatencyThresholdMillis, should be > 0, got " + + slowQueryLatencyThresholdMillis); + this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; + } - // Loggers + @Override + protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { + if (latencyMs > slowQueryLatencyThresholdMillis) { + maybeLogSlowQuery(host, statement, latencyMs); + } else { + maybeLogNormalQuery(host, statement, latencyMs); + } + } - /** - * The logger used to log normal queries, i.e., queries that completed successfully - * within a configurable threshold in milliseconds. - *

- * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. - * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement} - * or a {@link SimpleStatement}, then the query parameters (if any) will be logged. For a {@link BoundStatement} - * names and actual values are logged and for a {@link SimpleStatement} values are logged in positional order - * and named values are logged with names and value. - *

- * The name of this logger is {@code com.datastax.driver.core.QueryLogger.NORMAL}. - */ - public static final Logger NORMAL_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.NORMAL"); + protected void maybeLogSlowQuery(Host host, Statement statement, long latencyMs) { + if (SLOW_LOGGER.isDebugEnabled()) { + String message = + String.format( + SLOW_TEMPLATE_MILLIS, + cluster.getClusterName(), + host, + latencyMs, + statementAsString(statement)); + logQuery(statement, null, SLOW_LOGGER, message); + } + } + } + + /** + * A QueryLogger that uses a dynamic threshold in milliseconds to track slow queries. + * + *

Dynamic thresholds are based on per-host latency percentiles, as computed by {@link + * PercentileTracker}. + */ + public static class DynamicThresholdQueryLogger extends QueryLogger { + + private volatile double slowQueryLatencyThresholdPercentile; + + private volatile PercentileTracker percentileLatencyTracker; + + private DynamicThresholdQueryLogger( + int maxQueryStringLength, + int maxParameterValueLength, + int maxLoggedParameters, + double slowQueryLatencyThresholdPercentile, + PercentileTracker percentileLatencyTracker) { + super(maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); + this.setSlowQueryLatencyThresholdPercentile(slowQueryLatencyThresholdPercentile); + this.setPercentileLatencyTracker(percentileLatencyTracker); + } /** - * The logger used to log slow queries, i.e., queries that completed successfully - * but whose execution time exceeded a configurable threshold in milliseconds. - *

- * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. - * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement} - * or a {@link SimpleStatement}, then the query parameters (if any) will be logged. For a {@link BoundStatement} - * names and actual values are logged and for a {@link SimpleStatement} values are logged in positional order - * and named values are logged with names and value. - *

- * The name of this logger is {@code com.datastax.driver.core.QueryLogger.SLOW}. + * Return the percentile tracker to use for recording per-host latency histograms. Cannot be + * {@code null}. + * + * @return the percentile tracker to use. */ - public static final Logger SLOW_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.SLOW"); + public PercentileTracker getPercentileLatencyTracker() { + return percentileLatencyTracker; + } /** - * The logger used to log unsuccessful queries, i.e., queries that did not complete normally and threw an exception. - *

- * This logger is activated by setting its level to {@code DEBUG} or {@code TRACE}. - * Additionally, if the level is set to {@code TRACE} and the statement being logged is a {@link BoundStatement} - * or a {@link SimpleStatement}, then the query parameters (if any) will be logged. For a {@link BoundStatement} - * names and actual values are logged and for a {@link SimpleStatement} values are logged in positional order - * and named values are logged with names and value. - * Note this this logger will also print the full stack trace of the reported exception. - *

- * The name of this logger is {@code com.datastax.driver.core.QueryLogger.ERROR}. + * Set the percentile tracker to use for recording per-host latency histograms. Cannot be {@code + * null}. + * + * @param percentileLatencyTracker the percentile tracker instance to use. + * @throws IllegalArgumentException if {@code percentileLatencyTracker == null}. */ - public static final Logger ERROR_LOGGER = LoggerFactory.getLogger("com.datastax.driver.core.QueryLogger.ERROR"); - - // Message templates - - private static final String NORMAL_TEMPLATE = "[%s] [%s] Query completed normally, took %s ms: %s"; - - private static final String SLOW_TEMPLATE_MILLIS = "[%s] [%s] Query too slow, took %s ms: %s"; - - private static final String SLOW_TEMPLATE_PERCENTILE = "[%s] [%s] Query too slow, took %s ms (%s percentile = %s ms): %s"; - - private static final String ERROR_TEMPLATE = "[%s] [%s] Query error after %s ms: %s"; - - @VisibleForTesting - static final String TRUNCATED_OUTPUT = "... [truncated output]"; - - @VisibleForTesting - static final String FURTHER_PARAMS_OMITTED = " [further parameters omitted]"; - - protected volatile Cluster cluster; - - private volatile ProtocolVersion protocolVersion; - - protected volatile int maxQueryStringLength; - - protected volatile int maxParameterValueLength; - - protected volatile int maxLoggedParameters; + public void setPercentileLatencyTracker(PercentileTracker percentileLatencyTracker) { + if (percentileLatencyTracker == null) + throw new IllegalArgumentException("perHostPercentileLatencyTracker cannot be null"); + this.percentileLatencyTracker = percentileLatencyTracker; + } /** - * Private constructor. Instances of QueryLogger should be obtained via the {@link #builder()} method. + * Return the threshold percentile beyond which queries are considered 'slow' and logged as such + * by the driver. The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE}. + * + * @return threshold percentile beyond which queries are considered 'slow' and logged as such by + * the driver. */ - private QueryLogger(int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters) { - this.maxQueryStringLength = maxQueryStringLength; - this.maxParameterValueLength = maxParameterValueLength; - this.maxLoggedParameters = maxLoggedParameters; + public double getSlowQueryLatencyThresholdPercentile() { + return slowQueryLatencyThresholdPercentile; } /** - * Creates a new {@link QueryLogger.Builder} instance. - *

- * This is a convenience method for {@code new QueryLogger.Builder()}. + * Set the threshold percentile beyond which queries are considered 'slow' and logged as such by + * the driver. * - * @return the new QueryLogger builder. - * @throws NullPointerException if {@code cluster} is {@code null}. + * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. It must be + * comprised between 0 inclusive and 100 exclusive. + * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdPercentile < 0 || + * slowQueryLatencyThresholdPercentile >= 100}. */ - public static QueryLogger.Builder builder() { - return new QueryLogger.Builder(); - } - - @Override - public void onRegister(Cluster cluster) { - this.cluster = cluster; + public void setSlowQueryLatencyThresholdPercentile(double slowQueryLatencyThresholdPercentile) { + if (slowQueryLatencyThresholdPercentile < 0.0 || slowQueryLatencyThresholdPercentile >= 100.0) + throw new IllegalArgumentException( + "Invalid slowQueryLatencyThresholdPercentile, should be >= 0 and < 100, got " + + slowQueryLatencyThresholdPercentile); + this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; } @Override - public void onUnregister(Cluster cluster) { - // nothing to do + protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { + long threshold = + percentileLatencyTracker.getLatencyAtPercentile( + host, statement, null, slowQueryLatencyThresholdPercentile); + if (threshold >= 0 && latencyMs > threshold) { + maybeLogSlowQuery(host, statement, latencyMs, threshold); + } else { + maybeLogNormalQuery(host, statement, latencyMs); + } } - /** - * A QueryLogger that uses a constant threshold in milliseconds - * to track slow queries. - * This implementation is the default and should be preferred to {@link DynamicThresholdQueryLogger} - * which is still in beta state. - */ - public static class ConstantThresholdQueryLogger extends QueryLogger { - - private volatile long slowQueryLatencyThresholdMillis; - - private ConstantThresholdQueryLogger(int maxQueryStringLength, int maxParameterValueLength, int maxLoggedParameters, long slowQueryLatencyThresholdMillis) { - super(maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); - this.setSlowQueryLatencyThresholdMillis(slowQueryLatencyThresholdMillis); - } - - /** - * Return the threshold in milliseconds beyond which queries are considered 'slow' - * and logged as such by the driver. - * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_MS}. - * - * @return The threshold in milliseconds beyond which queries are considered 'slow' - * and logged as such by the driver. - */ - public long getSlowQueryLatencyThresholdMillis() { - return slowQueryLatencyThresholdMillis; - } - - /** - * Set the threshold in milliseconds beyond which queries are considered 'slow' - * and logged as such by the driver. - * - * @param slowQueryLatencyThresholdMillis Slow queries threshold in milliseconds. - * It must be strictly positive. - * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdMillis <= 0}. 
- */ - public void setSlowQueryLatencyThresholdMillis(long slowQueryLatencyThresholdMillis) { - if (slowQueryLatencyThresholdMillis <= 0) - throw new IllegalArgumentException("Invalid slowQueryLatencyThresholdMillis, should be > 0, got " + slowQueryLatencyThresholdMillis); - this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; - } - - @Override - protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { - if (latencyMs > slowQueryLatencyThresholdMillis) { - maybeLogSlowQuery(host, statement, latencyMs); - } else { - maybeLogNormalQuery(host, statement, latencyMs); - } - } - - protected void maybeLogSlowQuery(Host host, Statement statement, long latencyMs) { - if (SLOW_LOGGER.isDebugEnabled()) { - String message = String.format(SLOW_TEMPLATE_MILLIS, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); - logQuery(statement, null, SLOW_LOGGER, message); - } - } + protected void maybeLogSlowQuery( + Host host, Statement statement, long latencyMs, long threshold) { + if (SLOW_LOGGER.isDebugEnabled()) { + String message = + String.format( + SLOW_TEMPLATE_PERCENTILE, + cluster.getClusterName(), + host, + latencyMs, + slowQueryLatencyThresholdPercentile, + threshold, + statementAsString(statement)); + logQuery(statement, null, SLOW_LOGGER, message); + } } - /** - * A QueryLogger that uses a dynamic threshold in milliseconds - * to track slow queries. - *

- * Dynamic thresholds are based on per-host latency percentiles, as computed - * by {@link PercentileTracker}. - */ - public static class DynamicThresholdQueryLogger extends QueryLogger { - - private volatile double slowQueryLatencyThresholdPercentile; - - private volatile PercentileTracker percentileLatencyTracker; - - private DynamicThresholdQueryLogger(int maxQueryStringLength, int maxParameterValueLength, - int maxLoggedParameters, double slowQueryLatencyThresholdPercentile, - PercentileTracker percentileLatencyTracker) { - super(maxQueryStringLength, maxParameterValueLength, maxLoggedParameters); - this.setSlowQueryLatencyThresholdPercentile(slowQueryLatencyThresholdPercentile); - this.setPercentileLatencyTracker(percentileLatencyTracker); - } - - /** - * Return the percentile tracker to use for recording per-host latency histograms. - * Cannot be {@code null}. - * - * @return the percentile tracker to use. - */ - public PercentileTracker getPercentileLatencyTracker() { - return percentileLatencyTracker; - } - - /** - * Set the percentile tracker to use for recording per-host latency histograms. - * Cannot be {@code null}. - * - * @param percentileLatencyTracker the percentile tracker instance to use. - * @throws IllegalArgumentException if {@code percentileLatencyTracker == null}. - */ - public void setPercentileLatencyTracker(PercentileTracker percentileLatencyTracker) { - if (percentileLatencyTracker == null) - throw new IllegalArgumentException("perHostPercentileLatencyTracker cannot be null"); - this.percentileLatencyTracker = percentileLatencyTracker; - } - - /** - * Return the threshold percentile beyond which queries are considered 'slow' - * and logged as such by the driver. - * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE}. - * - * @return threshold percentile beyond which queries are considered 'slow' - * and logged as such by the driver. - */ - public double getSlowQueryLatencyThresholdPercentile() { - return slowQueryLatencyThresholdPercentile; - } - - /** - * Set the threshold percentile beyond which queries are considered 'slow' - * and logged as such by the driver. - * - * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. - * It must be comprised between 0 inclusive and 100 exclusive. - * @throws IllegalArgumentException if {@code slowQueryLatencyThresholdPercentile < 0 || slowQueryLatencyThresholdPercentile >= 100}. 
- */ - public void setSlowQueryLatencyThresholdPercentile(double slowQueryLatencyThresholdPercentile) { - if (slowQueryLatencyThresholdPercentile < 0.0 || slowQueryLatencyThresholdPercentile >= 100.0) - throw new IllegalArgumentException("Invalid slowQueryLatencyThresholdPercentile, should be >= 0 and < 100, got " + slowQueryLatencyThresholdPercentile); - this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; - } - - @Override - protected void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs) { - long threshold = percentileLatencyTracker.getLatencyAtPercentile(host, statement, null, slowQueryLatencyThresholdPercentile); - if (threshold >= 0 && latencyMs > threshold) { - maybeLogSlowQuery(host, statement, latencyMs, threshold); - } else { - maybeLogNormalQuery(host, statement, latencyMs); - } - } - - protected void maybeLogSlowQuery(Host host, Statement statement, long latencyMs, long threshold) { - if (SLOW_LOGGER.isDebugEnabled()) { - String message = String.format(SLOW_TEMPLATE_PERCENTILE, cluster.getClusterName(), host, latencyMs, slowQueryLatencyThresholdPercentile, threshold, statementAsString(statement)); - logQuery(statement, null, SLOW_LOGGER, message); - } - } - - @Override - public void onRegister(Cluster cluster) { - super.onRegister(cluster); - cluster.register(percentileLatencyTracker); - } - - // Don't unregister the latency tracker in onUnregister, we can't guess if it's being used by another component - // or not. + @Override + public void onRegister(Cluster cluster) { + super.onRegister(cluster); + cluster.register(percentileLatencyTracker); } - /** - * Helper class to build {@link QueryLogger} instances with a fluent API. - */ - public static class Builder { - - private int maxQueryStringLength = DEFAULT_MAX_QUERY_STRING_LENGTH; - - private int maxParameterValueLength = DEFAULT_MAX_PARAMETER_VALUE_LENGTH; - - private int maxLoggedParameters = DEFAULT_MAX_LOGGED_PARAMETERS; - - private long slowQueryLatencyThresholdMillis = DEFAULT_SLOW_QUERY_THRESHOLD_MS; - - private double slowQueryLatencyThresholdPercentile = DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE; + // Don't unregister the latency tracker in onUnregister, we can't guess if it's being used by + // another component + // or not. + } - private PercentileTracker percentileLatencyTracker; + /** Helper class to build {@link QueryLogger} instances with a fluent API. */ + public static class Builder { - private boolean constantThreshold = true; + private int maxQueryStringLength = DEFAULT_MAX_QUERY_STRING_LENGTH; - /** - * Enables slow query latency tracking based on constant thresholds. - *

- * Note: You should either use {@link #withConstantThreshold(long) constant thresholds} - * or {@link #withDynamicThreshold(PercentileTracker, double) dynamic thresholds}, - * not both. - * - * @param slowQueryLatencyThresholdMillis The threshold in milliseconds beyond which queries are considered 'slow' - * and logged as such by the driver. - * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_MS} - * @return this {@link Builder} instance (for method chaining). - */ - public Builder withConstantThreshold(long slowQueryLatencyThresholdMillis) { - this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; - constantThreshold = true; - return this; - } - - /** - * Enables slow query latency tracking based on dynamic thresholds. - *

- * Dynamic thresholds are based on latency percentiles, as computed by {@link PercentileTracker}. - *

- * Note: You should either use {@link #withConstantThreshold(long) constant thresholds} or - * {@link #withDynamicThreshold(PercentileTracker, double) dynamic thresholds}, not both. - * - * @param percentileLatencyTracker the {@link PercentileTracker} instance to use for recording - * latency histograms. Cannot be {@code null}. - * It will get {@link Cluster#register(LatencyTracker) registered} - * with the cluster at the same time as this logger. - * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. - * It must be comprised between 0 inclusive and 100 exclusive. - * The default value is {@link #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE} - * @return this {@link Builder} instance (for method chaining). - */ - public Builder withDynamicThreshold(PercentileTracker percentileLatencyTracker, - double slowQueryLatencyThresholdPercentile) { - this.percentileLatencyTracker = percentileLatencyTracker; - this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; - constantThreshold = false; - return this; - } - - /** - * Set the maximum length of a CQL query string that can be logged verbatim - * by the driver. Query strings longer than this value will be truncated - * when logged. - * - * @param maxQueryStringLength The maximum length of a CQL query string - * that can be logged verbatim by the driver. - * It must be strictly positive or {@code -1}, - * in which case the query is never truncated - * (use with care). - * The default value is {@link #DEFAULT_MAX_QUERY_STRING_LENGTH}. - * @return this {@link Builder} instance (for method chaining). - */ - public Builder withMaxQueryStringLength(int maxQueryStringLength) { - this.maxQueryStringLength = maxQueryStringLength; - return this; - } + private int maxParameterValueLength = DEFAULT_MAX_PARAMETER_VALUE_LENGTH; - /** - * Set the maximum length of a query parameter value that can be logged verbatim - * by the driver. Parameter values longer than this value will be truncated - * when logged. - * - * @param maxParameterValueLength The maximum length of a query parameter value - * that can be logged verbatim by the driver. - * It must be strictly positive or {@code -1}, - * in which case the parameter value is never truncated - * (use with care). - * The default value is {@link #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. - * @return this {@link Builder} instance (for method chaining). - */ - public Builder withMaxParameterValueLength(int maxParameterValueLength) { - this.maxParameterValueLength = maxParameterValueLength; - return this; - } + private int maxLoggedParameters = DEFAULT_MAX_LOGGED_PARAMETERS; - /** - * Set the maximum number of query parameters that can be logged - * by the driver. Queries with a number of parameters higher than this value - * will not have all their parameters logged. - * - * @param maxLoggedParameters The maximum number of query parameters that can be logged - * by the driver. It must be strictly positive or {@code -1}, - * in which case all parameters will be logged, regardless of their number - * (use with care). - * The default value is {@link #DEFAULT_MAX_LOGGED_PARAMETERS}. - * @return this {@link Builder} instance (for method chaining). - */ - public Builder withMaxLoggedParameters(int maxLoggedParameters) { - this.maxLoggedParameters = maxLoggedParameters; - return this; - } + private long slowQueryLatencyThresholdMillis = DEFAULT_SLOW_QUERY_THRESHOLD_MS; - /** - * Build the {@link QueryLogger} instance. - * - * @return the {@link QueryLogger} instance. 
- * @throws IllegalArgumentException if the builder is unable to build a valid instance due to incorrect settings. - */ - public QueryLogger build() { - if (constantThreshold) { - return new ConstantThresholdQueryLogger(maxQueryStringLength, maxParameterValueLength, - maxLoggedParameters, slowQueryLatencyThresholdMillis); - } else { - return new DynamicThresholdQueryLogger(maxQueryStringLength, maxParameterValueLength, - maxLoggedParameters, slowQueryLatencyThresholdPercentile, - percentileLatencyTracker); - } - } + private double slowQueryLatencyThresholdPercentile = DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE; - } + private PercentileTracker percentileLatencyTracker; - // Getters and Setters + private boolean constantThreshold = true; /** - * Return the maximum length of a CQL query string that can be logged verbatim - * by the driver. Query strings longer than this value will be truncated - * when logged. - * The default value is {@link #DEFAULT_MAX_QUERY_STRING_LENGTH}. + * Enables slow query latency tracking based on constant thresholds. + * + *

Note: You should either use {@link #withConstantThreshold(long) constant thresholds} or + * {@link #withDynamicThreshold(PercentileTracker, double) dynamic thresholds}, not both. * - * @return The maximum length of a CQL query string that can be logged verbatim - * by the driver. + * @param slowQueryLatencyThresholdMillis The threshold in milliseconds beyond which queries are + * considered 'slow' and logged as such by the driver. The default value is {@link + * #DEFAULT_SLOW_QUERY_THRESHOLD_MS} + * @return this {@link Builder} instance (for method chaining). */ - public int getMaxQueryStringLength() { - return maxQueryStringLength; + public Builder withConstantThreshold(long slowQueryLatencyThresholdMillis) { + this.slowQueryLatencyThresholdMillis = slowQueryLatencyThresholdMillis; + constantThreshold = true; + return this; } /** - * Set the maximum length of a CQL query string that can be logged verbatim - * by the driver. Query strings longer than this value will be truncated - * when logged. + * Enables slow query latency tracking based on dynamic thresholds. * - * @param maxQueryStringLength The maximum length of a CQL query string - * that can be logged verbatim by the driver. - * It must be strictly positive or {@code -1}, - * in which case the query is never truncated - * (use with care). - * @throws IllegalArgumentException if {@code maxQueryStringLength <= 0 && maxQueryStringLength != -1}. + *

Dynamic thresholds are based on latency percentiles, as computed by {@link + * PercentileTracker}. + * + *

Note: You should either use {@link #withConstantThreshold(long) constant thresholds} or + * {@link #withDynamicThreshold(PercentileTracker, double) dynamic thresholds}, not both. + * + * @param percentileLatencyTracker the {@link PercentileTracker} instance to use for recording + * latency histograms. Cannot be {@code null}. It will get {@link + * Cluster#register(LatencyTracker) registered} with the cluster at the same time as this + * logger. + * @param slowQueryLatencyThresholdPercentile Slow queries threshold percentile. It must be + * comprised between 0 inclusive and 100 exclusive. The default value is {@link + * #DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE} + * @return this {@link Builder} instance (for method chaining). */ - public void setMaxQueryStringLength(int maxQueryStringLength) { - if (maxQueryStringLength <= 0 && maxQueryStringLength != -1) - throw new IllegalArgumentException("Invalid maxQueryStringLength, should be > 0 or -1, got " + maxQueryStringLength); - this.maxQueryStringLength = maxQueryStringLength; + public Builder withDynamicThreshold( + PercentileTracker percentileLatencyTracker, double slowQueryLatencyThresholdPercentile) { + this.percentileLatencyTracker = percentileLatencyTracker; + this.slowQueryLatencyThresholdPercentile = slowQueryLatencyThresholdPercentile; + constantThreshold = false; + return this; } /** - * Return the maximum length of a query parameter value that can be logged verbatim - * by the driver. Parameter values longer than this value will be truncated - * when logged. - * The default value is {@link #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. + * Set the maximum length of a CQL query string that can be logged verbatim by the driver. Query + * strings longer than this value will be truncated when logged. * - * @return The maximum length of a query parameter value that can be logged verbatim - * by the driver. + * @param maxQueryStringLength The maximum length of a CQL query string that can be logged + * verbatim by the driver. It must be strictly positive or {@code -1}, in which case the + * query is never truncated (use with care). The default value is {@link + * #DEFAULT_MAX_QUERY_STRING_LENGTH}. + * @return this {@link Builder} instance (for method chaining). */ - public int getMaxParameterValueLength() { - return maxParameterValueLength; + public Builder withMaxQueryStringLength(int maxQueryStringLength) { + this.maxQueryStringLength = maxQueryStringLength; + return this; } /** - * Set the maximum length of a query parameter value that can be logged verbatim - * by the driver. Parameter values longer than this value will be truncated - * when logged. + * Set the maximum length of a query parameter value that can be logged verbatim by the driver. + * Parameter values longer than this value will be truncated when logged. * - * @param maxParameterValueLength The maximum length of a query parameter value - * that can be logged verbatim by the driver. - * It must be strictly positive or {@code -1}, - * in which case the parameter value is never truncated - * (use with care). - * @throws IllegalArgumentException if {@code maxParameterValueLength <= 0 && maxParameterValueLength != -1}. + * @param maxParameterValueLength The maximum length of a query parameter value that can be + * logged verbatim by the driver. It must be strictly positive or {@code -1}, in which case + * the parameter value is never truncated (use with care). The default value is {@link + * #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. 
+ * @return this {@link Builder} instance (for method chaining). */ - public void setMaxParameterValueLength(int maxParameterValueLength) { - if (maxParameterValueLength <= 0 && maxParameterValueLength != -1) - throw new IllegalArgumentException("Invalid maxParameterValueLength, should be > 0 or -1, got " + maxParameterValueLength); - this.maxParameterValueLength = maxParameterValueLength; + public Builder withMaxParameterValueLength(int maxParameterValueLength) { + this.maxParameterValueLength = maxParameterValueLength; + return this; } /** - * Return the maximum number of query parameters that can be logged - * by the driver. Queries with a number of parameters higher than this value - * will not have all their parameters logged. - * The default value is {@link #DEFAULT_MAX_LOGGED_PARAMETERS}. + * Set the maximum number of query parameters that can be logged by the driver. Queries with a + * number of parameters higher than this value will not have all their parameters logged. * - * @return The maximum number of query parameters that can be logged - * by the driver. + * @param maxLoggedParameters The maximum number of query parameters that can be logged by the + * driver. It must be strictly positive or {@code -1}, in which case all parameters will be + * logged, regardless of their number (use with care). The default value is {@link + * #DEFAULT_MAX_LOGGED_PARAMETERS}. + * @return this {@link Builder} instance (for method chaining). */ - public int getMaxLoggedParameters() { - return maxLoggedParameters; + public Builder withMaxLoggedParameters(int maxLoggedParameters) { + this.maxLoggedParameters = maxLoggedParameters; + return this; } /** - * Set the maximum number of query parameters that can be logged - * by the driver. Queries with a number of parameters higher than this value - * will not have all their parameters logged. + * Build the {@link QueryLogger} instance. * - * @param maxLoggedParameters the maximum number of query parameters that can be logged - * by the driver. It must be strictly positive or {@code -1}, - * in which case all parameters will be logged, regardless of their number - * (use with care). - * @throws IllegalArgumentException if {@code maxLoggedParameters <= 0 && maxLoggedParameters != -1}. + * @return the {@link QueryLogger} instance. + * @throws IllegalArgumentException if the builder is unable to build a valid instance due to + * incorrect settings. 
*/ - public void setMaxLoggedParameters(int maxLoggedParameters) { - if (maxLoggedParameters <= 0 && maxLoggedParameters != -1) - throw new IllegalArgumentException("Invalid maxLoggedParameters, should be > 0 or -1, got " + maxLoggedParameters); - this.maxLoggedParameters = maxLoggedParameters; + public QueryLogger build() { + if (constantThreshold) { + return new ConstantThresholdQueryLogger( + maxQueryStringLength, + maxParameterValueLength, + maxLoggedParameters, + slowQueryLatencyThresholdMillis); + } else { + return new DynamicThresholdQueryLogger( + maxQueryStringLength, + maxParameterValueLength, + maxLoggedParameters, + slowQueryLatencyThresholdPercentile, + percentileLatencyTracker); + } } - - /** - * {@inheritDoc} - */ - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - if (cluster == null) - throw new IllegalStateException("This method should only be called after the logger has been registered with a cluster"); - - if (statement instanceof StatementWrapper) - statement = ((StatementWrapper) statement).getWrappedStatement(); - - long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); - if (exception == null) { - maybeLogNormalOrSlowQuery(host, statement, latencyMs); - } else { - maybeLogErrorQuery(host, statement, exception, latencyMs); - } + } + + // Getters and Setters + + /** + * Return the maximum length of a CQL query string that can be logged verbatim by the driver. + * Query strings longer than this value will be truncated when logged. The default value is {@link + * #DEFAULT_MAX_QUERY_STRING_LENGTH}. + * + * @return The maximum length of a CQL query string that can be logged verbatim by the driver. + */ + public int getMaxQueryStringLength() { + return maxQueryStringLength; + } + + /** + * Set the maximum length of a CQL query string that can be logged verbatim by the driver. Query + * strings longer than this value will be truncated when logged. + * + * @param maxQueryStringLength The maximum length of a CQL query string that can be logged + * verbatim by the driver. It must be strictly positive or {@code -1}, in which case the query + * is never truncated (use with care). + * @throws IllegalArgumentException if {@code maxQueryStringLength <= 0 && maxQueryStringLength != + * -1}. + */ + public void setMaxQueryStringLength(int maxQueryStringLength) { + if (maxQueryStringLength <= 0 && maxQueryStringLength != -1) + throw new IllegalArgumentException( + "Invalid maxQueryStringLength, should be > 0 or -1, got " + maxQueryStringLength); + this.maxQueryStringLength = maxQueryStringLength; + } + + /** + * Return the maximum length of a query parameter value that can be logged verbatim by the driver. + * Parameter values longer than this value will be truncated when logged. The default value is + * {@link #DEFAULT_MAX_PARAMETER_VALUE_LENGTH}. + * + * @return The maximum length of a query parameter value that can be logged verbatim by the + * driver. + */ + public int getMaxParameterValueLength() { + return maxParameterValueLength; + } + + /** + * Set the maximum length of a query parameter value that can be logged verbatim by the driver. + * Parameter values longer than this value will be truncated when logged. + * + * @param maxParameterValueLength The maximum length of a query parameter value that can be logged + * verbatim by the driver. It must be strictly positive or {@code -1}, in which case the + * parameter value is never truncated (use with care). 
+ * @throws IllegalArgumentException if {@code maxParameterValueLength <= 0 && + * maxParameterValueLength != -1}. + */ + public void setMaxParameterValueLength(int maxParameterValueLength) { + if (maxParameterValueLength <= 0 && maxParameterValueLength != -1) + throw new IllegalArgumentException( + "Invalid maxParameterValueLength, should be > 0 or -1, got " + maxParameterValueLength); + this.maxParameterValueLength = maxParameterValueLength; + } + + /** + * Return the maximum number of query parameters that can be logged by the driver. Queries with a + * number of parameters higher than this value will not have all their parameters logged. The + * default value is {@link #DEFAULT_MAX_LOGGED_PARAMETERS}. + * + * @return The maximum number of query parameters that can be logged by the driver. + */ + public int getMaxLoggedParameters() { + return maxLoggedParameters; + } + + /** + * Set the maximum number of query parameters that can be logged by the driver. Queries with a + * number of parameters higher than this value will not have all their parameters logged. + * + * @param maxLoggedParameters the maximum number of query parameters that can be logged by the + * driver. It must be strictly positive or {@code -1}, in which case all parameters will be + * logged, regardless of their number (use with care). + * @throws IllegalArgumentException if {@code maxLoggedParameters <= 0 && maxLoggedParameters != + * -1}. + */ + public void setMaxLoggedParameters(int maxLoggedParameters) { + if (maxLoggedParameters <= 0 && maxLoggedParameters != -1) + throw new IllegalArgumentException( + "Invalid maxLoggedParameters, should be > 0 or -1, got " + maxLoggedParameters); + this.maxLoggedParameters = maxLoggedParameters; + } + + /** {@inheritDoc} */ + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + if (cluster == null) + throw new IllegalStateException( + "This method should only be called after the logger has been registered with a cluster"); + + if (statement instanceof StatementWrapper) + statement = ((StatementWrapper) statement).getWrappedStatement(); + + long latencyMs = NANOSECONDS.toMillis(newLatencyNanos); + if (exception == null) { + maybeLogNormalOrSlowQuery(host, statement, latencyMs); + } else { + maybeLogErrorQuery(host, statement, exception, latencyMs); } - - protected abstract void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs); - - protected void maybeLogNormalQuery(Host host, Statement statement, long latencyMs) { - if (NORMAL_LOGGER.isDebugEnabled()) { - String message = String.format(NORMAL_TEMPLATE, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); - logQuery(statement, null, NORMAL_LOGGER, message); - } + } + + protected abstract void maybeLogNormalOrSlowQuery(Host host, Statement statement, long latencyMs); + + protected void maybeLogNormalQuery(Host host, Statement statement, long latencyMs) { + if (NORMAL_LOGGER.isDebugEnabled()) { + String message = + String.format( + NORMAL_TEMPLATE, + cluster.getClusterName(), + host, + latencyMs, + statementAsString(statement)); + logQuery(statement, null, NORMAL_LOGGER, message); } - - protected void maybeLogErrorQuery(Host host, Statement statement, Exception exception, long latencyMs) { - if (ERROR_LOGGER.isDebugEnabled()) { - String message = String.format(ERROR_TEMPLATE, cluster.getClusterName(), host, latencyMs, statementAsString(statement)); - logQuery(statement, exception, ERROR_LOGGER, message); - } + } + + protected void 
maybeLogErrorQuery( + Host host, Statement statement, Exception exception, long latencyMs) { + if (ERROR_LOGGER.isDebugEnabled() + && !(exception instanceof CancelledSpeculativeExecutionException)) { + String message = + String.format( + ERROR_TEMPLATE, + cluster.getClusterName(), + host, + latencyMs, + statementAsString(statement)); + logQuery(statement, exception, ERROR_LOGGER, message); } - - protected void logQuery(Statement statement, Exception exception, Logger logger, String message) { - boolean showParameterValues = logger.isTraceEnabled(); - if (showParameterValues) { - StringBuilder params = new StringBuilder(); - if (statement instanceof BoundStatement) { - appendParameters((BoundStatement) statement, params, maxLoggedParameters); - } else if (statement instanceof SimpleStatement) { - appendParameters((SimpleStatement) statement, params, maxLoggedParameters); - } else if (statement instanceof BatchStatement) { - BatchStatement batchStatement = (BatchStatement) statement; - int remaining = maxLoggedParameters; - for (Statement inner : batchStatement.getStatements()) { - if (inner instanceof BoundStatement) { - remaining = appendParameters((BoundStatement) inner, params, remaining); - } else if (inner instanceof SimpleStatement) { - remaining = appendParameters((SimpleStatement) inner, params, remaining); - } - } - } - if (params.length() > 0) - params.append("]"); - logger.trace(message + params, exception); - } else { - logger.debug(message, exception); - } + } + + protected void logQuery(Statement statement, Exception exception, Logger logger, String message) { + boolean showParameterValues = logger.isTraceEnabled(); + if (showParameterValues) { + StringBuilder params = new StringBuilder(); + if (statement instanceof BoundStatement) { + appendParameters((BoundStatement) statement, params, maxLoggedParameters); + } else if (statement instanceof SimpleStatement) { + appendParameters((SimpleStatement) statement, params, maxLoggedParameters); + } else if (statement instanceof BatchStatement) { + BatchStatement batchStatement = (BatchStatement) statement; + int remaining = maxLoggedParameters; + for (Statement inner : batchStatement.getStatements()) { + if (inner instanceof BoundStatement) { + remaining = appendParameters((BoundStatement) inner, params, remaining); + } else if (inner instanceof SimpleStatement) { + remaining = appendParameters((SimpleStatement) inner, params, remaining); + } + } + } else if (statement instanceof BuiltStatement) { + appendParameters((BuiltStatement) statement, params, maxLoggedParameters); + } + if (params.length() > 0) params.append("]"); + logger.trace(message + params, exception); + } else { + logger.debug(message, exception); + } + } + + protected String statementAsString(Statement statement) { + StringBuilder sb = new StringBuilder(); + if (statement instanceof BatchStatement) { + BatchStatement bs = (BatchStatement) statement; + int statements = bs.getStatements().size(); + int boundValues = countBoundValues(bs); + sb.append("[" + statements + " statements, " + boundValues + " bound values] "); + } else if (statement instanceof BoundStatement) { + int boundValues = ((BoundStatement) statement).wrapper.values.length; + sb.append("[" + boundValues + " bound values] "); + } else if (statement instanceof SimpleStatement) { + int boundValues = ((SimpleStatement) statement).valuesCount(); + sb.append("[" + boundValues + " bound values] "); } - protected String statementAsString(Statement statement) { - StringBuilder sb = new StringBuilder(); - if 
(statement instanceof BatchStatement) { - BatchStatement bs = (BatchStatement) statement; - int statements = bs.getStatements().size(); - int boundValues = countBoundValues(bs); - sb.append("[" + statements + " statements, " + boundValues + " bound values] "); - } else if (statement instanceof BoundStatement) { - int boundValues = ((BoundStatement) statement).wrapper.values.length; - sb.append("[" + boundValues + " bound values] "); - } else if (statement instanceof SimpleStatement) { - int boundValues = ((SimpleStatement) statement).valuesCount(); - sb.append("[" + boundValues + " bound values] "); - } + append(statement, sb, maxQueryStringLength); + return sb.toString(); + } - append(statement, sb, maxQueryStringLength); - return sb.toString(); + protected int countBoundValues(BatchStatement bs) { + int count = 0; + for (Statement s : bs.getStatements()) { + if (s instanceof BoundStatement) count += ((BoundStatement) s).wrapper.values.length; + else if (s instanceof SimpleStatement) count += ((SimpleStatement) s).valuesCount(); } - - protected int countBoundValues(BatchStatement bs) { - int count = 0; - for (Statement s : bs.getStatements()) { - if (s instanceof BoundStatement) - count += ((BoundStatement) s).wrapper.values.length; - else if (s instanceof SimpleStatement) - count += ((SimpleStatement) s).valuesCount(); - } - return count; - } - - protected int appendParameters(BoundStatement statement, StringBuilder buffer, int remaining) { - if (remaining == 0) - return 0; - ColumnDefinitions metadata = statement.preparedStatement().getVariables(); - int numberOfParameters = metadata.size(); - if (numberOfParameters > 0) { - List definitions = metadata.asList(); - int numberOfLoggedParameters; - if (remaining == -1) { - numberOfLoggedParameters = numberOfParameters; - } else { - numberOfLoggedParameters = Math.min(remaining, numberOfParameters); - remaining -= numberOfLoggedParameters; - } - for (int i = 0; i < numberOfLoggedParameters; i++) { - if (buffer.length() == 0) - buffer.append(" ["); - else - buffer.append(", "); - String value = statement.isSet(i) - ? parameterValueAsString(definitions.get(i), statement.wrapper.values[i]) - : ""; - buffer.append(String.format("%s:%s", metadata.getName(i), value)); - } - if (numberOfLoggedParameters < numberOfParameters) { - buffer.append(FURTHER_PARAMS_OMITTED); - } - } - return remaining; + return count; + } + + protected int appendParameters(BoundStatement statement, StringBuilder buffer, int remaining) { + if (remaining == 0) return 0; + ColumnDefinitions metadata = statement.preparedStatement().getVariables(); + int numberOfParameters = metadata.size(); + if (numberOfParameters > 0) { + List definitions = metadata.asList(); + int numberOfLoggedParameters; + if (remaining == -1) { + numberOfLoggedParameters = numberOfParameters; + } else { + numberOfLoggedParameters = Math.min(remaining, numberOfParameters); + remaining -= numberOfLoggedParameters; + } + for (int i = 0; i < numberOfLoggedParameters; i++) { + if (buffer.length() == 0) buffer.append(" ["); + else buffer.append(", "); + String value = + statement.isSet(i) + ? 
parameterValueAsString(definitions.get(i), statement.wrapper.values[i]) + : ""; + buffer.append(String.format("%s:%s", metadata.getName(i), value)); + } + if (numberOfLoggedParameters < numberOfParameters) { + buffer.append(FURTHER_PARAMS_OMITTED); + } } - - protected String parameterValueAsString(ColumnDefinitions.Definition definition, ByteBuffer raw) { - String valueStr; - if (raw == null || raw.remaining() == 0) { - valueStr = "NULL"; - } else { - DataType type = definition.getType(); - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - TypeCodec codec = codecRegistry.codecFor(type); - int maxParameterValueLength = this.maxParameterValueLength; - if (type.equals(DataType.blob()) && maxParameterValueLength != -1) { - // prevent large blobs from being converted to strings - int maxBufferLength = Math.max(2, (maxParameterValueLength - 2) / 2); - boolean bufferTooLarge = raw.remaining() > maxBufferLength; - if (bufferTooLarge) { - raw = (ByteBuffer) raw.duplicate().limit(maxBufferLength); - } - Object value = codec.deserialize(raw, protocolVersion()); - valueStr = codec.format(value); - if (bufferTooLarge) { - valueStr = valueStr + TRUNCATED_OUTPUT; - } - } else { - Object value = codec.deserialize(raw, protocolVersion()); - valueStr = codec.format(value); - if (maxParameterValueLength != -1 && valueStr.length() > maxParameterValueLength) { - valueStr = valueStr.substring(0, maxParameterValueLength) + TRUNCATED_OUTPUT; - } - } - } - return valueStr; - } - - protected int appendParameters(SimpleStatement statement, StringBuilder buffer, int remaining) { - if (remaining == 0) - return 0; - int numberOfParameters = statement.valuesCount(); - if (numberOfParameters > 0) { - int numberOfLoggedParameters; - if (remaining == -1) { - numberOfLoggedParameters = numberOfParameters; - } else { - numberOfLoggedParameters = remaining > numberOfParameters ? 
numberOfParameters : remaining; - remaining -= numberOfLoggedParameters; - } - Iterator valueNames = null; - if (statement.usesNamedValues()) { - valueNames = statement.getValueNames().iterator(); - } - for (int i = 0; i < numberOfLoggedParameters; i++) { - if (buffer.length() == 0) - buffer.append(" ["); - else - buffer.append(", "); - if (valueNames != null && valueNames.hasNext()) { - String valueName = valueNames.next(); - buffer.append(String.format("%s:%s", valueName, parameterValueAsString(statement.getObject(valueName)))); - } else { - buffer.append(parameterValueAsString(statement.getObject(i))); - } - } - if (numberOfLoggedParameters < numberOfParameters) { - buffer.append(FURTHER_PARAMS_OMITTED); - } - } - return remaining; + return remaining; + } + + protected String parameterValueAsString(ColumnDefinitions.Definition definition, ByteBuffer raw) { + String valueStr; + if (raw == null || raw.remaining() == 0) { + valueStr = "NULL"; + } else { + DataType type = definition.getType(); + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + TypeCodec codec = codecRegistry.codecFor(type); + int maxParameterValueLength = this.maxParameterValueLength; + if (type.equals(DataType.blob()) && maxParameterValueLength != -1) { + // prevent large blobs from being converted to strings + int maxBufferLength = Math.max(2, (maxParameterValueLength - 2) / 2); + boolean bufferTooLarge = raw.remaining() > maxBufferLength; + if (bufferTooLarge) { + raw = (ByteBuffer) raw.duplicate().limit(maxBufferLength); + } + Object value = codec.deserialize(raw, protocolVersion()); + valueStr = codec.format(value); + if (bufferTooLarge) { + valueStr = valueStr + TRUNCATED_OUTPUT; + } + } else { + Object value = codec.deserialize(raw, protocolVersion()); + valueStr = codec.format(value); + if (maxParameterValueLength != -1 && valueStr.length() > maxParameterValueLength) { + valueStr = valueStr.substring(0, maxParameterValueLength) + TRUNCATED_OUTPUT; + } + } } - - protected String parameterValueAsString(Object value) { - String valueStr; - if (value == null) { - valueStr = "NULL"; + return valueStr; + } + + protected int appendParameters(SimpleStatement statement, StringBuilder buffer, int remaining) { + if (remaining == 0) return 0; + int numberOfParameters = statement.valuesCount(); + if (numberOfParameters > 0) { + int numberOfLoggedParameters; + if (remaining == -1) { + numberOfLoggedParameters = numberOfParameters; + } else { + numberOfLoggedParameters = remaining > numberOfParameters ? 
numberOfParameters : remaining; + remaining -= numberOfLoggedParameters; + } + Iterator valueNames = null; + if (statement.usesNamedValues()) { + valueNames = statement.getValueNames().iterator(); + } + for (int i = 0; i < numberOfLoggedParameters; i++) { + if (buffer.length() == 0) buffer.append(" ["); + else buffer.append(", "); + if (valueNames != null && valueNames.hasNext()) { + String valueName = valueNames.next(); + buffer.append( + String.format( + "%s:%s", valueName, parameterValueAsString(statement.getObject(valueName)))); } else { - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - TypeCodec codec = codecRegistry.codecFor(value); - int maxParameterValueLength = this.maxParameterValueLength; - if (codec.cqlType.equals(DataType.blob()) && maxParameterValueLength != -1) { - // prevent large blobs from being converted to strings - ByteBuffer buf = (ByteBuffer) value; - int maxBufferLength = Math.max(2, (maxParameterValueLength - 2) / 2); - boolean bufferTooLarge = buf.remaining() > maxBufferLength; - if (bufferTooLarge) { - value = (ByteBuffer) buf.duplicate().limit(maxBufferLength); - } - valueStr = codec.format(value); - if (bufferTooLarge) { - valueStr = valueStr + TRUNCATED_OUTPUT; - } - } else { - valueStr = codec.format(value); - if (maxParameterValueLength != -1 && valueStr.length() > maxParameterValueLength) { - valueStr = valueStr.substring(0, maxParameterValueLength) + TRUNCATED_OUTPUT; - } - } + buffer.append(parameterValueAsString(statement.getObject(i))); } - return valueStr; + } + if (numberOfLoggedParameters < numberOfParameters) { + buffer.append(FURTHER_PARAMS_OMITTED); + } } + return remaining; + } + + protected String parameterValueAsString(Object value) { + String valueStr; + if (value == null) { + valueStr = "NULL"; + } else { + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + TypeCodec codec = codecRegistry.codecFor(value); + int maxParameterValueLength = this.maxParameterValueLength; + if (codec.cqlType.equals(DataType.blob()) && maxParameterValueLength != -1) { + // prevent large blobs from being converted to strings + ByteBuffer buf = (ByteBuffer) value; + int maxBufferLength = Math.max(2, (maxParameterValueLength - 2) / 2); + boolean bufferTooLarge = buf.remaining() > maxBufferLength; + if (bufferTooLarge) { + value = (ByteBuffer) buf.duplicate().limit(maxBufferLength); + } + valueStr = codec.format(value); + if (bufferTooLarge) { + valueStr = valueStr + TRUNCATED_OUTPUT; + } + } else { + valueStr = codec.format(value); + if (maxParameterValueLength != -1 && valueStr.length() > maxParameterValueLength) { + valueStr = valueStr.substring(0, maxParameterValueLength) + TRUNCATED_OUTPUT; + } + } + } + return valueStr; + } - private ProtocolVersion protocolVersion() { - // Since the QueryLogger can be registered before the Cluster was initialized, we can't retrieve - // it at construction time. Cache it field at first use (a volatile field is good enough since we - // don't need mutual exclusion). 
- if (protocolVersion == null) { - protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - // At least one connection was established when QueryLogger is invoked - assert protocolVersion != null : "protocol version should be defined"; - } - return protocolVersion; - } - - protected int append(Statement statement, StringBuilder buffer, int remaining) { - if (statement instanceof RegularStatement) { - RegularStatement rs = (RegularStatement) statement; - String query = rs.getQueryString(); - remaining = append(query.trim(), buffer, remaining); - } else if (statement instanceof BoundStatement) { - remaining = append(((BoundStatement) statement).preparedStatement().getQueryString().trim(), buffer, remaining); - } else if (statement instanceof BatchStatement) { - BatchStatement batchStatement = (BatchStatement) statement; - remaining = append("BEGIN", buffer, remaining); - switch (batchStatement.batchType) { - case UNLOGGED: - append(" UNLOGGED", buffer, remaining); - break; - case COUNTER: - append(" COUNTER", buffer, remaining); - break; - } - remaining = append(" BATCH", buffer, remaining); - for (Statement stmt : batchStatement.getStatements()) { - remaining = append(" ", buffer, remaining); - remaining = append(stmt, buffer, remaining); - } - remaining = append(" APPLY BATCH", buffer, remaining); - } else { - // Unknown types of statement - // Call toString() as a last resort - remaining = append(statement.toString(), buffer, remaining); - } - if (buffer.charAt(buffer.length() - 1) != ';') { - remaining = append(";", buffer, remaining); - } - return remaining; - } - - protected int append(CharSequence str, StringBuilder buffer, int remaining) { - if (remaining == -2) { - // capacity exceeded - } else if (remaining == -1) { - // unlimited capacity - buffer.append(str); - } else if (str.length() > remaining) { - buffer.append(str, 0, remaining).append(TRUNCATED_OUTPUT); - remaining = -2; + protected int appendParameters(BuiltStatement statement, StringBuilder buffer, int remaining) { + if (remaining == 0) { + return 0; + } + ByteBuffer[] values = + statement.getValues(protocolVersion(), cluster.getConfiguration().getCodecRegistry()); + int numberOfParameters = values == null ? 0 : values.length; + if (numberOfParameters > 0) { + int numberOfLoggedParameters; + if (remaining == -1) { + numberOfLoggedParameters = numberOfParameters; + } else { + numberOfLoggedParameters = remaining > numberOfParameters ? numberOfParameters : remaining; + remaining -= numberOfLoggedParameters; + } + + for (int i = 0; i < numberOfLoggedParameters; i++) { + if (buffer.length() == 0) { + buffer.append(" ["); } else { - buffer.append(str); - remaining -= str.length(); + buffer.append(", "); } - return remaining; - } + buffer.append(parameterValueAsString(statement.getObject(i))); + } + if (numberOfLoggedParameters < numberOfParameters) { + buffer.append(FURTHER_PARAMS_OMITTED); + } + } + return remaining; + } + + private ProtocolVersion protocolVersion() { + // Since the QueryLogger can be registered before the Cluster was initialized, we can't retrieve + // it at construction time. Cache it field at first use (a volatile field is good enough since + // we + // don't need mutual exclusion). 
+ if (protocolVersion == null) { + protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + // At least one connection was established when QueryLogger is invoked + assert protocolVersion != null : "protocol version should be defined"; + } + return protocolVersion; + } + + protected int append(Statement statement, StringBuilder buffer, int remaining) { + if (statement instanceof RegularStatement) { + RegularStatement rs = (RegularStatement) statement; + String query = rs.getQueryString(); + remaining = append(query.trim(), buffer, remaining); + } else if (statement instanceof BoundStatement) { + remaining = + append( + ((BoundStatement) statement).preparedStatement().getQueryString().trim(), + buffer, + remaining); + } else if (statement instanceof BatchStatement) { + BatchStatement batchStatement = (BatchStatement) statement; + remaining = append("BEGIN", buffer, remaining); + switch (batchStatement.batchType) { + case UNLOGGED: + append(" UNLOGGED", buffer, remaining); + break; + case COUNTER: + append(" COUNTER", buffer, remaining); + break; + } + remaining = append(" BATCH", buffer, remaining); + for (Statement stmt : batchStatement.getStatements()) { + remaining = append(" ", buffer, remaining); + remaining = append(stmt, buffer, remaining); + } + remaining = append(" APPLY BATCH", buffer, remaining); + } else { + // Unknown types of statement + // Call toString() as a last resort + remaining = append(statement.toString(), buffer, remaining); + } + if (buffer.charAt(buffer.length() - 1) != ';') { + remaining = append(";", buffer, remaining); + } + return remaining; + } + + protected int append(CharSequence str, StringBuilder buffer, int remaining) { + if (remaining == -2) { + // capacity exceeded + } else if (remaining == -1) { + // unlimited capacity + buffer.append(str); + } else if (str.length() > remaining) { + buffer.append(str, 0, remaining).append(TRUNCATED_OUTPUT); + remaining = -2; + } else { + buffer.append(str); + remaining -= str.length(); + } + return remaining; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java index d7fc041649c..31e93af4448 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,471 +19,508 @@ import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import com.datastax.driver.core.utils.MoreFutures; -import com.google.common.util.concurrent.Futures; +import com.datastax.driver.core.utils.MoreObjects; -/** - * Options related to defaults for individual queries. - */ +/** Options related to defaults for individual queries. */ public class QueryOptions { - /** - * The default consistency level for queries: {@link ConsistencyLevel#LOCAL_ONE}. - */ - public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.LOCAL_ONE; - - /** - * The default serial consistency level for conditional updates: {@link ConsistencyLevel#SERIAL}. - */ - public static final ConsistencyLevel DEFAULT_SERIAL_CONSISTENCY_LEVEL = ConsistencyLevel.SERIAL; - - /** - * The default fetch size for SELECT queries: 5000. - */ - public static final int DEFAULT_FETCH_SIZE = 5000; - - /** - * The default value for {@link #getDefaultIdempotence()}: {@code false}. - */ - public static final boolean DEFAULT_IDEMPOTENCE = false; - - public static final int DEFAULT_MAX_PENDING_REFRESH_NODE_LIST_REQUESTS = 20; - - public static final int DEFAULT_MAX_PENDING_REFRESH_NODE_REQUESTS = 20; - - public static final int DEFAULT_MAX_PENDING_REFRESH_SCHEMA_REQUESTS = 20; - - public static final int DEFAULT_REFRESH_NODE_LIST_INTERVAL_MILLIS = 1000; - - public static final int DEFAULT_REFRESH_NODE_INTERVAL_MILLIS = 1000; - - public static final int DEFAULT_REFRESH_SCHEMA_INTERVAL_MILLIS = 1000; - - private volatile ConsistencyLevel consistency = DEFAULT_CONSISTENCY_LEVEL; - private volatile ConsistencyLevel serialConsistency = DEFAULT_SERIAL_CONSISTENCY_LEVEL; - private volatile int fetchSize = DEFAULT_FETCH_SIZE; - private volatile boolean defaultIdempotence = DEFAULT_IDEMPOTENCE; - - private volatile boolean metadataEnabled = true; - - private volatile int maxPendingRefreshNodeListRequests = DEFAULT_MAX_PENDING_REFRESH_NODE_LIST_REQUESTS; - private volatile int maxPendingRefreshNodeRequests = DEFAULT_MAX_PENDING_REFRESH_NODE_REQUESTS; - private volatile int maxPendingRefreshSchemaRequests = DEFAULT_MAX_PENDING_REFRESH_SCHEMA_REQUESTS; - - private volatile int refreshNodeListIntervalMillis = DEFAULT_REFRESH_NODE_LIST_INTERVAL_MILLIS; - private volatile int refreshNodeIntervalMillis = DEFAULT_REFRESH_NODE_INTERVAL_MILLIS; - private volatile int refreshSchemaIntervalMillis = DEFAULT_REFRESH_SCHEMA_INTERVAL_MILLIS; - - private volatile boolean reprepareOnUp = true; - private volatile Cluster.Manager manager; - private volatile boolean prepareOnAllHosts = true; - - /** - * Creates a new {@link QueryOptions} instance using the {@link #DEFAULT_CONSISTENCY_LEVEL}, - * {@link #DEFAULT_SERIAL_CONSISTENCY_LEVEL} and {@link #DEFAULT_FETCH_SIZE}. - */ - public QueryOptions() { - } - - void register(Cluster.Manager manager) { - this.manager = manager; - } - - /** - * Sets the default consistency level to use for queries. - *

- * The consistency level set through this method will be use for queries - * that don't explicitly have a consistency level, i.e. when {@link Statement#getConsistencyLevel} - * returns {@code null}. - * - * @param consistencyLevel the new consistency level to set as default. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setConsistencyLevel(ConsistencyLevel consistencyLevel) { - this.consistency = consistencyLevel; - return this; - } - - /** - * The default consistency level used by queries. - * - * @return the default consistency level used by queries. - */ - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } - - /** - * Sets the default serial consistency level to use for queries. - *

- * The serial consistency level set through this method will be use for queries - * that don't explicitly have a serial consistency level, i.e. when {@link Statement#getSerialConsistencyLevel} - * returns {@code null}. - * - * @param serialConsistencyLevel the new serial consistency level to set as default. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setSerialConsistencyLevel(ConsistencyLevel serialConsistencyLevel) { - this.serialConsistency = serialConsistencyLevel; - return this; - } - - /** - * The default serial consistency level used by queries. - * - * @return the default serial consistency level used by queries. - */ - public ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistency; - } - - /** - * Sets the default fetch size to use for SELECT queries. - *

- * The fetch size set through this method will be use for queries - * that don't explicitly have a fetch size, i.e. when {@link Statement#getFetchSize} - * is less or equal to 0. - * - * @param fetchSize the new fetch size to set as default. It must be - * strictly positive but you can use {@code Integer.MAX_VALUE} to disable - * paging. - * @return this {@code QueryOptions} instance. - * @throws IllegalArgumentException if {@code fetchSize <e; 0}. - * @throws UnsupportedFeatureException if version 1 of the native protocol is in - * use and {@code fetchSize != Integer.MAX_VALUE} as paging is not supported by - * version 1 of the protocol. See {@link Cluster.Builder#withProtocolVersion} - * for more details on protocol versions. - */ - public QueryOptions setFetchSize(int fetchSize) { - if (fetchSize <= 0) - throw new IllegalArgumentException("Invalid fetchSize, should be > 0, got " + fetchSize); - - ProtocolVersion version = manager == null ? null : manager.protocolVersion(); - if (fetchSize != Integer.MAX_VALUE && version == ProtocolVersion.V1) - throw new UnsupportedFeatureException(version, "Paging is not supported"); - - this.fetchSize = fetchSize; - return this; - } - - /** - * The default fetch size used by queries. - * - * @return the default fetch size used by queries. - */ - public int getFetchSize() { - return fetchSize; - } - - /** - * Sets the default idempotence for queries. - *

- * This will be used for statements for which {@link com.datastax.driver.core.Statement#isIdempotent()} - * returns {@code null}. - * - * @param defaultIdempotence the new value to set as default idempotence. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setDefaultIdempotence(boolean defaultIdempotence) { - this.defaultIdempotence = defaultIdempotence; - return this; - } - - /** - * The default idempotence for queries. - *

- * It defaults to {@link #DEFAULT_IDEMPOTENCE}. - * - * @return the default idempotence for queries. - */ - public boolean getDefaultIdempotence() { - return defaultIdempotence; + /** The default consistency level for queries: {@link ConsistencyLevel#LOCAL_ONE}. */ + public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.LOCAL_ONE; + + /** + * The default serial consistency level for conditional updates: {@link ConsistencyLevel#SERIAL}. + */ + public static final ConsistencyLevel DEFAULT_SERIAL_CONSISTENCY_LEVEL = ConsistencyLevel.SERIAL; + + /** The default fetch size for SELECT queries: 5000. */ + public static final int DEFAULT_FETCH_SIZE = 5000; + + /** The default value for {@link #getDefaultIdempotence()}: {@code false}. */ + public static final boolean DEFAULT_IDEMPOTENCE = false; + + public static final int DEFAULT_MAX_PENDING_REFRESH_NODE_LIST_REQUESTS = 20; + + public static final int DEFAULT_MAX_PENDING_REFRESH_NODE_REQUESTS = 20; + + public static final int DEFAULT_MAX_PENDING_REFRESH_SCHEMA_REQUESTS = 20; + + public static final int DEFAULT_REFRESH_NODE_LIST_INTERVAL_MILLIS = 1000; + + public static final int DEFAULT_REFRESH_NODE_INTERVAL_MILLIS = 1000; + + public static final int DEFAULT_REFRESH_SCHEMA_INTERVAL_MILLIS = 1000; + + private volatile ConsistencyLevel consistency = DEFAULT_CONSISTENCY_LEVEL; + private volatile ConsistencyLevel serialConsistency = DEFAULT_SERIAL_CONSISTENCY_LEVEL; + private volatile int fetchSize = DEFAULT_FETCH_SIZE; + private volatile boolean defaultIdempotence = DEFAULT_IDEMPOTENCE; + + private volatile boolean consistencySet = false; + private volatile boolean metadataEnabled = true; + + private volatile int maxPendingRefreshNodeListRequests = + DEFAULT_MAX_PENDING_REFRESH_NODE_LIST_REQUESTS; + private volatile int maxPendingRefreshNodeRequests = DEFAULT_MAX_PENDING_REFRESH_NODE_REQUESTS; + private volatile int maxPendingRefreshSchemaRequests = + DEFAULT_MAX_PENDING_REFRESH_SCHEMA_REQUESTS; + + private volatile int refreshNodeListIntervalMillis = DEFAULT_REFRESH_NODE_LIST_INTERVAL_MILLIS; + private volatile int refreshNodeIntervalMillis = DEFAULT_REFRESH_NODE_INTERVAL_MILLIS; + private volatile int refreshSchemaIntervalMillis = DEFAULT_REFRESH_SCHEMA_INTERVAL_MILLIS; + + private volatile boolean reprepareOnUp = true; + private volatile Cluster.Manager manager; + private volatile boolean prepareOnAllHosts = true; + + /** + * Creates a new {@link QueryOptions} instance using the {@link #DEFAULT_CONSISTENCY_LEVEL}, + * {@link #DEFAULT_SERIAL_CONSISTENCY_LEVEL} and {@link #DEFAULT_FETCH_SIZE}. + */ + public QueryOptions() {} + + void register(Cluster.Manager manager) { + this.manager = manager; + } + + /** + * Sets the default consistency level to use for queries. + * + *

The consistency level set through this method will be used for queries that don't explicitly + * have a consistency level, i.e. when {@link Statement#getConsistencyLevel} returns {@code null}. + * + * @param consistencyLevel the new consistency level to set as default. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setConsistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencySet = true; + this.consistency = consistencyLevel; + return this; + } + + /** + * The default consistency level used by queries. + * + * @return the default consistency level used by queries. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * Sets the default serial consistency level to use for queries. + * + *

The serial consistency level set through this method will be used for queries that don't + * explicitly have a serial consistency level, i.e. when {@link + * Statement#getSerialConsistencyLevel} returns {@code null}. + * + * @param serialConsistencyLevel the new serial consistency level to set as default. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setSerialConsistencyLevel(ConsistencyLevel serialConsistencyLevel) { + this.serialConsistency = serialConsistencyLevel; + return this; + } + + /** + * The default serial consistency level used by queries. + * + * @return the default serial consistency level used by queries. + */ + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + /** + * Sets the default fetch size to use for SELECT queries. + * + *

The fetch size set through this method will be used for queries that don't explicitly have a + * fetch size, i.e. when {@link Statement#getFetchSize} is less than or equal to 0. + * + * @param fetchSize the new fetch size to set as default. It must be strictly positive but you can + * use {@code Integer.MAX_VALUE} to disable paging. + * @return this {@code QueryOptions} instance. + * @throws IllegalArgumentException if {@code fetchSize <= 0}. + * @throws UnsupportedFeatureException if version 1 of the native protocol is in use and {@code + * fetchSize != Integer.MAX_VALUE} as paging is not supported by version 1 of the protocol. + * See {@link Cluster.Builder#withProtocolVersion} for more details on protocol versions. + */ + public QueryOptions setFetchSize(int fetchSize) { + if (fetchSize <= 0) + throw new IllegalArgumentException("Invalid fetchSize, should be > 0, got " + fetchSize); + + ProtocolVersion version = manager == null ? null : manager.protocolVersion(); + if (fetchSize != Integer.MAX_VALUE && version == ProtocolVersion.V1) + throw new UnsupportedFeatureException(version, "Paging is not supported"); + + this.fetchSize = fetchSize; + return this; + } + + /** + * The default fetch size used by queries. + * + * @return the default fetch size used by queries. + */ + public int getFetchSize() { + return fetchSize; + } + + /** + * Sets the default idempotence for queries. + * + *

This will be used for statements for which {@link + * com.datastax.driver.core.Statement#isIdempotent()} returns {@code null}. + * + * @param defaultIdempotence the new value to set as default idempotence. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setDefaultIdempotence(boolean defaultIdempotence) { + this.defaultIdempotence = defaultIdempotence; + return this; + } + + /** + * The default idempotence for queries. + * + *

It defaults to {@link #DEFAULT_IDEMPOTENCE}. + * + * @return the default idempotence for queries. + */ + public boolean getDefaultIdempotence() { + return defaultIdempotence; + } + + /** + * Set whether the driver should prepare statements on all hosts in the cluster. + * + *

A statement is normally prepared in two steps: + * + * <ol> + *   <li>prepare the query on a single host in the cluster; + *   <li>if that succeeds, prepare on all other hosts. + * </ol> + * + * This option controls whether step 2 is executed. It is enabled by default. + * + *

The reason why you might want to disable it is to optimize network usage if you have a large + * number of clients preparing the same set of statements at startup. If your load balancing + * policy distributes queries randomly, each client will pick a different host to prepare its + * statements, and on the whole each host has a good chance of having been hit by at least one + * client for each statement. + * + *

On the other hand, if that assumption turns out to be wrong and one host hasn't prepared a + * given statement, it needs to be re-prepared on the fly the first time it gets executed; this + * causes a performance penalty (one extra roundtrip to resend the query to prepare, and another + * to retry the execution). + * + * @param prepareOnAllHosts the new value to set to indicate whether to prepare statements once or + * on all nodes. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setPrepareOnAllHosts(boolean prepareOnAllHosts) { + this.prepareOnAllHosts = prepareOnAllHosts; + return this; + } + + /** + * Returns whether the driver should prepare statements on all hosts in the cluster. + * + * @return the value. + * @see #setPrepareOnAllHosts(boolean) + */ + public boolean isPrepareOnAllHosts() { + return this.prepareOnAllHosts; + } + + /** + * Set whether the driver should re-prepare all cached prepared statements on a host when it marks + * it back up. + * + *

This option is enabled by default. + * + *

The reason why you might want to disable it is to optimize reconnection time when you + * believe hosts often get marked down because of temporary network issues, rather than the host + * really crashing. In that case, the host still has prepared statements in its cache when the + * driver reconnects, so re-preparing is redundant. + * + *

On the other hand, if that assumption turns out to be wrong and the host had really + * restarted, its prepared statement cache is empty, and statements need to be re-prepared on the + * fly the first time they get executed; this causes a performance penalty (one extra roundtrip to + * resend the query to prepare, and another to retry the execution). + * + * @param reprepareOnUp whether the driver should re-prepare when marking a node up. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setReprepareOnUp(boolean reprepareOnUp) { + this.reprepareOnUp = reprepareOnUp; + return this; + } + + /** + * Whether the driver should re-prepare all cached prepared statements on a host when it marks + * that host back up. + * + * @return the value. + * @see #setReprepareOnUp(boolean) + */ + public boolean isReprepareOnUp() { + return this.reprepareOnUp; + } + + /** + * Toggle client-side token and schema metadata. + * + *

This feature is enabled by default. Some applications might wish to disable it in order to + * eliminate the overhead of querying the metadata and building its client-side representation. + * However, take note that doing so will have important consequences: + * + *

+ * <ul> + *   <li>most schema- or token-related methods in {@link Metadata} will return stale or null/empty + * results (see the javadoc of each method for details); + *   <li>{@link Metadata#newToken(String)} and {@link Metadata#newTokenRange(Token, Token)} will + * throw an exception if metadata was disabled before startup; + *   <li>token-aware routing will not work properly: if metadata was never initialized, {@link + * com.datastax.driver.core.policies.TokenAwarePolicy} will always delegate to its child + * policy. Otherwise, it might not pick the best coordinator (i.e. choose a host that is not + * a replica for the statement's routing key). In addition, statements prepared while the + * metadata was disabled might also be sent to a non-optimal coordinator, even if metadata + * was re-enabled later. + * </ul>
+ * + * @param enabled whether metadata is enabled. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setMetadataEnabled(boolean enabled) { + boolean wasEnabled = this.metadataEnabled; + this.metadataEnabled = enabled; + if (!wasEnabled && enabled && manager != null) { + // This is roughly the same as what we do in ControlConnection.tryConnect(): + // 1. call submitNodeListRefresh() first to + // be able to compute the token map for the first time, + // which will be incomplete due to the lack of keyspace metadata + GuavaCompatibility.INSTANCE.addCallback( + manager.submitNodeListRefresh(), + new MoreFutures.SuccessCallback() { + @Override + public void onSuccess(Void result) { + // 2. then call submitSchemaRefresh() to + // refresh schema metadata and re-compute the token map + // this time with information about keyspaces + manager.submitSchemaRefresh(null, null, null, null); + } + }); } - - /** - * Set whether the driver should prepare statements on all hosts in the cluster. - *
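As a side note on how the options above are consumed: the sketch below shows one plausible way to wire a `QueryOptions` instance into a `Cluster` at startup. It only illustrates the setters defined in this file; the contact point, consistency level and fetch size are placeholder values and are not part of this change.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;

public class QueryOptionsExample {
  public static void main(String[] args) {
    // Placeholder values, for illustration only.
    QueryOptions queryOptions =
        new QueryOptions()
            .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) // default is LOCAL_ONE
            .setFetchSize(2000) // must be > 0; Integer.MAX_VALUE disables paging
            .setDefaultIdempotence(true)
            .setMetadataEnabled(true); // keep token/schema metadata for token-aware routing

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withQueryOptions(queryOptions)
            .build();
    try {
      Session session = cluster.connect();
      System.out.println("Connected; default fetch size = " + queryOptions.getFetchSize());
    } finally {
      cluster.close();
    }
  }
}
```

Because every setter returns the same `QueryOptions` instance, the calls can be chained as shown before the options are registered with the cluster.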

- * A statement is normally prepared in two steps: - *

    - *
  1. prepare the query on a single host in the cluster;
  2. - *
  3. if that succeeds, prepare on all other hosts.
  4. - *
- * This option controls whether step 2 is executed. It is enabled by default. - *

- * The reason why you might want to disable it is to optimize network usage if you - * have a large number of clients preparing the same set of statements at startup. - * If your load balancing policy distributes queries randomly, each client will pick - * a different host to prepare its statements, and on the whole each host has a good - * chance of having been hit by at least one client for each statement. - *

- * On the other hand, if that assumption turns out to be wrong and one host hasn't - * prepared a given statement, it needs to be re-prepared on the fly the first time - * it gets executed; this causes a performance penalty (one extra roundtrip to resend - * the query to prepare, and another to retry the execution). - * - * @param prepareOnAllHosts the new value to set to indicate whether to prepare - * statements once or on all nodes. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setPrepareOnAllHosts(boolean prepareOnAllHosts) { - this.prepareOnAllHosts = prepareOnAllHosts; - return this; - } - - /** - * Returns whether the driver should prepare statements on all hosts in the cluster. - * - * @return the value. - * @see #setPrepareOnAllHosts(boolean) - */ - public boolean isPrepareOnAllHosts() { - return this.prepareOnAllHosts; - } - - /** - * Set whether the driver should re-prepare all cached prepared statements on a host - * when it marks it back up. - *

- * This option is enabled by default. - *

- * The reason why you might want to disable it is to optimize reconnection time when - * you believe hosts often get marked down because of temporary network issues, rather - * than the host really crashing. In that case, the host still has prepared statements - * in its cache when the driver reconnects, so re-preparing is redundant. - *

- * On the other hand, if that assumption turns out to be wrong and the host had - * really restarted, its prepared statement cache is empty, and statements need to be - * re-prepared on the fly the first time they get executed; this causes a performance - * penalty (one extra roundtrip to resend the query to prepare, and another to retry - * the execution). - * - * @param reprepareOnUp whether the driver should re-prepare when marking a node up. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setReprepareOnUp(boolean reprepareOnUp) { - this.reprepareOnUp = reprepareOnUp; - return this; - } - - /** - * Whether the driver should re-prepare all cached prepared statements on a host - * when its marks that host back up. - * - * @return the value. - * @see #setReprepareOnUp(boolean) - */ - public boolean isReprepareOnUp() { - return this.reprepareOnUp; - } - - /** - * Toggle client-side token and schema metadata. - *

- * This feature is enabled by default. Some applications might wish to disable it - * in order to eliminate the overhead of querying the metadata and building its - * client-side representation. However, take note that doing so will have important - * consequences: - *

    - *
  • most schema- or token-related methods in {@link Metadata} will return stale - * or null/empty results (see the javadoc of each method for details);
  • - *
  • {@link Metadata#newToken(String)} and - * {@link Metadata#newTokenRange(Token, Token)} will throw an exception if metadata - * was disabled before startup;
  • - *
  • token-aware routing will not work properly: if metadata was never initialized, - * {@link com.datastax.driver.core.policies.TokenAwarePolicy} will always delegate - * to its child policy. Otherwise, it might not pick the best coordinator (i.e. chose - * a host that is not a replica for the statement's routing key). In addition, statements - * prepared while the metadata was disabled might also be sent to a non-optimal coordinator, - * even if metadata was re-enabled later.
  • - *
- * - * @param enabled whether metadata is enabled. - * @return this {@code QueryOptions} instance. - */ - public QueryOptions setMetadataEnabled(boolean enabled) { - boolean wasEnabled = this.metadataEnabled; - this.metadataEnabled = enabled; - if (!wasEnabled && enabled && manager != null) { - // This is roughly the same as what we do in ControlConnection.tryConnect(): - // 1. call submitNodeListRefresh() first to - // be able to compute the token map for the first time, - // which will be incomplete due to the lack of keyspace metadata - Futures.addCallback(manager.submitNodeListRefresh(), new MoreFutures.SuccessCallback() { - @Override - public void onSuccess(Void result) { - // 2. then call submitSchemaRefresh() to - // refresh schema metadata and re-compute the token map - // this time with information about keyspaces - manager.submitSchemaRefresh(null, null, null, null); - } - }); - } - return this; - } - - /** - * Whether client-side token and schema metadata is enabled. - * - * @return the value. - * @see #setMetadataEnabled(boolean) - */ - public boolean isMetadataEnabled() { - return metadataEnabled; - } - - /** - * Sets the default window size in milliseconds used to debounce node list refresh requests. - *

- * When the control connection receives a new schema refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * when a timer expires, then the pending requests are coalesced and executed - * as a single request. - * - * @param refreshSchemaIntervalMillis The default window size in milliseconds used to debounce schema refresh requests. - */ - public QueryOptions setRefreshSchemaIntervalMillis(int refreshSchemaIntervalMillis) { - this.refreshSchemaIntervalMillis = refreshSchemaIntervalMillis; - return this; - } - - /** - * The default window size in milliseconds used to debounce schema refresh requests. - * - * @return The default window size in milliseconds used to debounce schema refresh requests. - */ - public int getRefreshSchemaIntervalMillis() { - return refreshSchemaIntervalMillis; - } - - /** - * Sets the maximum number of schema refresh requests that the control connection can accumulate - * before executing them. - *

- * When the control connection receives a new schema refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * if the control connection receives too many events, is parameter allows to trigger - * execution of pending requests, event if the last timer is still running. - * - * @param maxPendingRefreshSchemaRequests The maximum number of schema refresh requests that the control connection can accumulate - * before executing them. - */ - public QueryOptions setMaxPendingRefreshSchemaRequests(int maxPendingRefreshSchemaRequests) { - this.maxPendingRefreshSchemaRequests = maxPendingRefreshSchemaRequests; - return this; - } - - /** - * The maximum number of schema refresh requests that the control connection can accumulate - * before executing them. - * - * @return The maximum number of schema refresh requests that the control connection can accumulate - * before executing them. - */ - public int getMaxPendingRefreshSchemaRequests() { - return maxPendingRefreshSchemaRequests; - } - - /** - * Sets the default window size in milliseconds used to debounce node list refresh requests. - *

- * When the control connection receives a new node list refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * when a timer expires, then the pending requests are coalesced and executed - * as a single request. - * - * @param refreshNodeListIntervalMillis The default window size in milliseconds used to debounce node list refresh requests. - */ - public QueryOptions setRefreshNodeListIntervalMillis(int refreshNodeListIntervalMillis) { - this.refreshNodeListIntervalMillis = refreshNodeListIntervalMillis; - return this; - } - - /** - * The default window size in milliseconds used to debounce node list refresh requests. - * - * @return The default window size in milliseconds used to debounce node list refresh requests. - */ - public int getRefreshNodeListIntervalMillis() { - return refreshNodeListIntervalMillis; - } - - /** - * Sets the maximum number of node list refresh requests that the control connection can accumulate - * before executing them. - *

- * When the control connection receives a new node list refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * if the control connection receives too many events, is parameter allows to trigger - * execution of pending requests, event if the last timer is still running. - * - * @param maxPendingRefreshNodeListRequests The maximum number of node list refresh requests that the control connection can accumulate - * before executing them. - */ - public QueryOptions setMaxPendingRefreshNodeListRequests(int maxPendingRefreshNodeListRequests) { - this.maxPendingRefreshNodeListRequests = maxPendingRefreshNodeListRequests; - return this; - } - - /** - * Sets the maximum number of node list refresh requests that the control connection can accumulate - * before executing them. - * - * @return The maximum number of node list refresh requests that the control connection can accumulate - * before executing them. - */ - public int getMaxPendingRefreshNodeListRequests() { - return maxPendingRefreshNodeListRequests; - } - - /** - * Sets the default window size in milliseconds used to debounce node refresh requests. - *

- * When the control connection receives a new node refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * when a timer expires, then the pending requests are coalesced and executed - * as a single request. - * - * @param refreshNodeIntervalMillis The default window size in milliseconds used to debounce node refresh requests. - */ - public QueryOptions setRefreshNodeIntervalMillis(int refreshNodeIntervalMillis) { - this.refreshNodeIntervalMillis = refreshNodeIntervalMillis; - return this; - } - - /** - * The default window size in milliseconds used to debounce node refresh requests. - * - * @return The default window size in milliseconds used to debounce node refresh requests. - */ - public int getRefreshNodeIntervalMillis() { - return refreshNodeIntervalMillis; - } - - /** - * Sets the maximum number of node refresh requests that the control connection can accumulate - * before executing them. - *

- * When the control connection receives a new node refresh request, - * it puts it on hold and starts a timer, cancelling any previous running timer; - * if the control connection receives too many events, is parameter allows to trigger - * execution of pending requests, event if the last timer is still running. - * - * @param maxPendingRefreshNodeRequests The maximum number of node refresh requests that the control connection can accumulate - * before executing them. - */ - public QueryOptions setMaxPendingRefreshNodeRequests(int maxPendingRefreshNodeRequests) { - this.maxPendingRefreshNodeRequests = maxPendingRefreshNodeRequests; - return this; - } - - /** - * The maximum number of node refresh requests that the control connection can accumulate - * before executing them. - * - * @return The maximum number of node refresh requests that the control connection can accumulate - * before executing them. - */ - public int getMaxPendingRefreshNodeRequests() { - return maxPendingRefreshNodeRequests; + return this; + } + + /** + * Whether client-side token and schema metadata is enabled. + * + * @return the value. + * @see #setMetadataEnabled(boolean) + */ + public boolean isMetadataEnabled() { + return metadataEnabled; + } + + /** + * Sets the default window size in milliseconds used to debounce node list refresh requests. + * + *

When the control connection receives a new schema refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; when a timer expires, then the pending + * requests are coalesced and executed as a single request. + * + * @param refreshSchemaIntervalMillis The default window size in milliseconds used to debounce + * schema refresh requests. + */ + public QueryOptions setRefreshSchemaIntervalMillis(int refreshSchemaIntervalMillis) { + this.refreshSchemaIntervalMillis = refreshSchemaIntervalMillis; + return this; + } + + /** + * The default window size in milliseconds used to debounce schema refresh requests. + * + * @return The default window size in milliseconds used to debounce schema refresh requests. + */ + public int getRefreshSchemaIntervalMillis() { + return refreshSchemaIntervalMillis; + } + + /** + * Sets the maximum number of schema refresh requests that the control connection can accumulate + * before executing them. + * + *

When the control connection receives a new schema refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; if the control connection receives too + * many events, this parameter allows triggering execution of pending requests, even if the last + * timer is still running. + * + * @param maxPendingRefreshSchemaRequests The maximum number of schema refresh requests that the + * control connection can accumulate before executing them. + */ + public QueryOptions setMaxPendingRefreshSchemaRequests(int maxPendingRefreshSchemaRequests) { + this.maxPendingRefreshSchemaRequests = maxPendingRefreshSchemaRequests; + return this; + } + + /** + * The maximum number of schema refresh requests that the control connection can accumulate before + * executing them. + * + * @return The maximum number of schema refresh requests that the control connection can + * accumulate before executing them. + */ + public int getMaxPendingRefreshSchemaRequests() { + return maxPendingRefreshSchemaRequests; + } + + /** + * Sets the default window size in milliseconds used to debounce node list refresh requests. + * + *

When the control connection receives a new node list refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; when a timer expires, then the pending + * requests are coalesced and executed as a single request. + * + * @param refreshNodeListIntervalMillis The default window size in milliseconds used to debounce + * node list refresh requests. + */ + public QueryOptions setRefreshNodeListIntervalMillis(int refreshNodeListIntervalMillis) { + this.refreshNodeListIntervalMillis = refreshNodeListIntervalMillis; + return this; + } + + /** + * The default window size in milliseconds used to debounce node list refresh requests. + * + * @return The default window size in milliseconds used to debounce node list refresh requests. + */ + public int getRefreshNodeListIntervalMillis() { + return refreshNodeListIntervalMillis; + } + + /** + * Sets the maximum number of node list refresh requests that the control connection can + * accumulate before executing them. + * + *

When the control connection receives a new node list refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; if the control connection receives too + * many events, this parameter allows triggering execution of pending requests, even if the last + * timer is still running. + * + * @param maxPendingRefreshNodeListRequests The maximum number of node list refresh requests that + * the control connection can accumulate before executing them. + */ + public QueryOptions setMaxPendingRefreshNodeListRequests(int maxPendingRefreshNodeListRequests) { + this.maxPendingRefreshNodeListRequests = maxPendingRefreshNodeListRequests; + return this; + } + + /** + * The maximum number of node list refresh requests that the control connection can + * accumulate before executing them. + * + * @return The maximum number of node list refresh requests that the control connection can + * accumulate before executing them. + */ + public int getMaxPendingRefreshNodeListRequests() { + return maxPendingRefreshNodeListRequests; + } + + /** + * Sets the default window size in milliseconds used to debounce node refresh requests. + * + *

When the control connection receives a new node refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; when a timer expires, then the pending + * requests are coalesced and executed as a single request. + * + * @param refreshNodeIntervalMillis The default window size in milliseconds used to debounce node + * refresh requests. + */ + public QueryOptions setRefreshNodeIntervalMillis(int refreshNodeIntervalMillis) { + this.refreshNodeIntervalMillis = refreshNodeIntervalMillis; + return this; + } + + /** + * The default window size in milliseconds used to debounce node refresh requests. + * + * @return The default window size in milliseconds used to debounce node refresh requests. + */ + public int getRefreshNodeIntervalMillis() { + return refreshNodeIntervalMillis; + } + + /** + * Sets the maximum number of node refresh requests that the control connection can accumulate + * before executing them. + * + *

When the control connection receives a new node refresh request, it puts it on hold and + * starts a timer, cancelling any previous running timer; if the control connection receives too + * many events, this parameter allows triggering execution of pending requests, even if the last + * timer is still running. + * + * @param maxPendingRefreshNodeRequests The maximum number of node refresh requests that the + * control connection can accumulate before executing them. + */ + public QueryOptions setMaxPendingRefreshNodeRequests(int maxPendingRefreshNodeRequests) { + this.maxPendingRefreshNodeRequests = maxPendingRefreshNodeRequests; + return this; + } + + /** + * The maximum number of node refresh requests that the control connection can accumulate before + * executing them. + * + * @return The maximum number of node refresh requests that the control connection can accumulate + * before executing them. + */ + public int getMaxPendingRefreshNodeRequests() { + return maxPendingRefreshNodeRequests; + } + + @Override + public boolean equals(Object that) { + if (that == null || !(that instanceof QueryOptions)) { + return false; } + QueryOptions other = (QueryOptions) that; + + return (this.consistency.equals(other.consistency) + && this.serialConsistency.equals(other.serialConsistency) + && this.fetchSize == other.fetchSize + && this.defaultIdempotence == other.defaultIdempotence + && this.metadataEnabled == other.metadataEnabled + && this.maxPendingRefreshNodeListRequests == other.maxPendingRefreshNodeListRequests + && this.maxPendingRefreshNodeRequests == other.maxPendingRefreshNodeRequests + && this.maxPendingRefreshSchemaRequests == other.maxPendingRefreshSchemaRequests + && this.refreshNodeListIntervalMillis == other.refreshNodeListIntervalMillis + && this.refreshNodeIntervalMillis == other.refreshNodeIntervalMillis + && this.refreshSchemaIntervalMillis == other.refreshSchemaIntervalMillis + && this.reprepareOnUp == other.reprepareOnUp + && this.prepareOnAllHosts == other.prepareOnAllHosts); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + consistency, + serialConsistency, + fetchSize, + defaultIdempotence, + metadataEnabled, + maxPendingRefreshNodeListRequests, + maxPendingRefreshNodeRequests, + maxPendingRefreshSchemaRequests, + refreshNodeListIntervalMillis, + refreshNodeIntervalMillis, + refreshSchemaIntervalMillis, + reprepareOnUp, + prepareOnAllHosts); + } + + public boolean isConsistencySet() { + return consistencySet; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java index 1d59a4f201e..49318d363c1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,288 +19,305 @@ import com.datastax.driver.core.exceptions.TraceRetrievalException; import com.google.common.util.concurrent.Uninterruptibles; - import java.net.InetAddress; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * The Cassandra trace for a query. - *

- * A trace is generated by Cassandra when query tracing is enabled for the - * query. The trace itself is stored in Cassandra in the {@code sessions} and - * {@code events} table in the {@code system_traces} keyspace and can be - * retrieve manually using the trace identifier (the one returned by - * {@link #getTraceId}). - *

- * This class provides facilities to fetch the traces from Cassandra. Please - * note that the writing of the trace is done asynchronously in Cassandra. So - * accessing the trace too soon after the query may result in the trace being - * incomplete. + * + *

A trace is generated by Cassandra when query tracing is enabled for the query. The trace + * itself is stored in Cassandra in the {@code sessions} and {@code events} tables in the {@code + * system_traces} keyspace and can be retrieved manually using the trace identifier (the one returned + * by {@link #getTraceId}). + * + *

This class provides facilities to fetch the traces from Cassandra. Please note that the + * writing of the trace is done asynchronously in Cassandra. So accessing the trace too soon after + * the query may result in the trace being incomplete. */ public class QueryTrace { - private static final String SELECT_SESSIONS_FORMAT = "SELECT * FROM system_traces.sessions WHERE session_id = %s"; - private static final String SELECT_EVENTS_FORMAT = "SELECT * FROM system_traces.events WHERE session_id = %s"; - - private static final int MAX_TRIES = 5; - private static final long BASE_SLEEP_BETWEEN_TRIES_IN_MS = 3; - - private final UUID traceId; - - private volatile String requestType; - // We use the duration to figure out if the trace is complete, because - // that's the last event that is written (and it is written asynchronously - // so it's possible that a fetch gets all the trace except the duration). - private volatile int duration = Integer.MIN_VALUE; - private volatile InetAddress coordinator; - private volatile Map parameters; - private volatile long startedAt; - private volatile List events; - - private final SessionManager session; - private final Lock fetchLock = new ReentrantLock(); - - QueryTrace(UUID traceId, SessionManager session) { - this.traceId = traceId; - this.session = session; + private static final String SELECT_SESSIONS_FORMAT = + "SELECT * FROM system_traces.sessions WHERE session_id = %s"; + private static final String SELECT_EVENTS_FORMAT = + "SELECT * FROM system_traces.events WHERE session_id = %s"; + + private static final int MAX_TRIES = 5; + private static final long BASE_SLEEP_BETWEEN_TRIES_IN_MS = 3; + + private final UUID traceId; + + private volatile String requestType; + // We use the duration to figure out if the trace is complete, because + // that's the last event that is written (and it is written asynchronously + // so it's possible that a fetch gets all the trace except the duration). + private volatile int duration = Integer.MIN_VALUE; + private volatile InetAddress coordinator; + private volatile Map parameters; + private volatile long startedAt; + private volatile List events; + + private final SessionManager session; + private final Lock fetchLock = new ReentrantLock(); + + QueryTrace(UUID traceId, SessionManager session) { + this.traceId = traceId; + this.session = session; + } + + /** + * Returns the identifier of this trace. + * + *

Note that contrary to the other methods in this class, this does not entail fetching query + * trace details from Cassandra. + * + * @return the identifier of this trace. + */ + public UUID getTraceId() { + return traceId; + } + + /** + * Returns the type of request. + * + * @return the type of request or {@code null} if the request type is not yet available. + * @throws TraceRetrievalException if the trace details cannot be retrieved from Cassandra + * successfully. + */ + public String getRequestType() { + maybeFetchTrace(); + return requestType; + } + + /** + * Returns the server-side duration of the query in microseconds. + * + * @return the (server side) duration of the query in microseconds. This method will return {@code + * Integer.MIN_VALUE} if the duration is not yet available. + * @throws TraceRetrievalException if the trace details cannot be retrieved from Cassandra + * successfully. + */ + public int getDurationMicros() { + maybeFetchTrace(); + return duration; + } + + /** + * Returns the coordinator host of the query. + * + * @return the coordinator host of the query or {@code null} if the coordinator is not yet + * available. + * @throws TraceRetrievalException if the trace details cannot be retrieved from Cassandra + * successfully. + */ + public InetAddress getCoordinator() { + maybeFetchTrace(); + return coordinator; + } + + /** + * Returns the parameters attached to this trace. + * + * @return the parameters attached to this trace, or {@code null} if the parameters are not yet + * available. + * @throws TraceRetrievalException if the trace details cannot be retrieved from Cassandra + * successfully. + */ + public Map getParameters() { + maybeFetchTrace(); + return parameters; + } + + /** + * Returns the server-side timestamp of the start of this query. + * + * @return the server side timestamp of the start of this query or 0 if the start timestamp is not + * available. + * @throws TraceRetrievalException if the trace details cannot be retrieved from Cassandra + * successfully. + */ + public long getStartedAt() { + maybeFetchTrace(); + return startedAt; + } + + /** + * Returns the events contained in this trace. + * + *

Query tracing is asynchronous in Cassandra. Hence, it is possible for the list returned to + * be missing some events for some of the replica involved in the query if the query trace is + * requested just after the return of the query it is a trace of (the only guarantee being that + * the list will contain the events pertaining to the coordinator of the query). + * + * @return the events contained in this trace. + * @throws TraceRetrievalException if the trace details cannot be retrieve from Cassandra + * successfully. + */ + public List getEvents() { + maybeFetchTrace(); + return events; + } + + @Override + public String toString() { + maybeFetchTrace(); + return String.format("%s [%s] - %dµs", requestType, traceId, duration); + } + + private void maybeFetchTrace() { + if (duration != Integer.MIN_VALUE) return; + + fetchLock.lock(); + try { + doFetchTrace(); + } finally { + fetchLock.unlock(); } - - /** - * Returns the identifier of this trace. - *
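To illustrate how this lazy, possibly-incomplete fetch behaves at the call site, here is a minimal sketch against the driver's public API; the contact point and the traced query are placeholders, and cluster/session lifecycle handling is omitted:

```java
Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
Session session = cluster.connect();

Statement statement = new SimpleStatement("SELECT * FROM system.local").enableTracing();
ResultSet rs = session.execute(statement);

QueryTrace trace = rs.getExecutionInfo().getQueryTrace();
// Each getter below may trigger the lazy fetch described above: the driver polls
// system_traces until the duration column appears, up to MAX_TRIES attempts.
System.out.printf("%s [%s] took %d microseconds on %s%n",
    trace.getRequestType(), trace.getTraceId(), trace.getDurationMicros(), trace.getCoordinator());
for (QueryTrace.Event event : trace.getEvents()) {
  System.out.printf("  +%d us | %s | %s%n",
      event.getSourceElapsedMicros(), event.getSource(), event.getDescription());
}
```

Any of the getters above can still throw TraceRetrievalException if the trace has not been fully written after MAX_TRIES attempts, as the retry loop below makes explicit.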

- * Note that contrary to the other methods in this class, this - * does not entail fetching query trace details from Cassandra. - * - * @return the identifier of this trace. - */ - public UUID getTraceId() { - return traceId; + } + + private void doFetchTrace() { + int tries = 0; + try { + // We cannot guarantee the trace is complete. But we can't at least wait until we have all the + // information + // the coordinator log in the trace. Since the duration is the last thing the coordinator log, + // that's + // what we check to know if the trace is "complete" (again, it may not contain the log of + // replicas). + while (duration == Integer.MIN_VALUE && tries <= MAX_TRIES) { + ++tries; + + ResultSetFuture sessionsFuture = + session.executeQuery( + new Requests.Query(String.format(SELECT_SESSIONS_FORMAT, traceId)), + Statement.DEFAULT); + ResultSetFuture eventsFuture = + session.executeQuery( + new Requests.Query(String.format(SELECT_EVENTS_FORMAT, traceId)), + Statement.DEFAULT); + + Row sessRow = sessionsFuture.get().one(); + if (sessRow != null && !sessRow.isNull("duration")) { + + requestType = sessRow.getString("request"); + coordinator = sessRow.getInet("coordinator"); + if (!sessRow.isNull("parameters")) + parameters = + Collections.unmodifiableMap( + sessRow.getMap("parameters", String.class, String.class)); + startedAt = sessRow.getTimestamp("started_at").getTime(); + + events = new ArrayList(); + for (Row evRow : eventsFuture.get()) { + events.add( + new Event( + evRow.getString("activity"), + evRow.getUUID("event_id").timestamp(), + evRow.getInet("source"), + evRow.getInt("source_elapsed"), + evRow.getString("thread"))); + } + events = Collections.unmodifiableList(events); + + // Set the duration last as it's our test to know if the trace is complete + duration = sessRow.getInt("duration"); + } else { + // The trace is not ready. Give it a few milliseconds before trying again. + // Notes: granted, sleeping uninterruptibly is bad, but having all method propagate + // InterruptedException bothers me. + Uninterruptibles.sleepUninterruptibly( + tries * BASE_SLEEP_BETWEEN_TRIES_IN_MS, TimeUnit.MILLISECONDS); + } + } + } catch (Exception e) { + throw new TraceRetrievalException("Unexpected exception while fetching query trace", e); } - /** - * Returns the type of request. - * - * @return the type of request or {@code null} if the request - * type is not yet available. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. - */ - public String getRequestType() { - maybeFetchTrace(); - return requestType; + if (tries > MAX_TRIES) + throw new TraceRetrievalException( + String.format( + "Unable to retrieve complete query trace for id %s after %d tries", + traceId, MAX_TRIES)); + } + + /** + * A trace event. + * + *

A query trace is composed of a list of trace events. + */ + public static class Event { + private final String name; + private final long timestamp; + private final InetAddress source; + private final int sourceElapsed; + private final String threadName; + + private Event( + String name, long timestamp, InetAddress source, int sourceElapsed, String threadName) { + this.name = name; + // Convert the UUID timestamp to an epoch timestamp; I stole this seemingly random value from + // cqlsh, hopefully it's correct. + this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; + this.source = source; + this.sourceElapsed = sourceElapsed; + this.threadName = threadName; } /** - * Returns the server-side duration of the query in microseconds. + * The event description, that is which activity this event correspond to. * - * @return the (server side) duration of the query in microseconds. This - * method will return {@code Integer.MIN_VALUE} if the duration is not yet - * available. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. + * @return the event description. */ - public int getDurationMicros() { - maybeFetchTrace(); - return duration; + public String getDescription() { + return name; } /** - * Returns the coordinator host of the query. + * Returns the server side timestamp of the event. * - * @return the coordinator host of the query or {@code null} - * if the coordinator is not yet available. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. + * @return the server side timestamp of the event. */ - public InetAddress getCoordinator() { - maybeFetchTrace(); - return coordinator; + public long getTimestamp() { + return timestamp; } /** - * Returns the parameters attached to this trace. + * Returns the address of the host having generated this event. * - * @return the parameters attached to this trace. or - * {@code null} if the coordinator is not yet available. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. + * @return the address of the host having generated this event. */ - public Map getParameters() { - maybeFetchTrace(); - return parameters; + public InetAddress getSource() { + return source; } /** - * Returns the server-side timestamp of the start of this query. + * Returns the number of microseconds elapsed on the source when this event occurred since when + * the source started handling the query. * - * @return the server side timestamp of the start of this query or - * 0 if the start timestamp is not available. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. + * @return the elapsed time on the source host when that event happened in microseconds. */ - public long getStartedAt() { - maybeFetchTrace(); - return startedAt; + public int getSourceElapsedMicros() { + return sourceElapsed; } /** - * Returns the events contained in this trace. - *
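For the record, the "seemingly random value" used in the Event constructor above has a standard interpretation. The following note is an assumption based on the type-1 UUID layout rather than anything stated in the patch, and `timeBasedUuid` is a placeholder for any version-1 java.util.UUID:

```java
// Type-1 UUID timestamps count 100-nanosecond intervals since 1582-10-15 (the Gregorian
// reform date). The constant is the number of such intervals between that epoch and the
// Unix epoch, so the conversion to epoch milliseconds is:
long gregorianToUnixOffset = 0x01b21dd213814000L; // 100ns intervals, 1582-10-15 -> 1970-01-01
long epochMillis = (timeBasedUuid.timestamp() - gregorianToUnixOffset) / 10000; // 100ns -> ms
```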

- * Query tracing is asynchronous in Cassandra. Hence, it - * is possible for the list returned to be missing some events for some of - * the replica involved in the query if the query trace is requested just - * after the return of the query it is a trace of (the only guarantee being - * that the list will contain the events pertaining to the coordinator of - * the query). + * Returns the name of the thread on which this event occurred. * - * @return the events contained in this trace. - * @throws TraceRetrievalException if the trace details cannot be retrieve - * from Cassandra successfully. + * @return the name of the thread on which this event occurred. */ - public List getEvents() { - maybeFetchTrace(); - return events; + public String getThreadName() { + return threadName; } @Override public String toString() { - maybeFetchTrace(); - return String.format("%s [%s] - %dµs", requestType, traceId, duration); - } - - private void maybeFetchTrace() { - if (duration != Integer.MIN_VALUE) - return; - - fetchLock.lock(); - try { - doFetchTrace(); - } finally { - fetchLock.unlock(); - } - } - - private void doFetchTrace() { - int tries = 0; - try { - // We cannot guarantee the trace is complete. But we can't at least wait until we have all the information - // the coordinator log in the trace. Since the duration is the last thing the coordinator log, that's - // what we check to know if the trace is "complete" (again, it may not contain the log of replicas). - while (duration == Integer.MIN_VALUE && tries <= MAX_TRIES) { - ++tries; - - ResultSetFuture sessionsFuture = session.executeQuery(new Requests.Query(String.format(SELECT_SESSIONS_FORMAT, traceId)), Statement.DEFAULT); - ResultSetFuture eventsFuture = session.executeQuery(new Requests.Query(String.format(SELECT_EVENTS_FORMAT, traceId)), Statement.DEFAULT); - - Row sessRow = sessionsFuture.get().one(); - if (sessRow != null && !sessRow.isNull("duration")) { - - requestType = sessRow.getString("request"); - coordinator = sessRow.getInet("coordinator"); - if (!sessRow.isNull("parameters")) - parameters = Collections.unmodifiableMap(sessRow.getMap("parameters", String.class, String.class)); - startedAt = sessRow.getTimestamp("started_at").getTime(); - - events = new ArrayList(); - for (Row evRow : eventsFuture.get()) { - events.add(new Event(evRow.getString("activity"), - evRow.getUUID("event_id").timestamp(), - evRow.getInet("source"), - evRow.getInt("source_elapsed"), - evRow.getString("thread"))); - } - events = Collections.unmodifiableList(events); - - // Set the duration last as it's our test to know if the trace is complete - duration = sessRow.getInt("duration"); - } else { - // The trace is not ready. Give it a few milliseconds before trying again. - // Notes: granted, sleeping uninterruptibly is bad, but having all method propagate - // InterruptedException bothers me. - Uninterruptibles.sleepUninterruptibly(tries * BASE_SLEEP_BETWEEN_TRIES_IN_MS, TimeUnit.MILLISECONDS); - } - } - } catch (Exception e) { - throw new TraceRetrievalException("Unexpected exception while fetching query trace", e); - } - - if (tries > MAX_TRIES) - throw new TraceRetrievalException(String.format("Unable to retrieve complete query trace for id %s after %d tries", traceId, MAX_TRIES)); - } - - /** - * A trace event. - *

- * A query trace is composed of a list of trace events. - */ - public static class Event { - private final String name; - private final long timestamp; - private final InetAddress source; - private final int sourceElapsed; - private final String threadName; - - private Event(String name, long timestamp, InetAddress source, int sourceElapsed, String threadName) { - this.name = name; - // Convert the UUID timestamp to an epoch timestamp; I stole this seemingly random value from cqlsh, hopefully it's correct. - this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; - this.source = source; - this.sourceElapsed = sourceElapsed; - this.threadName = threadName; - } - - /** - * The event description, that is which activity this event correspond to. - * - * @return the event description. - */ - public String getDescription() { - return name; - } - - /** - * Returns the server side timestamp of the event. - * - * @return the server side timestamp of the event. - */ - public long getTimestamp() { - return timestamp; - } - - /** - * Returns the address of the host having generated this event. - * - * @return the address of the host having generated this event. - */ - public InetAddress getSource() { - return source; - } - - /** - * Returns the number of microseconds elapsed on the source when this event - * occurred since when the source started handling the query. - * - * @return the elapsed time on the source host when that event happened - * in microseconds. - */ - public int getSourceElapsedMicros() { - return sourceElapsed; - } - - /** - * Returns the name of the thread on which this event occurred. - * - * @return the name of the thread on which this event occurred. - */ - public String getThreadName() { - return threadName; - } - - @Override - public String toString() { - return String.format("%s on %s[%s] at %s", name, source, threadName, new Date(timestamp)); - } + return String.format("%s on %s[%s] at %s", name, source, threadName, new Date(timestamp)); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java index cc2c9a6fc46..a9dbed930e8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,195 +17,236 @@ */ package com.datastax.driver.core; +import com.datastax.driver.core.Frame.Header; +import com.datastax.driver.core.Requests.QueryFlag; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; import com.datastax.driver.core.querybuilder.BuiltStatement; import com.datastax.driver.core.schemabuilder.SchemaStatement; - import java.nio.ByteBuffer; import java.util.Map; /** * A regular (non-prepared and non batched) CQL statement. - *

- * This class represents a query string along with query options (and optionally - * binary values, see {@code getValues}). It can be extended but {@link SimpleStatement} - * is provided as a simple implementation to build a {@code RegularStatement} directly - * from its query string. + * + *

This class represents a query string along with query options (and optionally binary values, + * see {@code getValues}). It can be extended but {@link SimpleStatement} is provided as a simple + * implementation to build a {@code RegularStatement} directly from its query string. */ public abstract class RegularStatement extends Statement { - /** - * Creates a new RegularStatement. - */ - protected RegularStatement() { - } + /** Creates a new RegularStatement. */ + protected RegularStatement() {} - /** - * Returns the query string for this statement. - *

- * It is important to note that the query string is merely - * a CQL representation of this statement, but it does - * not convey all the information stored in {@link Statement} - * objects. - *

- * For example, {@link Statement} objects carry numerous protocol-level - * settings, such as the {@link Statement#getConsistencyLevel() consistency level} to use, - * or the {@link Statement#isIdempotent() idempotence flag}, among others. - * None of these settings will be included in the resulting query string. - *

- * Similarly, if values have been set on this statement because - * it has bind markers, these values will not appear in the resulting query string. - *

- * Note: the consistency level was conveyed at CQL level in older versions - * of the CQL grammar, but since CASSANDRA-4734 - * it is now a protocol-level setting and consequently does not appear in the query string. - * - * @param codecRegistry the codec registry that will be used if the actual - * implementation needs to serialize Java objects in the - * process of generating the query. Note that it might be - * possible to use the no-arg {@link #getQueryString()} - * depending on the type of statement this is called on. - * @return a valid CQL query string. - * @see #getQueryString() - */ - public abstract String getQueryString(CodecRegistry codecRegistry); + /** + * Returns the query string for this statement. + * + *

It is important to note that the query string is merely a CQL representation of this + * statement, but it does not convey all the information stored in {@link Statement} + * objects. + * + *

For example, {@link Statement} objects carry numerous protocol-level settings, such as the + * {@link Statement#getConsistencyLevel() consistency level} to use, or the {@link + * Statement#isIdempotent() idempotence flag}, among others. None of these settings will be + * included in the resulting query string. + * + *

Similarly, if values have been set on this statement because it has bind markers, these + * values will not appear in the resulting query string. + * + *

Note: the consistency level was conveyed at CQL level in older versions of the CQL grammar, + * but since CASSANDRA-4734 it + * is now a protocol-level setting and consequently does not appear in the query string. + * + * @param codecRegistry the codec registry that will be used if the actual implementation needs to + * serialize Java objects in the process of generating the query. Note that it might be + * possible to use the no-arg {@link #getQueryString()} depending on the type of statement + * this is called on. + * @return a valid CQL query string. + * @see #getQueryString() + */ + public abstract String getQueryString(CodecRegistry codecRegistry); - /** - * Returns the query string for this statement. - *
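A short sketch of the point made in this Javadoc; the keyspace and table names are placeholders, not taken from the patch:

```java
SimpleStatement statement =
    new SimpleStatement("INSERT INTO ks.users (id, name) VALUES (?, ?)", 42, "alice");
statement.setConsistencyLevel(ConsistencyLevel.QUORUM);
statement.setIdempotent(true);

// The query string is only the CQL text: neither the bound values nor the protocol-level
// settings above are reflected in it.
String cql = statement.getQueryString(); // same as getQueryString(CodecRegistry.DEFAULT_INSTANCE)
boolean bound = statement.hasValues();   // true, even though the values are absent from cql
```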

- * This method calls {@link #getQueryString(CodecRegistry)} with {@link CodecRegistry#DEFAULT_INSTANCE}. - * Whether you should use this or the other variant depends on the type of statement this is - * called on: - *

    - *
  • for a {@link SimpleStatement} or {@link SchemaStatement}, the codec registry isn't - * actually needed, so it's always safe to use this method;
  • for a {@link BuiltStatement} you can use this method if you use no custom codecs, or if - * your custom codecs are registered with the default registry. Otherwise, use the other method and - * provide the registry that contains your codecs (see {@link BuiltStatement} for more explanations - * on why this is so);
  • for a {@link BatchStatement}, use the first rule if it contains no built statements, - * or the second rule otherwise.
- * - * @return a valid CQL query string. - */ - public String getQueryString() { - return getQueryString(CodecRegistry.DEFAULT_INSTANCE); - } + /** + * Returns the query string for this statement. + * + *

This method calls {@link #getQueryString(CodecRegistry)} with {@link + * CodecRegistry#DEFAULT_INSTANCE}. Whether you should use this or the other variant depends on + * the type of statement this is called on: + * + *

    + *
  • for a {@link SimpleStatement} or {@link SchemaStatement}, the codec registry isn't + * actually needed, so it's always safe to use this method; + *
  • for a {@link BuiltStatement} you can use this method if you use no custom codecs, or if + * your custom codecs are registered with the default registry. Otherwise, use the other + * method and provide the registry that contains your codecs (see {@link BuiltStatement} for + * more explanations on why this is so); + *
  • for a {@link BatchStatement}, use the first rule if it contains no built statements, or + * the second rule otherwise. + *
+ * + * @return a valid CQL query string. + */ + public String getQueryString() { + return getQueryString(CodecRegistry.DEFAULT_INSTANCE); + } - /** - * The positional values to use for this statement. - *

- * A statement can use either positional or named values, but not both. So if this method returns a non-null result, - * {@link #getNamedValues(ProtocolVersion, CodecRegistry)} will return {@code null}. - *

- * Values for a RegularStatement (i.e. if either method does not return - * {@code null}) are not supported with the native protocol version 1: you - * will get an {@link UnsupportedProtocolVersionException} when submitting - * one if version 1 of the protocol is in use (i.e. if you've forced version - * 1 through {@link Cluster.Builder#withProtocolVersion} or you use - * Cassandra 1.2). - * - * @param protocolVersion the protocol version that will be used to serialize - * the values. - * @param codecRegistry the codec registry that will be used to serialize the - * values. - * @throws InvalidTypeException if one of the values is not of a type - * that can be serialized to a CQL3 type - * @see SimpleStatement#SimpleStatement(String, Object...) - */ - public abstract ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry); + /** + * The positional values to use for this statement. + * + *

A statement can use either positional or named values, but not both. So if this method + * returns a non-null result, {@link #getNamedValues(ProtocolVersion, CodecRegistry)} will return + * {@code null}. + * + *

Values for a RegularStatement (i.e. if either method does not return {@code null}) are not + * supported with the native protocol version 1: you will get an {@link + * UnsupportedProtocolVersionException} when submitting one if version 1 of the protocol is in use + * (i.e. if you've forced version 1 through {@link Cluster.Builder#withProtocolVersion} or you use + * Cassandra 1.2). + * + * @param protocolVersion the protocol version that will be used to serialize the values. + * @param codecRegistry the codec registry that will be used to serialize the values. + * @throws InvalidTypeException if one of the values is not of a type that can be serialized to a + * CQL3 type + * @see SimpleStatement#SimpleStatement(String, Object...) + */ + public abstract ByteBuffer[] getValues( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry); - /** - * The named values to use for this statement. - *

- * A statement can use either positional or named values, but not both. So if this method returns a non-null result, - * {@link #getValues(ProtocolVersion, CodecRegistry)} will return {@code null}. - *

- * Values for a RegularStatement (i.e. if either method does not return - * {@code null}) are not supported with the native protocol version 1: you - * will get an {@link UnsupportedProtocolVersionException} when submitting - * one if version 1 of the protocol is in use (i.e. if you've forced version - * 1 through {@link Cluster.Builder#withProtocolVersion} or you use - * Cassandra 1.2). - * - * @param protocolVersion the protocol version that will be used to serialize - * the values. - * @param codecRegistry the codec registry that will be used to serialize the - * values. - * @return the named values. - * @throws InvalidTypeException if one of the values is not of a type - * that can be serialized to a CQL3 type - * @see SimpleStatement#SimpleStatement(String, Map) - */ - public abstract Map getNamedValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry); + /** + * The named values to use for this statement. + * + *

A statement can use either positional or named values, but not both. So if this method + * returns a non-null result, {@link #getValues(ProtocolVersion, CodecRegistry)} will return + * {@code null}. + * + *

Values for a RegularStatement (i.e. if either method does not return {@code null}) are not + * supported with the native protocol version 1: you will get an {@link + * UnsupportedProtocolVersionException} when submitting one if version 1 of the protocol is in use + * (i.e. if you've forced version 1 through {@link Cluster.Builder#withProtocolVersion} or you use + * Cassandra 1.2). + * + * @param protocolVersion the protocol version that will be used to serialize the values. + * @param codecRegistry the codec registry that will be used to serialize the values. + * @return the named values. + * @throws InvalidTypeException if one of the values is not of a type that can be serialized to a + * CQL3 type + * @see SimpleStatement#SimpleStatement(String, Map) + */ + public abstract Map getNamedValues( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry); - /** - * Whether or not this statement has values, that is if {@code getValues} - * will return {@code null} or not. - * - * @param codecRegistry the codec registry that will be used if the actual - * implementation needs to serialize Java objects in the - * process of determining if the query has values. - * Note that it might be possible to use the no-arg - * {@link #hasValues()} depending on the type of - * statement this is called on. - * @return {@code false} if both {@link #getValues(ProtocolVersion, CodecRegistry)} - * and {@link #getNamedValues(ProtocolVersion, CodecRegistry)} return {@code null}, {@code true} - * otherwise. - * @see #hasValues() - */ - public abstract boolean hasValues(CodecRegistry codecRegistry); + /** + * Whether or not this statement has values, that is if {@code getValues} will return {@code null} + * or not. + * + * @param codecRegistry the codec registry that will be used if the actual implementation needs to + * serialize Java objects in the process of determining if the query has values. Note that it + * might be possible to use the no-arg {@link #hasValues()} depending on the type of statement + * this is called on. + * @return {@code false} if both {@link #getValues(ProtocolVersion, CodecRegistry)} and {@link + * #getNamedValues(ProtocolVersion, CodecRegistry)} return {@code null}, {@code true} + * otherwise. + * @see #hasValues() + */ + public abstract boolean hasValues(CodecRegistry codecRegistry); - /** - * Whether this statement uses named values. - * - * @return {@code false} if {@link #getNamedValues(ProtocolVersion, CodecRegistry)} returns {@code null}, - * {@code true} otherwise. - */ - public abstract boolean usesNamedValues(); + /** + * Whether this statement uses named values. + * + * @return {@code false} if {@link #getNamedValues(ProtocolVersion, CodecRegistry)} returns {@code + * null}, {@code true} otherwise. + */ + public abstract boolean usesNamedValues(); - /** - * Whether or not this statement has values, that is if {@code getValues} - * will return {@code null} or not. - *
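As a sketch of the positional-versus-named distinction described above (placeholder schema; imports from java.util, java.nio and com.datastax.driver.core omitted for brevity):

```java
Map<String, Object> named = new HashMap<String, Object>();
named.put("id", 42);
named.put("name", "alice");
RegularStatement statement =
    new SimpleStatement("INSERT INTO ks.users (id, name) VALUES (:id, :name)", named);

boolean usesNamed = statement.usesNamedValues(); // true
// A statement carries either positional or named values, never both:
ByteBuffer[] positional =
    statement.getValues(ProtocolVersion.V4, CodecRegistry.DEFAULT_INSTANCE);      // null
Map<String, ByteBuffer> byName =
    statement.getNamedValues(ProtocolVersion.V4, CodecRegistry.DEFAULT_INSTANCE); // non-null
```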

- * This method calls {@link #hasValues(CodecRegistry)} with {@link ProtocolVersion#NEWEST_SUPPORTED}. - * Whether you should use this or the other variant depends on the type of statement this is - * called on: - *

    - *
  • for a {@link SimpleStatement} or {@link SchemaStatement}, the codec registry isn't - * actually needed, so it's always safe to use this method;
  • for a {@link BuiltStatement} you can use this method if you use no custom codecs, or if - * your custom codecs are registered with the default registry. Otherwise, use the other method and - * provide the registry that contains your codecs (see {@link BuiltStatement} for more explanations - * on why this is so);
  • for a {@link BatchStatement}, use the first rule if it contains no built statements, - * or the second rule otherwise.
- * - * @return {@code false} if {@link #getValues} returns {@code null}, {@code true} - * otherwise. - */ - public boolean hasValues() { - return hasValues(CodecRegistry.DEFAULT_INSTANCE); - } + /** + * Whether or not this statement has values, that is if {@code getValues} will return {@code null} + * or not. + * + *

This method calls {@link #hasValues(CodecRegistry)} with {@link + * CodecRegistry#DEFAULT_INSTANCE}. Whether you should use this or the other variant depends on + * the type of statement this is called on: + * + *

    + *
  • for a {@link SimpleStatement} or {@link SchemaStatement}, the codec registry isn't + * actually needed, so it's always safe to use this method; + *
  • for a {@link BuiltStatement} you can use this method if you use no custom codecs, or if + * your custom codecs are registered with the default registry. Otherwise, use the other + * method and provide the registry that contains your codecs (see {@link BuiltStatement} for + * more explanations on why this is so); + *
  • for a {@link BatchStatement}, use the first rule if it contains no built statements, or + * the second rule otherwise. + *
+ * + * @return {@code false} if {@link #getValues} returns {@code null}, {@code true} otherwise. + */ + public boolean hasValues() { + return hasValues(CodecRegistry.DEFAULT_INSTANCE); + } - /** - * Returns this statement as a CQL query string. - *

- * It is important to note that the query string is merely - * a CQL representation of this statement, but it does - * not convey all the information stored in {@link Statement} - * objects. - *

- * See the javadocs of {@link #getQueryString()} for more information. - * - * @return this statement as a CQL query string. - * @see #getQueryString() - */ - @Override - public String toString() { - return getQueryString(); + @Override + public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + int size = Header.lengthFor(protocolVersion); + try { + size += CBUtil.sizeOfLongString(getQueryString(codecRegistry)); + switch (protocolVersion) { + case V1: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + break; + case V2: + case V3: + case V4: + case V5: + case V6: + size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); + size += QueryFlag.serializedSize(protocolVersion); + if (hasValues()) { + if (usesNamedValues()) { + size += CBUtil.sizeOfNamedValueList(getNamedValues(protocolVersion, codecRegistry)); + } else { + size += CBUtil.sizeOfValueList(getValues(protocolVersion, codecRegistry)); + } + } + // Fetch size, serial CL and default timestamp also depend on session-level defaults + // (QueryOptions). + // We always count them to avoid having to inject QueryOptions here, at worst we + // overestimate by a + // few bytes. + size += 4; // fetch size + if (getPagingState() != null) { + size += CBUtil.sizeOfValue(getPagingState()); + } + size += CBUtil.sizeOfConsistencyLevel(getSerialConsistencyLevel()); + if (ProtocolFeature.CLIENT_TIMESTAMPS.isSupportedBy(protocolVersion)) { + size += 8; // timestamp + } + if (ProtocolFeature.CUSTOM_PAYLOADS.isSupportedBy(protocolVersion) + && getOutgoingPayload() != null) { + size += CBUtil.sizeOfBytesMap(getOutgoingPayload()); + } + break; + default: + throw protocolVersion.unsupported(); + } + } catch (Exception e) { + size = -1; } + return size; + } + + /** + * Returns this statement as a CQL query string. + * + *

It is important to note that the query string is merely a CQL representation of this + * statement, but it does not convey all the information stored in {@link Statement} + * objects. + * + *

See the javadocs of {@link #getQueryString()} for more information. + * + * @return this statement as a CQL query string. + * @see #getQueryString() + */ + @Override + public String toString() { + return getQueryString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.java index 53364df646d..7a17ec691d2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,9 @@ import io.netty.channel.socket.SocketChannel; import io.netty.handler.ssl.SslHandler; - +import java.net.InetSocketAddress; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; -import java.net.InetSocketAddress; /** * {@link RemoteEndpointAwareSSLOptions} implementation based on built-in JDK classes. @@ -29,72 +30,90 @@ * @since 3.2.0 */ @SuppressWarnings("deprecation") -public class RemoteEndpointAwareJdkSSLOptions extends JdkSSLOptions implements RemoteEndpointAwareSSLOptions { +public class RemoteEndpointAwareJdkSSLOptions extends JdkSSLOptions + implements ExtendedRemoteEndpointAwareSslOptions { - /** - * Creates a builder to create a new instance. - * - * @return the builder. - */ - public static Builder builder() { - return new Builder(); - } + /** + * Creates a builder to create a new instance. + * + * @return the builder. + */ + public static Builder builder() { + return new Builder(); + } - /** - * Creates a new instance. - * - * @param context the SSL context. - * @param cipherSuites the cipher suites to use. - */ - protected RemoteEndpointAwareJdkSSLOptions(SSLContext context, String[] cipherSuites) { - super(context, cipherSuites); - } + /** + * Creates a new instance. + * + * @param context the SSL context. + * @param cipherSuites the cipher suites to use. 
+ */ + protected RemoteEndpointAwareJdkSSLOptions(SSLContext context, String[] cipherSuites) { + super(context, cipherSuites); + } - @Override - public SslHandler newSSLHandler(SocketChannel channel) { - throw new AssertionError("This class implements RemoteEndpointAwareSSLOptions, this method should not be called"); + @Override + public SslHandler newSSLHandler(SocketChannel channel) { + throw new AssertionError( + "This class implements RemoteEndpointAwareSSLOptions, this method should not be called"); + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel, EndPoint remoteEndpoint) { + SSLEngine engine = + newSSLEngine(channel, remoteEndpoint == null ? null : remoteEndpoint.resolve()); + return new SslHandler(engine); + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint) { + throw new AssertionError( + "The driver should never call this method on an object that implements " + + this.getClass().getSimpleName()); + } + + /** + * Creates an SSL engine each time a connection is established. + * + *

You might want to override this if you need to fine-tune the engine's configuration (for + * example enabling hostname verification). + * + * @param channel the Netty channel for that connection. + * @param remoteEndpoint the remote endpoint we are connecting to. + * @return the engine. + * @since 3.2.0 + */ + protected SSLEngine newSSLEngine( + @SuppressWarnings("unused") SocketChannel channel, InetSocketAddress remoteEndpoint) { + SSLEngine engine; + if (remoteEndpoint == null) { + engine = context.createSSLEngine(); + } else { + engine = context.createSSLEngine(remoteEndpoint.getHostName(), remoteEndpoint.getPort()); } + engine.setUseClientMode(true); + if (cipherSuites != null) engine.setEnabledCipherSuites(cipherSuites); + return engine; + } + + /** Helper class to build {@link RemoteEndpointAwareJdkSSLOptions} instances. */ + public static class Builder extends JdkSSLOptions.Builder { @Override - public SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint) { - SSLEngine engine = newSSLEngine(channel, remoteEndpoint); - return new SslHandler(engine); + public RemoteEndpointAwareJdkSSLOptions.Builder withSSLContext(SSLContext context) { + super.withSSLContext(context); + return this; } - /** - * Creates an SSL engine each time a connection is established. - *
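A hedged sketch of the kind of override suggested above: the subclass name and the use of the "HTTPS" endpoint-identification algorithm are illustrative choices, not something this patch prescribes.

```java
import com.datastax.driver.core.RemoteEndpointAwareJdkSSLOptions;
import io.netty.channel.socket.SocketChannel;
import java.net.InetSocketAddress;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;

public class HostnameVerifyingSSLOptions extends RemoteEndpointAwareJdkSSLOptions {

  public HostnameVerifyingSSLOptions(SSLContext context) {
    super(context, null); // null = keep the context's default cipher suites
  }

  @Override
  protected SSLEngine newSSLEngine(SocketChannel channel, InetSocketAddress remoteEndpoint) {
    SSLEngine engine = super.newSSLEngine(channel, remoteEndpoint);
    // Enable hostname verification on top of the engine created by the parent class.
    SSLParameters parameters = engine.getSSLParameters();
    parameters.setEndpointIdentificationAlgorithm("HTTPS");
    engine.setSSLParameters(parameters);
    return engine;
  }
}
```

An instance built this way would then typically be handed to the driver via Cluster.Builder#withSSL(SSLOptions).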

- * You might want to override this if you need to fine-tune the engine's configuration - * (for example enabling hostname verification). - * - * @param channel the Netty channel for that connection. - * @param remoteEndpoint the remote endpoint we are connecting to. - * @return the engine. - * @since 3.2.0 - */ - protected SSLEngine newSSLEngine(@SuppressWarnings("unused") SocketChannel channel, InetSocketAddress remoteEndpoint) { - SSLEngine engine = remoteEndpoint == null - ? context.createSSLEngine() - : context.createSSLEngine(remoteEndpoint.getHostName(), remoteEndpoint.getPort()); - engine.setUseClientMode(true); - if (cipherSuites != null) - engine.setEnabledCipherSuites(cipherSuites); - return engine; + @Override + public RemoteEndpointAwareJdkSSLOptions.Builder withCipherSuites(String[] cipherSuites) { + super.withCipherSuites(cipherSuites); + return this; } - /** - * Helper class to build JDK-based SSL options. - */ - public static class Builder extends JdkSSLOptions.Builder { - - /** - * Builds a new instance based on the parameters provided to this builder. - * - * @return the new instance. - */ - @Override - public RemoteEndpointAwareJdkSSLOptions build() { - return new RemoteEndpointAwareJdkSSLOptions(context, cipherSuites); - } + @Override + public RemoteEndpointAwareJdkSSLOptions build() { + return new RemoteEndpointAwareJdkSSLOptions(context, cipherSuites); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.java index a5740680275..f2f4a809804 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,36 +20,47 @@ import io.netty.channel.socket.SocketChannel; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; - import java.net.InetSocketAddress; /** * {@link RemoteEndpointAwareSSLOptions} implementation based on Netty's SSL context. - *

- * Netty has the ability to use OpenSSL if available, instead of the JDK's built-in engine. This yields better performance. + * + *

Netty has the ability to use OpenSSL if available, instead of the JDK's built-in engine. This + * yields better performance. * * @see JAVA-1364 * @since 3.2.0 */ @SuppressWarnings("deprecation") -public class RemoteEndpointAwareNettySSLOptions extends NettySSLOptions implements RemoteEndpointAwareSSLOptions { +public class RemoteEndpointAwareNettySSLOptions extends NettySSLOptions + implements ExtendedRemoteEndpointAwareSslOptions { + + /** + * Create a new instance from a given context. + * + * @param context the Netty context. {@code SslContextBuilder.forClient()} provides a fluent API + * to build it. + */ + public RemoteEndpointAwareNettySSLOptions(SslContext context) { + super(context); + } - /** - * Create a new instance from a given context. - * - * @param context the Netty context. {@code SslContextBuilder.forClient()} provides a fluent API to build it. - */ - public RemoteEndpointAwareNettySSLOptions(SslContext context) { - super(context); - } + @Override + public SslHandler newSSLHandler(SocketChannel channel) { + throw new AssertionError( + "This class implements RemoteEndpointAwareSSLOptions, this method should not be called"); + } - @Override - public SslHandler newSSLHandler(SocketChannel channel) { - throw new AssertionError("This class implements RemoteEndpointAwareSSLOptions, this method should not be called"); - } + @Override + public SslHandler newSSLHandler(SocketChannel channel, EndPoint remoteEndpoint) { + InetSocketAddress address = remoteEndpoint.resolve(); + return context.newHandler(channel.alloc(), address.getHostName(), address.getPort()); + } - @Override - public SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint) { - return context.newHandler(channel.alloc(), remoteEndpoint.getHostName(), remoteEndpoint.getPort()); - } + @Override + public SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint) { + throw new AssertionError( + "The driver should never call this method on an object that implements " + + this.getClass().getSimpleName()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.java index 707ee189c3c..41a537a3fca 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
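For completeness, a sketch of wiring a Netty SslContext into the driver. The trust-store path is a placeholder, and netty-tcnative is assumed to be on the classpath if the OpenSSL provider is desired:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.RemoteEndpointAwareNettySSLOptions;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import java.io.File;
import javax.net.ssl.SSLException;

public class NettySslExample {
  public static Cluster buildCluster() throws SSLException {
    // SslContextBuilder.forClient() is the fluent API mentioned in the constructor Javadoc above.
    SslContext context =
        SslContextBuilder.forClient()
            .trustManager(new File("/path/to/ca-certs.pem")) // placeholder path
            .build();
    return Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withSSL(new RemoteEndpointAwareNettySSLOptions(context))
        .build();
  }
}
```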
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,37 +19,37 @@ import io.netty.channel.socket.SocketChannel; import io.netty.handler.ssl.SslHandler; - import java.net.InetSocketAddress; /** - * Child interface to {@link SSLOptions} with the possibility to pass remote endpoint data - * when instantiating {@link SslHandler}s. - *

- * This is needed when e.g. hostname verification is required. - * See JAVA-1364 for details. - *

- * The reason this is a child interface is to keep {@link SSLOptions} backwards-compatible. - * This interface may be be merged into {@link SSLOptions} in a later major release. + * Child interface to {@link SSLOptions} with the possibility to pass remote endpoint data when + * instantiating {@link SslHandler}s. + * + *

This is needed when e.g. hostname verification is required. See JAVA-1364 for details. + * + *

The reason this is a child interface is to keep {@link SSLOptions} backwards-compatible. This + * interface may be merged into {@link SSLOptions} in a later major release. * * @see JAVA-1364 * @since 3.2.0 */ public interface RemoteEndpointAwareSSLOptions extends SSLOptions { - /** - * Creates a new SSL handler for the given Netty channel and the given remote endpoint. - *

- * This gets called each time the driver opens a new connection to a Cassandra host. The newly created handler will be added - * to the channel's pipeline to provide SSL support for the connection. - *

- * You don't necessarily need to implement this method directly; see the provided implementations: - * {@link RemoteEndpointAwareJdkSSLOptions} and {@link RemoteEndpointAwareNettySSLOptions}. - * - * @param channel the channel. - * @param remoteEndpoint the remote endpoint address. - * @return a newly-created {@link SslHandler}. - */ - SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint); - + /** + * Creates a new SSL handler for the given Netty channel and the given remote endpoint. + * + *

This gets called each time the driver opens a new connection to a Cassandra host. The newly + * created handler will be added to the channel's pipeline to provide SSL support for the + * connection. + * + *

You don't necessarily need to implement this method directly; see the provided + * implementations: {@link RemoteEndpointAwareJdkSSLOptions} and {@link + * RemoteEndpointAwareNettySSLOptions}. + * + * @param channel the channel. + * @param remoteEndpoint the remote endpoint address. + * @return a newly-created {@link SslHandler}. + */ + SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReplicationFactor.java b/driver-core/src/main/java/com/datastax/driver/core/ReplicationFactor.java new file mode 100644 index 00000000000..0d217f1092a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ReplicationFactor.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +// This class is a subset of server version at org.apache.cassandra.locator.ReplicationFactor + +class ReplicationFactor { + private final int allReplicas; + private final int fullReplicas; + private final int transientReplicas; + + ReplicationFactor(int allReplicas, int transientReplicas) { + this.allReplicas = allReplicas; + this.transientReplicas = transientReplicas; + this.fullReplicas = allReplicas - transientReplicas; + } + + ReplicationFactor(int allReplicas) { + this(allReplicas, 0); + } + + int fullReplicas() { + return fullReplicas; + } + + int transientReplicas() { + return transientReplicas; + } + + boolean hasTransientReplicas() { + return transientReplicas > 0; + } + + static ReplicationFactor fromString(String s) { + if (s.contains("/")) { + int slash = s.indexOf('/'); + String allPart = s.substring(0, slash); + String transientPart = s.substring(slash + 1); + return new ReplicationFactor(Integer.parseInt(allPart), Integer.parseInt(transientPart)); + } else { + return new ReplicationFactor(Integer.parseInt(s), 0); + } + } + + @Override + public String toString() { + return allReplicas + (hasTransientReplicas() ? 
"/" + transientReplicas() : ""); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReplicationFactor)) { + return false; + } + ReplicationFactor that = (ReplicationFactor) o; + return allReplicas == that.allReplicas && transientReplicas == that.transientReplicas; + } + + @Override + public int hashCode() { + return allReplicas ^ transientReplicas; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java b/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java index 605d37335b3..cfee11b67a8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ReplicationStategy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,244 +20,250 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - /* * Computes the token->list association, given the token ring and token->primary token map. * * Note: it's not an interface mainly because we don't want to expose it. */ abstract class ReplicationStrategy { - private static final Logger logger = LoggerFactory.getLogger(ReplicationStrategy.class); + private static final Logger logger = LoggerFactory.getLogger(ReplicationStrategy.class); - static ReplicationStrategy create(Map replicationOptions) { + static ReplicationStrategy create(Map replicationOptions) { - String strategyClass = replicationOptions.get("class"); - if (strategyClass == null) - return null; + String strategyClass = replicationOptions.get("class"); + if (strategyClass == null) return null; - try { - if (strategyClass.contains("SimpleStrategy")) { - String repFactorString = replicationOptions.get("replication_factor"); - return repFactorString == null ? null : new SimpleStrategy(Integer.parseInt(repFactorString)); - } else if (strategyClass.contains("NetworkTopologyStrategy")) { - Map dcRfs = new HashMap(); - for (Map.Entry entry : replicationOptions.entrySet()) { - if (entry.getKey().equals("class")) - continue; + try { + if (strategyClass.contains("SimpleStrategy")) { + String repFactorString = replicationOptions.get("replication_factor"); + return repFactorString == null + ? 
null + : new SimpleStrategy(ReplicationFactor.fromString(repFactorString)); + } else if (strategyClass.contains("NetworkTopologyStrategy")) { + Map dcRfs = new HashMap(); + for (Map.Entry entry : replicationOptions.entrySet()) { + if (entry.getKey().equals("class")) continue; - dcRfs.put(entry.getKey(), Integer.parseInt(entry.getValue())); - } - return new NetworkTopologyStrategy(dcRfs); - } else { - // We might want to support oldNetworkTopologyStrategy, though not sure anyone still using that - return null; - } - } catch (NumberFormatException e) { - // Cassandra wouldn't let that pass in the first place so this really should never happen - logger.error("Failed to parse replication options: " + replicationOptions, e); - return null; + dcRfs.put(entry.getKey(), ReplicationFactor.fromString(entry.getValue())); } + return new NetworkTopologyStrategy(dcRfs); + } else { + // We might want to support oldNetworkTopologyStrategy, though not sure anyone still using + // that + return null; + } + } catch (NumberFormatException e) { + // Cassandra wouldn't let that pass in the first place so this really should never happen + logger.error("Failed to parse replication options: " + replicationOptions, e); + return null; } + } - abstract Map> computeTokenToReplicaMap(String keyspaceName, Map tokenToPrimary, List ring); + abstract Map> computeTokenToReplicaMap( + String keyspaceName, Map tokenToPrimary, List ring); - private static Token getTokenWrapping(int i, List ring) { - return ring.get(i % ring.size()); - } + private static Token getTokenWrapping(int i, List ring) { + return ring.get(i % ring.size()); + } - static class SimpleStrategy extends ReplicationStrategy { + static class SimpleStrategy extends ReplicationStrategy { - private final int replicationFactor; + private final ReplicationFactor replicationFactor; - private SimpleStrategy(int replicationFactor) { - this.replicationFactor = replicationFactor; - } + private SimpleStrategy(ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + } - @Override - Map> computeTokenToReplicaMap(String keyspaceName, Map tokenToPrimary, List ring) { + @Override + Map> computeTokenToReplicaMap( + String keyspaceName, Map tokenToPrimary, List ring) { + + int rf = Math.min(replicationFactor.fullReplicas(), ring.size()); + + Map> replicaMap = new HashMap>(tokenToPrimary.size()); + for (int i = 0; i < ring.size(); i++) { + // Consecutive sections of the ring can assigned to the same host + Set replicas = new LinkedHashSet(); + for (int j = 0; j < ring.size() && replicas.size() < rf; j++) + replicas.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); + replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); + } + return replicaMap; + } - int rf = Math.min(replicationFactor, ring.size()); + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - Map> replicaMap = new HashMap>(tokenToPrimary.size()); - for (int i = 0; i < ring.size(); i++) { - // Consecutive sections of the ring can assigned to the same host - Set replicas = new LinkedHashSet(); - for (int j = 0; j < ring.size() && replicas.size() < rf; j++) - replicas.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); - replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); - } - return replicaMap; - } + SimpleStrategy that = (SimpleStrategy) o; - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; 
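To make the new replication-factor parsing concrete, an illustrative snippet follows. ReplicationFactor and ReplicationStrategy are package-private, so this would only compile inside com.datastax.driver.core; the option map mirrors what the driver reads from a keyspace's replication settings:

```java
ReplicationFactor plain = ReplicationFactor.fromString("3");   // 3 full replicas, 0 transient
ReplicationFactor mixed = ReplicationFactor.fromString("5/2"); // 5 total: 3 full + 2 transient

Map<String, String> options = new HashMap<String, String>();
options.put("class", "org.apache.cassandra.locator.NetworkTopologyStrategy");
options.put("dc1", "3");
options.put("dc2", "3/1"); // transient-replication syntax, parsed by fromString above
ReplicationStrategy strategy = ReplicationStrategy.create(options);
```

At the public API level, the replica placement computed by these strategies surfaces through Metadata#getReplicas(String, ByteBuffer).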
+ return replicationFactor.equals(that.replicationFactor); + } - SimpleStrategy that = (SimpleStrategy) o; + @Override + public int hashCode() { + return replicationFactor.hashCode(); + } + } - return replicationFactor == that.replicationFactor; + static class NetworkTopologyStrategy extends ReplicationStrategy { + private static final Logger logger = LoggerFactory.getLogger(NetworkTopologyStrategy.class); - } + private final Map replicationFactors; - @Override - public int hashCode() { - return replicationFactor; - } + private NetworkTopologyStrategy(Map replicationFactors) { + this.replicationFactors = replicationFactors; } - static class NetworkTopologyStrategy extends ReplicationStrategy { - private static final Logger logger = LoggerFactory.getLogger(NetworkTopologyStrategy.class); - - private final Map replicationFactors; - - private NetworkTopologyStrategy(Map replicationFactors) { - this.replicationFactors = replicationFactors; + @Override + Map> computeTokenToReplicaMap( + String keyspaceName, Map tokenToPrimary, List ring) { + + logger.debug("Computing token to replica map for keyspace: {}.", keyspaceName); + + // Track how long it takes to compute the token to replica map + long startTime = System.currentTimeMillis(); + + // This is essentially a copy of org.apache.cassandra.locator.NetworkTopologyStrategy + Map> racks = getRacksInDcs(tokenToPrimary.values()); + Map> replicaMap = new HashMap>(tokenToPrimary.size()); + Map dcHostCount = Maps.newHashMapWithExpectedSize(replicationFactors.size()); + Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); + // find maximum number of nodes in each DC + for (Host host : Sets.newHashSet(tokenToPrimary.values())) { + String dc = host.getDatacenter(); + if (dcHostCount.get(dc) == null) { + dcHostCount.put(dc, 0); + } + dcHostCount.put(dc, dcHostCount.get(dc) + 1); + } + for (int i = 0; i < ring.size(); i++) { + Map> allDcReplicas = new HashMap>(); + Map> seenRacks = new HashMap>(); + Map> skippedDcEndpoints = new HashMap>(); + for (String dc : replicationFactors.keySet()) { + allDcReplicas.put(dc, new HashSet()); + seenRacks.put(dc, new HashSet()); + skippedDcEndpoints.put(dc, new LinkedHashSet()); // preserve order } - @Override - Map> computeTokenToReplicaMap(String keyspaceName, Map tokenToPrimary, List ring) { - - logger.debug("Computing token to replica map for keyspace: {}.", keyspaceName); - - // Track how long it takes to compute the token to replica map - long startTime = System.currentTimeMillis(); - - // This is essentially a copy of org.apache.cassandra.locator.NetworkTopologyStrategy - Map> racks = getRacksInDcs(tokenToPrimary.values()); - Map> replicaMap = new HashMap>(tokenToPrimary.size()); - Map dcHostCount = Maps.newHashMapWithExpectedSize(replicationFactors.size()); - Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); - // find maximum number of nodes in each DC - for (Host host : Sets.newHashSet(tokenToPrimary.values())) { - String dc = host.getDatacenter(); - if (dcHostCount.get(dc) == null) { - dcHostCount.put(dc, 0); - } - dcHostCount.put(dc, dcHostCount.get(dc) + 1); - } - for (int i = 0; i < ring.size(); i++) { - Map> allDcReplicas = new HashMap>(); - Map> seenRacks = new HashMap>(); - Map> skippedDcEndpoints = new HashMap>(); - for (String dc : replicationFactors.keySet()) { - allDcReplicas.put(dc, new HashSet()); - seenRacks.put(dc, new HashSet()); - skippedDcEndpoints.put(dc, new LinkedHashSet()); // preserve order - } - - // Preserve order - primary replica will be 
first - Set replicas = new LinkedHashSet(); - for (int j = 0; j < ring.size() && !allDone(allDcReplicas, dcHostCount); j++) { - Host h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); - String dc = h.getDatacenter(); - if (dc == null || !allDcReplicas.containsKey(dc)) - continue; - - Integer rf = replicationFactors.get(dc); - Set dcReplicas = allDcReplicas.get(dc); - if (rf == null || dcReplicas.size() >= rf) - continue; - - String rack = h.getRack(); - // Check if we already visited all racks in dc - if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { - replicas.add(h); - dcReplicas.add(h); - } else { - // Is this a new rack? - if (seenRacks.get(dc).contains(rack)) { - skippedDcEndpoints.get(dc).add(h); - } else { - replicas.add(h); - dcReplicas.add(h); - seenRacks.get(dc).add(rack); - // If we've run out of distinct racks, add the nodes skipped so far - if (seenRacks.get(dc).size() == racks.get(dc).size()) { - Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); - while (skippedIt.hasNext() && dcReplicas.size() < rf) { - Host nextSkipped = skippedIt.next(); - replicas.add(nextSkipped); - dcReplicas.add(nextSkipped); - } - } - } - } - } - - // If we haven't found enough replicas after a whole trip around the ring, this probably - // means that the replication factors are broken. - // Warn the user because that leads to quadratic performance of this method (JAVA-702). - for (Map.Entry> entry : allDcReplicas.entrySet()) { - String dcName = entry.getKey(); - int expectedFactor = replicationFactors.get(dcName); - int achievedFactor = entry.getValue().size(); - if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { - logger.warn("Error while computing token map for keyspace {} with datacenter {}: " - + "could not achieve replication factor {} (found {} replicas only), " - + "check your keyspace replication settings.", - keyspaceName, dcName, expectedFactor, achievedFactor); - // only warn once per DC - warnedDcs.add(dcName); - } + // Preserve order - primary replica will be first + Set replicas = new LinkedHashSet(); + for (int j = 0; j < ring.size() && !allDone(allDcReplicas, dcHostCount); j++) { + Host h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); + String dc = h.getDatacenter(); + if (dc == null || !allDcReplicas.containsKey(dc)) continue; + + Integer rf = replicationFactors.get(dc).fullReplicas(); + Set dcReplicas = allDcReplicas.get(dc); + if (rf == null || dcReplicas.size() >= rf) continue; + + String rack = h.getRack(); + // Check if we already visited all racks in dc + if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { + replicas.add(h); + dcReplicas.add(h); + } else { + // Is this a new rack? 
+ if (seenRacks.get(dc).contains(rack)) { + skippedDcEndpoints.get(dc).add(h); + } else { + replicas.add(h); + dcReplicas.add(h); + seenRacks.get(dc).add(rack); + // If we've run out of distinct racks, add the nodes skipped so far + if (seenRacks.get(dc).size() == racks.get(dc).size()) { + Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); + while (skippedIt.hasNext() && dcReplicas.size() < rf) { + Host nextSkipped = skippedIt.next(); + replicas.add(nextSkipped); + dcReplicas.add(nextSkipped); } - - replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); + } } - - long duration = System.currentTimeMillis() - startTime; - logger.debug("Token to replica map computation for keyspace {} completed in {} milliseconds", - keyspaceName, duration); - - return replicaMap; + } } - private boolean allDone(Map> map, Map dcHostCount) { - for (Map.Entry> entry : map.entrySet()) { - String dc = entry.getKey(); - int dcCount = dcHostCount.get(dc) == null ? 0 : dcHostCount.get(dc); - if (entry.getValue().size() < Math.min(replicationFactors.get(dc), dcCount)) - return false; - } - return true; + // If we haven't found enough replicas after a whole trip around the ring, this probably + // means that the replication factors are broken. + // Warn the user because that leads to quadratic performance of this method (JAVA-702). + for (Map.Entry> entry : allDcReplicas.entrySet()) { + String dcName = entry.getKey(); + int expectedFactor = replicationFactors.get(dcName).fullReplicas(); + int achievedFactor = entry.getValue().size(); + if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { + logger.warn( + "Error while computing token map for keyspace {} with datacenter {}: " + + "could not achieve replication factor {} (found {} replicas only), " + + "check your keyspace replication settings.", + keyspaceName, + dcName, + expectedFactor, + achievedFactor); + // only warn once per DC + warnedDcs.add(dcName); + } } - private Map> getRacksInDcs(Iterable hosts) { - Map> result = new HashMap>(); - for (Host host : hosts) { - Set racks = result.get(host.getDatacenter()); - if (racks == null) { - racks = new HashSet(); - result.put(host.getDatacenter(), racks); - } - racks.add(host.getRack()); - } - return result; - } + replicaMap.put(ring.get(i), ImmutableSet.copyOf(replicas)); + } - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + long duration = System.currentTimeMillis() - startTime; + logger.debug( + "Token to replica map computation for keyspace {} completed in {} milliseconds", + keyspaceName, + duration); - NetworkTopologyStrategy that = (NetworkTopologyStrategy) o; + return replicaMap; + } - return replicationFactors.equals(that.replicationFactors); + private boolean allDone(Map> map, Map dcHostCount) { + for (Map.Entry> entry : map.entrySet()) { + String dc = entry.getKey(); + int dcCount = dcHostCount.get(dc) == null ? 
0 : dcHostCount.get(dc); + if (entry.getValue().size() < Math.min(replicationFactors.get(dc).fullReplicas(), dcCount)) + return false; + } + return true; + } + private Map> getRacksInDcs(Iterable hosts) { + Map> result = new HashMap>(); + for (Host host : hosts) { + Set racks = result.get(host.getDatacenter()); + if (racks == null) { + racks = new HashSet(); + result.put(host.getDatacenter(), racks); } + racks.add(host.getRack()); + } + return result; + } - @Override - public int hashCode() { - return replicationFactors.hashCode(); - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + NetworkTopologyStrategy that = (NetworkTopologyStrategy) o; + + return replicationFactors.equals(that.replicationFactors); } + @Override + public int hashCode() { + return replicationFactors.hashCode(); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java b/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java index 27cdf129610..25b813e3517 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RequestHandler.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,21 +18,31 @@ package com.datastax.driver.core; import com.codahale.metrics.Timer; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.BusyConnectionException; +import com.datastax.driver.core.exceptions.BusyPoolException; +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteFailureException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy.SpeculativeExecutionPlan; import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterators; import com.google.common.collect.Sets; import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import io.netty.util.Timeout; import io.netty.util.TimerTask; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetSocketAddress; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -42,843 +54,1027 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Handles a request to cassandra, dealing with host failover and retries on - * unavailable/timeout. + * Handles a request to cassandra, dealing with host failover and retries on unavailable/timeout. 
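+ * Each attempt is modelled by an inner {@link SpeculativeExecution}; several of them may run
+ * concurrently against different hosts when speculative executions are enabled.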
*/ class RequestHandler { - private static final Logger logger = LoggerFactory.getLogger(RequestHandler.class); - - final String id; - - private final SessionManager manager; - private final Callback callback; - - private final QueryPlan queryPlan; - private final SpeculativeExecutionPlan speculativeExecutionPlan; - private final boolean allowSpeculativeExecutions; - private final Set runningExecutions = Sets.newCopyOnWriteArraySet(); - private final Set scheduledExecutions = Sets.newCopyOnWriteArraySet(); - private final Statement statement; - private final io.netty.util.Timer scheduler; - - private volatile List triedHosts; - private volatile ConcurrentMap errors; - - private final Timer.Context timerContext; - private final long startTime; - - private final AtomicBoolean isDone = new AtomicBoolean(); - private final AtomicInteger executionIndex = new AtomicInteger(); - - public RequestHandler(SessionManager manager, Callback callback, Statement statement) { - this.id = Long.toString(System.identityHashCode(this)); - if (logger.isTraceEnabled()) - logger.trace("[{}] {}", id, statement); - this.manager = manager; - this.callback = callback; - this.scheduler = manager.cluster.manager.connectionFactory.timer; - - callback.register(this); - - this.queryPlan = new QueryPlan(manager.loadBalancingPolicy().newQueryPlan(manager.poolsState.keyspace, statement)); - this.speculativeExecutionPlan = manager.speculativeExecutionPolicy().newPlan(manager.poolsState.keyspace, statement); - this.allowSpeculativeExecutions = statement != Statement.DEFAULT - && statement.isIdempotentWithDefault(manager.configuration().getQueryOptions()); - this.statement = statement; - - this.timerContext = metricsEnabled() - ? metrics().getRequestsTimer().time() - : null; - this.startTime = System.nanoTime(); + private static final Logger logger = LoggerFactory.getLogger(RequestHandler.class); + + private static final boolean HOST_METRICS_ENABLED = + Boolean.getBoolean("com.datastax.driver.HOST_METRICS_ENABLED"); + private static final QueryLogger QUERY_LOGGER = QueryLogger.builder().build(); + static final String DISABLE_QUERY_WARNING_LOGS = "com.datastax.driver.DISABLE_QUERY_WARNING_LOGS"; + + final String id; + + private final SessionManager manager; + private final Callback callback; + + private final QueryPlan queryPlan; + private final SpeculativeExecutionPlan speculativeExecutionPlan; + private final boolean allowSpeculativeExecutions; + private final Set runningExecutions = Sets.newCopyOnWriteArraySet(); + private final Set scheduledExecutions = Sets.newCopyOnWriteArraySet(); + private final Statement statement; + private final io.netty.util.Timer scheduler; + + private volatile List triedHosts; + private volatile ConcurrentMap errors; + + private final Timer.Context timerContext; + private final long startTime; + + private final AtomicBoolean isDone = new AtomicBoolean(); + private final AtomicInteger executionIndex = new AtomicInteger(); + + public RequestHandler(SessionManager manager, Callback callback, Statement statement) { + this.id = Long.toString(System.identityHashCode(this)); + if (logger.isTraceEnabled()) logger.trace("[{}] {}", id, statement); + this.manager = manager; + this.callback = callback; + this.scheduler = manager.cluster.manager.connectionFactory.timer; + + callback.register(this); + + // If host is explicitly set on statement, bypass load balancing policy. 
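+ // (In that case statement.getHost() is non-null and the query plan below contains only that
+ // host, so no failover to other nodes is attempted for this request.)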
+ if (statement.getHost() != null) { + this.queryPlan = new QueryPlan(Iterators.singletonIterator(statement.getHost())); + } else { + this.queryPlan = + new QueryPlan( + manager.loadBalancingPolicy().newQueryPlan(manager.poolsState.keyspace, statement)); } - void sendRequest() { - startNewExecution(); + this.speculativeExecutionPlan = + manager.speculativeExecutionPolicy().newPlan(manager.poolsState.keyspace, statement); + this.allowSpeculativeExecutions = + statement != Statement.DEFAULT + && statement.isIdempotentWithDefault(manager.configuration().getQueryOptions()); + this.statement = statement; + + this.timerContext = metricsEnabled() ? metrics().getRequestsTimer().time() : null; + this.startTime = System.nanoTime(); + } + + void sendRequest() { + startNewExecution(); + } + + // Called when the corresponding ResultSetFuture is cancelled by the client + void cancel() { + if (!isDone.compareAndSet(false, true)) return; + + cancelPendingExecutions(null); + } + + private void startNewExecution() { + if (isDone.get()) return; + + Message.Request request = callback.request(); + int position = executionIndex.getAndIncrement(); + + SpeculativeExecution execution = new SpeculativeExecution(request, position); + runningExecutions.add(execution); + execution.findNextHostAndQuery(); + } + + private void scheduleExecution(long delayMillis) { + if (isDone.get() || delayMillis < 0) return; + if (logger.isTraceEnabled()) + logger.trace("[{}] Schedule next speculative execution in {} ms", id, delayMillis); + if (delayMillis == 0) { + // kick off request immediately + scheduleExecutionImmediately(); + } else { + scheduledExecutions.add( + scheduler.newTimeout(newExecutionTask, delayMillis, TimeUnit.MILLISECONDS)); } + } - // Called when the corresponding ResultSetFuture is cancelled by the client - void cancel() { - if (!isDone.compareAndSet(false, true)) - return; + private final TimerTask newExecutionTask = + new TimerTask() { + @Override + public void run(final Timeout timeout) throws Exception { + scheduledExecutions.remove(timeout); + if (!isDone.get()) { + // We're on the timer thread so reschedule to another executor + manager + .executor() + .execute( + new Runnable() { + @Override + public void run() { + scheduleExecutionImmediately(); + } + }); + } + } + }; + + private void scheduleExecutionImmediately() { + if (metricsEnabled()) metrics().getErrorMetrics().getSpeculativeExecutions().inc(); + startNewExecution(); + } + + private void cancelPendingExecutions(SpeculativeExecution ignore) { + for (SpeculativeExecution execution : runningExecutions) + if (execution != ignore) // not vital but this produces nicer logs + execution.cancel(); + for (Timeout execution : scheduledExecutions) execution.cancel(); + } + + private void setFinalResult( + SpeculativeExecution execution, Connection connection, Message.Response response) { + if (!isDone.compareAndSet(false, true)) { + if (logger.isTraceEnabled()) + logger.trace("[{}] Got beaten to setting the result", execution.id); + return; + } - cancelPendingExecutions(null); + if (logger.isTraceEnabled()) logger.trace("[{}] Setting final result", execution.id); + + cancelPendingExecutions(execution); + + try { + if (timerContext != null) timerContext.stop(); + + ExecutionInfo info; + int speculativeExecutions = executionIndex.get() - 1; + // Avoid creating a new instance if we can reuse the host's default one + if (execution.position == 0 + && speculativeExecutions == 0 + && triedHosts == null + && execution.retryConsistencyLevel == null + && 
response.getCustomPayload() == null) { + info = execution.current.defaultExecutionInfo; + } else { + List hosts; + if (triedHosts == null) { + hosts = ImmutableList.of(execution.current); + } else { + hosts = triedHosts; + hosts.add(execution.current); + } + info = + new ExecutionInfo( + speculativeExecutions, + execution.position, + hosts, + execution.retryConsistencyLevel, + response.getCustomPayload()); + } + // if the response from the server has warnings, they'll be set on the ExecutionInfo. Log them + // here, unless they've been disabled. + if (response.warnings != null + && !response.warnings.isEmpty() + && !Boolean.getBoolean(RequestHandler.DISABLE_QUERY_WARNING_LOGS) + && logger.isWarnEnabled()) { + logServerWarnings(response.warnings); + } + callback.onSet(connection, response, info, statement, System.nanoTime() - startTime); + } catch (Exception e) { + callback.onException( + connection, + new DriverInternalError( + "Unexpected exception while setting final result from " + response, e), + System.nanoTime() - startTime, /*unused*/ + 0); + } + } + + private void logServerWarnings(List warnings) { + // truncate the statement query to the DEFAULT_MAX_QUERY_STRING_LENGTH, if necessary + final String queryString = QUERY_LOGGER.statementAsString(statement); + // log each warning separately + for (String warning : warnings) { + logger.warn("Query '{}' generated server side warning(s): {}", queryString, warning); + } + } + + private void setFinalException( + SpeculativeExecution execution, Connection connection, Exception exception) { + if (!isDone.compareAndSet(false, true)) { + if (logger.isTraceEnabled()) + logger.trace("[{}] Got beaten to setting final exception", execution.id); + return; } - private void startNewExecution() { - if (isDone.get()) - return; + if (logger.isTraceEnabled()) logger.trace("[{}] Setting final exception", execution.id); - Message.Request request = callback.request(); - int position = executionIndex.getAndIncrement(); + cancelPendingExecutions(execution); - SpeculativeExecution execution = new SpeculativeExecution(request, position); - runningExecutions.add(execution); - execution.findNextHostAndQuery(); + try { + if (timerContext != null) timerContext.stop(); + } finally { + callback.onException(connection, exception, System.nanoTime() - startTime, /*unused*/ 0); } - - private void scheduleExecution(long delayMillis) { - if (isDone.get() || delayMillis <= 0) - return; - if (logger.isTraceEnabled()) - logger.trace("[{}] Schedule next speculative execution in {} ms", id, delayMillis); - scheduledExecutions.add(scheduler.newTimeout(newExecutionTask, delayMillis, TimeUnit.MILLISECONDS)); + } + + // Triggered when an execution reaches the end of the query plan. + // This is only a failure if there are no other running executions. + private void reportNoMoreHosts(SpeculativeExecution execution) { + runningExecutions.remove(execution); + if (runningExecutions.isEmpty()) + setFinalException( + execution, + null, + new NoHostAvailableException( + errors == null ? Collections.emptyMap() : errors)); + } + + private boolean metricsEnabled() { + return manager.configuration().getMetricsOptions().isEnabled(); + } + + private boolean hostMetricsEnabled() { + return HOST_METRICS_ENABLED && metricsEnabled(); + } + + private Metrics metrics() { + return manager.cluster.manager.metrics; + } + + private RetryPolicy retryPolicy() { + return statement.getRetryPolicy() == null + ? 
manager.configuration().getPolicies().getRetryPolicy() + : statement.getRetryPolicy(); + } + + interface Callback extends Connection.ResponseCallback { + void onSet( + Connection connection, + Message.Response response, + ExecutionInfo info, + Statement statement, + long latency); + + void register(RequestHandler handler); + } + + /** + * An execution of the query against the cluster. There is at least one instance per + * RequestHandler, and possibly more (depending on the SpeculativeExecutionPolicy). Each instance + * may retry on the same host, or on other hosts as defined by the RetryPolicy. All instances run + * concurrently and share the same query plan. There are three ways a SpeculativeExecution can + * stop: - it completes the query (with either a success or a fatal error), and reports the result + * to the RequestHandler - it gets cancelled, either because another execution completed the + * query, or because the RequestHandler was cancelled - it reaches the end of the query plan and + * informs the RequestHandler, which will decide what to do + */ + class SpeculativeExecution implements Connection.ResponseCallback { + final String id; + private final Message.Request request; + private final int position; + private volatile Host current; + private volatile ConsistencyLevel retryConsistencyLevel; + private final AtomicReference queryStateRef; + private final AtomicBoolean nextExecutionScheduled = new AtomicBoolean(); + private final long startTime = System.nanoTime(); + + // This represents the number of times a retry has been triggered by the RetryPolicy (this is + // different from + // queryStateRef.get().retryCount, because some retries don't involve the policy, for example + // after an + // UNPREPARED response). + // This is incremented by one writer at a time, so volatile is good enough. 
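+ // (For instance, the UNPREPARED branch of onSet() re-prepares and re-executes the request
+ // without consulting the RetryPolicy, which is why the two counters can diverge.)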
+ private volatile int retriesByPolicy; + + private volatile Connection.ResponseHandler connectionHandler; + + SpeculativeExecution(Message.Request request, int position) { + this.id = RequestHandler.this.id + "-" + position; + this.request = request; + this.position = position; + this.queryStateRef = new AtomicReference(QueryState.INITIAL); + if (logger.isTraceEnabled()) logger.trace("[{}] Starting", id); } - private final TimerTask newExecutionTask = new TimerTask() { - @Override - public void run(final Timeout timeout) throws Exception { - scheduledExecutions.remove(timeout); - if (!isDone.get()) - // We're on the timer thread so reschedule to another executor - manager.executor().execute(new Runnable() { - @Override - public void run() { - if (metricsEnabled()) - metrics().getErrorMetrics().getSpeculativeExecutions().inc(); - startNewExecution(); - } - }); + void findNextHostAndQuery() { + try { + Host host; + while (!isDone.get() + && (host = queryPlan.next()) != null + && !queryStateRef.get().isCancelled()) { + if (query(host)) { + if (hostMetricsEnabled()) { + metrics().getRegistry().counter(MetricsUtil.hostMetricName("writes.", host)).inc(); + } + return; + } else if (hostMetricsEnabled()) { + metrics() + .getRegistry() + .counter(MetricsUtil.hostMetricName("write-errors.", host)) + .inc(); + } } - }; - - private void cancelPendingExecutions(SpeculativeExecution ignore) { - for (SpeculativeExecution execution : runningExecutions) - if (execution != ignore) // not vital but this produces nicer logs - execution.cancel(); - for (Timeout execution : scheduledExecutions) - execution.cancel(); + if (current != null) { + if (triedHosts == null) triedHosts = new CopyOnWriteArrayList(); + triedHosts.add(current); + } + reportNoMoreHosts(this); + } catch (Exception e) { + // Shouldn't happen really, but if ever the loadbalancing policy returned iterator throws, + // we don't want to block. + setFinalException( + null, + new DriverInternalError("An unexpected error happened while sending requests", e)); + } } - private void setFinalResult(SpeculativeExecution execution, Connection connection, Message.Response response) { - if (!isDone.compareAndSet(false, true)) { - if (logger.isTraceEnabled()) - logger.trace("[{}] Got beaten to setting the result", execution.id); - return; - } + private boolean query(final Host host) { + HostConnectionPool pool = manager.pools.get(host); + if (pool == null || pool.isClosed()) return false; + + if (logger.isTraceEnabled()) logger.trace("[{}] Querying node {}", id, host); + + if (allowSpeculativeExecutions && nextExecutionScheduled.compareAndSet(false, true)) + scheduleExecution(speculativeExecutionPlan.nextExecution(host)); + + PoolingOptions poolingOptions = manager.configuration().getPoolingOptions(); + ListenableFuture connectionFuture = + pool.borrowConnection( + poolingOptions.getPoolTimeoutMillis(), + TimeUnit.MILLISECONDS, + poolingOptions.getMaxQueueSize()); + GuavaCompatibility.INSTANCE.addCallback( + connectionFuture, + new FutureCallback() { + @Override + public void onSuccess(Connection connection) { + if (isDone.get()) { + connection.release(); + return; + } + if (current != null) { + if (triedHosts == null) triedHosts = new CopyOnWriteArrayList(); + triedHosts.add(current); + } + current = host; + try { + write(connection, SpeculativeExecution.this); + } catch (ConnectionException e) { + // If we have any problem with the connection, move to the next node. 
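+ // The error is also recorded via logError(), so it will show up in the
+ // NoHostAvailableException if every host in the query plan ends up failing.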
+ if (metricsEnabled()) metrics().getErrorMetrics().getConnectionErrors().inc(); + if (connection != null) connection.release(); + logError(host.getEndPoint(), e); + findNextHostAndQuery(); + } catch (BusyConnectionException e) { + // The pool shouldn't have give us a busy connection unless we've maxed up the pool, + // so move on to the next host. + connection.release(true); + logError(host.getEndPoint(), e); + findNextHostAndQuery(); + } catch (RuntimeException e) { + if (connection != null) connection.release(); + logger.warn( + "Unexpected error while querying {} - [{}]. Find next host to query.", + host.getEndPoint(), + e.toString()); + logError(host.getEndPoint(), e); + findNextHostAndQuery(); + } + } - if (logger.isTraceEnabled()) - logger.trace("[{}] Setting final result", execution.id); - - cancelPendingExecutions(execution); - - try { - if (timerContext != null) - timerContext.stop(); - - ExecutionInfo info; - int speculativeExecutions = executionIndex.get() - 1; - // Avoid creating a new instance if we can reuse the host's default one - if (execution.position == 0 && speculativeExecutions == 0 && triedHosts == null && execution.retryConsistencyLevel == null - && response.getCustomPayload() == null) { - info = execution.current.defaultExecutionInfo; - } else { - List hosts; - if (triedHosts == null) { - hosts = ImmutableList.of(execution.current); - } else { - hosts = triedHosts; - hosts.add(execution.current); - } - info = new ExecutionInfo(speculativeExecutions, execution.position, hosts, execution.retryConsistencyLevel, response.getCustomPayload()); + @Override + public void onFailure(Throwable t) { + if (t instanceof BusyPoolException) { + logError(host.getEndPoint(), t); + } else { + logger.warn( + "Unexpected error while querying {} - [{}]. Find next host to query.", + host.getEndPoint(), + t.toString()); + logError(host.getEndPoint(), t); + } + findNextHostAndQuery(); } - callback.onSet(connection, response, info, statement, System.nanoTime() - startTime); - } catch (Exception e) { - callback.onException(connection, - new DriverInternalError("Unexpected exception while setting final result from " + response, e), - System.nanoTime() - startTime, /*unused*/0); - } + }); + return true; } - private void setFinalException(SpeculativeExecution execution, Connection connection, Exception exception) { - if (!isDone.compareAndSet(false, true)) { - if (logger.isTraceEnabled()) - logger.trace("[{}] Got beaten to setting final exception", execution.id); - return; + private void write(Connection connection, Connection.ResponseCallback responseCallback) + throws ConnectionException, BusyConnectionException { + // Make sure cancel() does not see a stale connectionHandler if it sees the new query state + // before connection.write has completed + connectionHandler = null; + + // Ensure query state is "in progress" (can be already if connection.write failed on a + // previous node and we're retrying) + while (true) { + QueryState previous = queryStateRef.get(); + if (previous.isCancelled()) { + connection.release(); + return; } + if (previous.inProgress || queryStateRef.compareAndSet(previous, previous.startNext())) + break; + } + + connectionHandler = + connection.write(responseCallback, statement.getReadTimeoutMillis(), false); + // Only start the timeout when we're sure connectionHandler is set. This avoids an edge case + // where onTimeout() was triggered + // *before* the call to connection.write had returned. 
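+ // (The timeout duration is the statement's read timeout, as passed to connection.write() above.)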
+ connectionHandler.startTimeout(); + + // Note that we could have already received the response here (so onSet() / onException() + // would have been called). This is + // why we only test for CANCELLED_WHILE_IN_PROGRESS below. + + // If cancel() was called after we set the state to "in progress", but before connection.write + // had completed, it might have + // missed the new value of connectionHandler. So make sure that cancelHandler() gets called + // here (we might call it twice, + // but it knows how to deal with it). + if (queryStateRef.get() == QueryState.CANCELLED_WHILE_IN_PROGRESS + && connectionHandler.cancelHandler()) connection.release(); + } - if (logger.isTraceEnabled()) - logger.trace("[{}] Setting final exception", execution.id); - - cancelPendingExecutions(execution); - - try { - if (timerContext != null) - timerContext.stop(); - } finally { - callback.onException(connection, exception, System.nanoTime() - startTime, /*unused*/0); + private RetryPolicy.RetryDecision computeRetryDecisionOnRequestError( + DriverException exception) { + RetryPolicy.RetryDecision decision; + if (statement.isIdempotentWithDefault(manager.cluster.getConfiguration().getQueryOptions())) { + decision = + retryPolicy() + .onRequestError(statement, request().consistency(), exception, retriesByPolicy); + } else { + decision = RetryPolicy.RetryDecision.rethrow(); + } + if (metricsEnabled()) { + if (exception instanceof OperationTimedOutException) { + metrics().getErrorMetrics().getClientTimeouts().inc(); + if (decision.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnClientTimeout().inc(); + if (decision.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnClientTimeout().inc(); + } else if (exception instanceof ConnectionException) { + metrics().getErrorMetrics().getConnectionErrors().inc(); + if (decision.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnConnectionError().inc(); + if (decision.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnConnectionError().inc(); + } else { + metrics().getErrorMetrics().getOthers().inc(); + if (decision.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnOtherErrors().inc(); + if (decision.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnOtherErrors().inc(); } + } + return decision; } - // Triggered when an execution reaches the end of the query plan. - // This is only a failure if there are no other running executions. - private void reportNoMoreHosts(SpeculativeExecution execution) { - runningExecutions.remove(execution); - if (runningExecutions.isEmpty()) - setFinalException(execution, null, new NoHostAvailableException( - errors == null ? 
Collections.emptyMap() : errors)); + private void processRetryDecision( + RetryPolicy.RetryDecision retryDecision, + Connection connection, + Exception exceptionToReport) { + switch (retryDecision.getType()) { + case RETRY: + retriesByPolicy++; + if (logger.isDebugEnabled()) + logger.debug( + "[{}] Doing retry {} for query {} at consistency {}", + id, + retriesByPolicy, + statement, + retryDecision.getRetryConsistencyLevel()); + if (metricsEnabled()) metrics().getErrorMetrics().getRetries().inc(); + // log error for the current host if we are switching to another one + if (!retryDecision.isRetryCurrent()) logError(connection.endPoint, exceptionToReport); + retry(retryDecision.isRetryCurrent(), retryDecision.getRetryConsistencyLevel()); + break; + case RETHROW: + setFinalException(connection, exceptionToReport); + break; + case IGNORE: + if (metricsEnabled()) metrics().getErrorMetrics().getIgnores().inc(); + setFinalResult(connection, new Responses.Result.Void()); + break; + } } - private boolean metricsEnabled() { - return manager.configuration().getMetricsOptions().isEnabled(); - } + private void retry(final boolean retryCurrent, ConsistencyLevel newConsistencyLevel) { + final Host h = current; + if (newConsistencyLevel != null) this.retryConsistencyLevel = newConsistencyLevel; - private Metrics metrics() { - return manager.cluster.manager.metrics; - } + if (queryStateRef.get().isCancelled()) return; - private RetryPolicy retryPolicy() { - return statement.getRetryPolicy() == null - ? manager.configuration().getPolicies().getRetryPolicy() - : statement.getRetryPolicy(); + if (!retryCurrent || !query(h)) findNextHostAndQuery(); } - interface Callback extends Connection.ResponseCallback { - void onSet(Connection connection, Message.Response response, ExecutionInfo info, Statement statement, long latency); - - void register(RequestHandler handler); + private void logError(EndPoint endPoint, Throwable exception) { + logger.debug("[{}] Error querying {} : {}", id, endPoint, exception.toString()); + if (errors == null) { + synchronized (RequestHandler.this) { + if (errors == null) { + errors = new ConcurrentHashMap(); + } + } + } + errors.put(endPoint, exception); } - /** - * An execution of the query against the cluster. - * There is at least one instance per RequestHandler, and possibly more (depending on the SpeculativeExecutionPolicy). - * Each instance may retry on the same host, or on other hosts as defined by the RetryPolicy. - * All instances run concurrently and share the same query plan. 
- * There are three ways a SpeculativeExecution can stop: - * - it completes the query (with either a success or a fatal error), and reports the result to the RequestHandler - * - it gets cancelled, either because another execution completed the query, or because the RequestHandler was cancelled - * - it reaches the end of the query plan and informs the RequestHandler, which will decide what to do - */ - class SpeculativeExecution implements Connection.ResponseCallback { - final String id; - private final Message.Request request; - private final int position; - private volatile Host current; - private volatile ConsistencyLevel retryConsistencyLevel; - private final AtomicReference queryStateRef; - private final AtomicBoolean nextExecutionScheduled = new AtomicBoolean(); - - // This represents the number of times a retry has been triggered by the RetryPolicy (this is different from - // queryStateRef.get().retryCount, because some retries don't involve the policy, for example after an - // UNPREPARED response). - // This is incremented by one writer at a time, so volatile is good enough. - private volatile int retriesByPolicy; - - private volatile Connection.ResponseHandler connectionHandler; - - SpeculativeExecution(Message.Request request, int position) { - this.id = RequestHandler.this.id + "-" + position; - this.request = request; - this.position = position; - this.queryStateRef = new AtomicReference(QueryState.INITIAL); - if (logger.isTraceEnabled()) - logger.trace("[{}] Starting", id); + void cancel() { + // Atomically set a special QueryState, that will cause any further operation to abort. + // We want to remember whether a request was in progress when we did this, so there are two + // cancel states. + while (true) { + QueryState previous = queryStateRef.get(); + if (previous.isCancelled()) { + return; + } else if (previous.inProgress + && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_IN_PROGRESS)) { + if (logger.isTraceEnabled()) logger.trace("[{}] Cancelled while in progress", id); + // The connectionHandler should be non-null, but we might miss the update if we're racing + // with write(). + // If it's still null, this will be handled by re-checking queryStateRef at the end of + // write(). 
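+ // (cancelHandler() tolerates being invoked from both here and write(), so the connection
+ // should only be released once even if both paths run.)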
+ if (connectionHandler != null && connectionHandler.cancelHandler()) + connectionHandler.connection.release(); + Host queriedHost = current; + if (queriedHost != null && statement != Statement.DEFAULT) { + manager.cluster.manager.reportQuery( + queriedHost, + statement, + CancelledSpeculativeExecutionException.INSTANCE, + System.nanoTime() - startTime); + } + return; + } else if (!previous.inProgress + && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_COMPLETE)) { + if (logger.isTraceEnabled()) logger.trace("[{}] Cancelled while complete", id); + Host queriedHost = current; + if (queriedHost != null && statement != Statement.DEFAULT) { + manager.cluster.manager.reportQuery( + queriedHost, + statement, + CancelledSpeculativeExecutionException.INSTANCE, + System.nanoTime() - startTime); + } + return; } + } + } + + @Override + public Message.Request request() { + if (retryConsistencyLevel != null && retryConsistencyLevel != request.consistency()) + return request.copy(retryConsistencyLevel); + else return request; + } - void findNextHostAndQuery() { - try { - Host host; - while (!isDone.get() && (host = queryPlan.next()) != null && !queryStateRef.get().isCancelled()) { - if (query(host)) - return; + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) + || !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug( + "onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, + queryState, + queryStateRef.get()); + return; + } + + Host queriedHost = current; + Exception exceptionToReport = null; + try { + switch (response.type) { + case RESULT: + connection.release(); + setFinalResult(connection, response); + break; + case ERROR: + Responses.Error err = (Responses.Error) response; + exceptionToReport = err.asException(connection.endPoint); + RetryPolicy.RetryDecision retry = null; + RetryPolicy retryPolicy = retryPolicy(); + switch (err.code) { + case READ_TIMEOUT: + connection.release(); + assert err.infos instanceof ReadTimeoutException; + ReadTimeoutException rte = (ReadTimeoutException) err.infos; + retry = + retryPolicy.onReadTimeout( + statement, + rte.getConsistencyLevel(), + rte.getRequiredAcknowledgements(), + rte.getReceivedAcknowledgements(), + rte.wasDataRetrieved(), + retriesByPolicy); + if (metricsEnabled()) { + metrics().getErrorMetrics().getReadTimeouts().inc(); + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnReadTimeout().inc(); + if (retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnReadTimeout().inc(); } - if (current != null) { - if (triedHosts == null) - triedHosts = new CopyOnWriteArrayList(); - triedHosts.add(current); + break; + case WRITE_TIMEOUT: + connection.release(); + assert err.infos instanceof WriteTimeoutException; + WriteTimeoutException wte = (WriteTimeoutException) err.infos; + if (statement.isIdempotentWithDefault( + manager.cluster.getConfiguration().getQueryOptions())) + retry = + retryPolicy.onWriteTimeout( + statement, + wte.getConsistencyLevel(), + wte.getWriteType(), + wte.getRequiredAcknowledgements(), + wte.getReceivedAcknowledgements(), + retriesByPolicy); + else { + retry = RetryPolicy.RetryDecision.rethrow(); } - reportNoMoreHosts(this); - } catch (Exception e) { - // Shouldn't happen really, but if ever the 
loadbalancing policy returned iterator throws, we don't want to block. - setFinalException(null, new DriverInternalError("An unexpected error happened while sending requests", e)); - } - } - - private boolean query(final Host host) { - HostConnectionPool pool = manager.pools.get(host); - if (pool == null || pool.isClosed()) - return false; - - if (logger.isTraceEnabled()) - logger.trace("[{}] Querying node {}", id, host); - - if (allowSpeculativeExecutions && nextExecutionScheduled.compareAndSet(false, true)) - scheduleExecution(speculativeExecutionPlan.nextExecution(host)); - - PoolingOptions poolingOptions = manager.configuration().getPoolingOptions(); - ListenableFuture connectionFuture = pool.borrowConnection( - poolingOptions.getPoolTimeoutMillis(), TimeUnit.MILLISECONDS, - poolingOptions.getMaxQueueSize()); - Futures.addCallback(connectionFuture, new FutureCallback() { - @Override - public void onSuccess(Connection connection) { - if (isDone.get()) { - connection.release(); - return; - } - if (current != null) { - if (triedHosts == null) - triedHosts = new CopyOnWriteArrayList(); - triedHosts.add(current); - } - current = host; - try { - write(connection, SpeculativeExecution.this); - } catch (ConnectionException e) { - // If we have any problem with the connection, move to the next node. - if (metricsEnabled()) - metrics().getErrorMetrics().getConnectionErrors().inc(); - if (connection != null) - connection.release(); - logError(host.getSocketAddress(), e); - findNextHostAndQuery(); - } catch (BusyConnectionException e) { - // The pool shouldn't have give us a busy connection unless we've maxed up the pool, so move on to the next host. - connection.release(); - logError(host.getSocketAddress(), e); - findNextHostAndQuery(); - } catch (RuntimeException e) { - if (connection != null) - connection.release(); - logger.error("Unexpected error while querying " + host.getAddress(), e); - logError(host.getSocketAddress(), e); - findNextHostAndQuery(); - } + if (metricsEnabled()) { + metrics().getErrorMetrics().getWriteTimeouts().inc(); + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnWriteTimeout().inc(); + if (retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnWriteTimeout().inc(); } - - @Override - public void onFailure(Throwable t) { - if (t instanceof BusyPoolException) { - logError(host.getSocketAddress(), t); - } else { - logger.error("Unexpected error while querying " + host.getAddress(), t); - logError(host.getSocketAddress(), t); - } - findNextHostAndQuery(); + break; + case UNAVAILABLE: + connection.release(); + assert err.infos instanceof UnavailableException; + UnavailableException ue = (UnavailableException) err.infos; + retry = + retryPolicy.onUnavailable( + statement, + ue.getConsistencyLevel(), + ue.getRequiredReplicas(), + ue.getAliveReplicas(), + retriesByPolicy); + if (metricsEnabled()) { + metrics().getErrorMetrics().getUnavailables().inc(); + if (retry.getType() == Type.RETRY) + metrics().getErrorMetrics().getRetriesOnUnavailable().inc(); + if (retry.getType() == Type.IGNORE) + metrics().getErrorMetrics().getIgnoresOnUnavailable().inc(); } - }); - return true; - } - - private void write(Connection connection, Connection.ResponseCallback responseCallback) throws ConnectionException, BusyConnectionException { - // Make sure cancel() does not see a stale connectionHandler if it sees the new query state - // before connection.write has completed - connectionHandler = null; - - // Ensure query state is "in progress" (can 
be already if connection.write failed on a previous node and we're retrying) - while (true) { - QueryState previous = queryStateRef.get(); - if (previous.isCancelled()) { - connection.release(); - return; + break; + case OVERLOADED: + connection.release(); + assert exceptionToReport instanceof OverloadedException; + logger.warn("Host {} is overloaded.", connection.endPoint); + retry = computeRetryDecisionOnRequestError((OverloadedException) exceptionToReport); + break; + case SERVER_ERROR: + connection.release(); + assert exceptionToReport instanceof ServerError; + logger.warn( + "{} replied with server error ({}), defuncting connection.", + connection.endPoint, + err.message); + // Defunct connection + connection.defunct(exceptionToReport); + retry = computeRetryDecisionOnRequestError((ServerError) exceptionToReport); + break; + case IS_BOOTSTRAPPING: + connection.release(); + assert exceptionToReport instanceof BootstrappingException; + logger.error( + "Query sent to {} but it is bootstrapping. This shouldn't happen but trying next host.", + connection.endPoint); + if (metricsEnabled()) { + metrics().getErrorMetrics().getOthers().inc(); + } + logError(connection.endPoint, exceptionToReport); + retry(false, null); + return; + case UNPREPARED: + // Do not release connection yet, because we might reuse it to send the PREPARE + // message (see write() call below) + assert err.infos instanceof MD5Digest; + MD5Digest id = (MD5Digest) err.infos; + PreparedStatement toPrepare = manager.cluster.manager.preparedQueries.get(id); + if (toPrepare == null) { + // This shouldn't happen + connection.release(); + String msg = String.format("Tried to execute unknown prepared query %s", id); + logger.error(msg); + setFinalException(connection, new DriverInternalError(msg)); + return; } - if (previous.inProgress || queryStateRef.compareAndSet(previous, previous.startNext())) - break; - } - connectionHandler = connection.write(responseCallback, statement.getReadTimeoutMillis(), false); - // Only start the timeout when we're sure connectionHandler is set. This avoids an edge case where onTimeout() was triggered - // *before* the call to connection.write had returned. - connectionHandler.startTimeout(); + String currentKeyspace = connection.keyspace(); + String prepareKeyspace = toPrepare.getQueryKeyspace(); + if (prepareKeyspace != null + && (currentKeyspace == null || !currentKeyspace.equals(prepareKeyspace))) { + // This shouldn't happen in normal use, because a user shouldn't try to execute + // a prepared statement with the wrong keyspace set. + // Fail fast (we can't change the keyspace to reprepare, because we're using a + // pooled connection + // that's shared with other requests). + connection.release(); + throw new IllegalStateException( + String.format( + "Statement was prepared on keyspace %s, can't execute it on %s (%s)", + toPrepare.getQueryKeyspace(), + connection.keyspace(), + toPrepare.getQueryString())); + } - // Note that we could have already received the response here (so onSet() / onException() would have been called). This is - // why we only test for CANCELLED_WHILE_IN_PROGRESS below. + logger.info( + "Query {} is not prepared on {}, preparing before retrying executing. 
" + + "Seeing this message a few times is fine, but seeing it a lot may be source of performance problems", + toPrepare.getQueryString(), + toPrepare.getQueryKeyspace(), + connection.endPoint); - // If cancel() was called after we set the state to "in progress", but before connection.write had completed, it might have - // missed the new value of connectionHandler. So make sure that cancelHandler() gets called here (we might call it twice, - // but it knows how to deal with it). - if (queryStateRef.get() == QueryState.CANCELLED_WHILE_IN_PROGRESS && connectionHandler.cancelHandler()) + write(connection, prepareAndRetry(toPrepare.getQueryString())); + // we're done for now, the prepareAndRetry callback will handle the rest + return; + case READ_FAILURE: + assert exceptionToReport instanceof ReadFailureException; connection.release(); - } - - private RetryPolicy.RetryDecision computeRetryDecisionOnRequestError(DriverException exception) { - RetryPolicy.RetryDecision decision; - if (statement.isIdempotentWithDefault(manager.cluster.getConfiguration().getQueryOptions())) { - decision = retryPolicy().onRequestError(statement, request().consistency(), exception, retriesByPolicy); - } else { - decision = RetryPolicy.RetryDecision.rethrow(); - } - if (metricsEnabled()) { - if (exception instanceof OperationTimedOutException) { - metrics().getErrorMetrics().getClientTimeouts().inc(); - if (decision.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnClientTimeout().inc(); - if (decision.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnClientTimeout().inc(); - } else if (exception instanceof ConnectionException) { - metrics().getErrorMetrics().getConnectionErrors().inc(); - if (decision.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnConnectionError().inc(); - if (decision.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnConnectionError().inc(); + retry = + computeRetryDecisionOnRequestError((ReadFailureException) exceptionToReport); + break; + case WRITE_FAILURE: + assert exceptionToReport instanceof WriteFailureException; + connection.release(); + if (statement.isIdempotentWithDefault( + manager.cluster.getConfiguration().getQueryOptions())) { + retry = + computeRetryDecisionOnRequestError((WriteFailureException) exceptionToReport); } else { - metrics().getErrorMetrics().getOthers().inc(); - if (decision.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnOtherErrors().inc(); - if (decision.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnOtherErrors().inc(); + retry = RetryPolicy.RetryDecision.rethrow(); } + break; + default: + connection.release(); + if (metricsEnabled()) metrics().getErrorMetrics().getOthers().inc(); + break; } - return decision; - } - private void processRetryDecision(RetryPolicy.RetryDecision retryDecision, Connection connection, Exception exceptionToReport) { - switch (retryDecision.getType()) { - case RETRY: - retriesByPolicy++; - if (logger.isDebugEnabled()) - logger.debug("[{}] Doing retry {} for query {} at consistency {}", id, retriesByPolicy, statement, retryDecision.getRetryConsistencyLevel()); - if (metricsEnabled()) - metrics().getErrorMetrics().getRetries().inc(); - // log error for the current host if we are switching to another one - if (!retryDecision.isRetryCurrent()) - logError(connection.address, exceptionToReport); - retry(retryDecision.isRetryCurrent(), retryDecision.getRetryConsistencyLevel()); - break; - case RETHROW: - setFinalException(connection, 
exceptionToReport); - break; - case IGNORE: - if (metricsEnabled()) - metrics().getErrorMetrics().getIgnores().inc(); - setFinalResult(connection, new Responses.Result.Void()); - break; + if (retry == null) setFinalResult(connection, response); + else { + processRetryDecision(retry, connection, exceptionToReport); } + break; + default: + connection.release(); + setFinalResult(connection, response); + break; } - - private void retry(final boolean retryCurrent, ConsistencyLevel newConsistencyLevel) { - final Host h = current; - if (newConsistencyLevel != null) - this.retryConsistencyLevel = newConsistencyLevel; - - if (queryStateRef.get().isCancelled()) - return; - - if (!retryCurrent || !query(h)) - findNextHostAndQuery(); - } - - private void logError(InetSocketAddress address, Throwable exception) { - logger.debug("[{}] Error querying {} : {}", id, address, exception.toString()); - if (errors == null) { - synchronized (RequestHandler.this) { - if (errors == null) { - errors = new ConcurrentHashMap(); - } - } - } - errors.put(address, exception); + } catch (Exception e) { + exceptionToReport = e; + setFinalException(connection, e); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) { + manager.cluster.manager.reportQuery(queriedHost, statement, exceptionToReport, latency); } + } + } - void cancel() { - // Atomically set a special QueryState, that will cause any further operation to abort. - // We want to remember whether a request was in progress when we did this, so there are two cancel states. - while (true) { - QueryState previous = queryStateRef.get(); - if (previous.isCancelled()) { - return; - } else if (previous.inProgress && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_IN_PROGRESS)) { - if (logger.isTraceEnabled()) - logger.trace("[{}] Cancelled while in progress", id); - // The connectionHandler should be non-null, but we might miss the update if we're racing with write(). - // If it's still null, this will be handled by re-checking queryStateRef at the end of write(). - if (connectionHandler != null && connectionHandler.cancelHandler()) - connectionHandler.connection.release(); - return; - } else if (!previous.inProgress && queryStateRef.compareAndSet(previous, QueryState.CANCELLED_WHILE_COMPLETE)) { - if (logger.isTraceEnabled()) - logger.trace("[{}] Cancelled while complete", id); - return; - } - } - } + private Connection.ResponseCallback prepareAndRetry(final String toPrepare) { + // do not bother inspecting retry policy at this step, no other decision + // makes sense than retry on the same host if the query was prepared, + // or on another host, if an error/timeout occurred. + // The original request hasn't been executed so far, so there is no risk + // of re-executing non-idempotent statements. 
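+ // (This differs from computeRetryDecisionOnRequestError(), which rethrows instead of
+ // retrying when the statement is not idempotent.)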
+ return new Connection.ResponseCallback() { @Override public Message.Request request() { - if (retryConsistencyLevel != null && retryConsistencyLevel != request.consistency()) - return request.copy(retryConsistencyLevel); - else - return request; + Requests.Prepare request = new Requests.Prepare(toPrepare); + // propagate the original custom payload in the prepare request + request.setCustomPayload(statement.getOutgoingPayload()); + return request; } @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - QueryState queryState = queryStateRef.get(); - if (!queryState.isInProgressAt(retryCount) || - !queryStateRef.compareAndSet(queryState, queryState.complete())) { - logger.debug("onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", - retryCount, queryState, queryStateRef.get()); - return; - } - - Host queriedHost = current; - Exception exceptionToReport = null; - try { - switch (response.type) { - case RESULT: - connection.release(); - setFinalResult(connection, response); - break; - case ERROR: - Responses.Error err = (Responses.Error) response; - exceptionToReport = err.asException(connection.address); - RetryPolicy.RetryDecision retry = null; - RetryPolicy retryPolicy = retryPolicy(); - switch (err.code) { - case READ_TIMEOUT: - connection.release(); - assert err.infos instanceof ReadTimeoutException; - ReadTimeoutException rte = (ReadTimeoutException) err.infos; - retry = retryPolicy.onReadTimeout(statement, - rte.getConsistencyLevel(), - rte.getRequiredAcknowledgements(), - rte.getReceivedAcknowledgements(), - rte.wasDataRetrieved(), - retriesByPolicy); - if (metricsEnabled()) { - metrics().getErrorMetrics().getReadTimeouts().inc(); - if (retry.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnReadTimeout().inc(); - if (retry.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnReadTimeout().inc(); - } - break; - case WRITE_TIMEOUT: - connection.release(); - assert err.infos instanceof WriteTimeoutException; - WriteTimeoutException wte = (WriteTimeoutException) err.infos; - if (statement.isIdempotentWithDefault(manager.cluster.getConfiguration().getQueryOptions())) - retry = retryPolicy.onWriteTimeout(statement, - wte.getConsistencyLevel(), - wte.getWriteType(), - wte.getRequiredAcknowledgements(), - wte.getReceivedAcknowledgements(), - retriesByPolicy); - else { - retry = RetryPolicy.RetryDecision.rethrow(); - } - if (metricsEnabled()) { - metrics().getErrorMetrics().getWriteTimeouts().inc(); - if (retry.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnWriteTimeout().inc(); - if (retry.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnWriteTimeout().inc(); - } - break; - case UNAVAILABLE: - connection.release(); - assert err.infos instanceof UnavailableException; - UnavailableException ue = (UnavailableException) err.infos; - retry = retryPolicy.onUnavailable(statement, - ue.getConsistencyLevel(), - ue.getRequiredReplicas(), - ue.getAliveReplicas(), - retriesByPolicy); - if (metricsEnabled()) { - metrics().getErrorMetrics().getUnavailables().inc(); - if (retry.getType() == Type.RETRY) - metrics().getErrorMetrics().getRetriesOnUnavailable().inc(); - if (retry.getType() == Type.IGNORE) - metrics().getErrorMetrics().getIgnoresOnUnavailable().inc(); - } - break; - case OVERLOADED: - connection.release(); - assert exceptionToReport instanceof OverloadedException; - logger.warn("Host 
{} is overloaded.", connection.address); - retry = computeRetryDecisionOnRequestError((OverloadedException) exceptionToReport); - break; - case SERVER_ERROR: - connection.release(); - assert exceptionToReport instanceof ServerError; - logger.warn("{} replied with server error ({}), defuncting connection.", connection.address, err.message); - // Defunct connection - connection.defunct(exceptionToReport); - retry = computeRetryDecisionOnRequestError((ServerError) exceptionToReport); - break; - case IS_BOOTSTRAPPING: - connection.release(); - assert exceptionToReport instanceof BootstrappingException; - logger.error("Query sent to {} but it is bootstrapping. This shouldn't happen but trying next host.", connection.address); - if (metricsEnabled()) { - metrics().getErrorMetrics().getOthers().inc(); - } - logError(connection.address, exceptionToReport); - retry(false, null); - return; - case UNPREPARED: - // Do not release connection yet, because we might reuse it to send the PREPARE message (see write() call below) - assert err.infos instanceof MD5Digest; - MD5Digest id = (MD5Digest) err.infos; - PreparedStatement toPrepare = manager.cluster.manager.preparedQueries.get(id); - if (toPrepare == null) { - // This shouldn't happen - connection.release(); - String msg = String.format("Tried to execute unknown prepared query %s", id); - logger.error(msg); - setFinalException(connection, new DriverInternalError(msg)); - return; - } - - String currentKeyspace = connection.keyspace(); - String prepareKeyspace = toPrepare.getQueryKeyspace(); - if (prepareKeyspace != null && (currentKeyspace == null || !currentKeyspace.equals(prepareKeyspace))) { - // This shouldn't happen in normal use, because a user shouldn't try to execute - // a prepared statement with the wrong keyspace set. - // Fail fast (we can't change the keyspace to reprepare, because we're using a pooled connection - // that's shared with other requests). - connection.release(); - throw new IllegalStateException(String.format("Statement was prepared on keyspace %s, can't execute it on %s (%s)", - toPrepare.getQueryKeyspace(), connection.keyspace(), toPrepare.getQueryString())); - } - - logger.info("Query {} is not prepared on {}, preparing before retrying executing. " - + "Seeing this message a few times is fine, but seeing it a lot may be source of performance problems", - toPrepare.getQueryString(), connection.address); - - write(connection, prepareAndRetry(toPrepare.getQueryString())); - // we're done for now, the prepareAndRetry callback will handle the rest - return; - default: - connection.release(); - if (metricsEnabled()) - metrics().getErrorMetrics().getOthers().inc(); - break; - } - - if (retry == null) - setFinalResult(connection, response); - else { - processRetryDecision(retry, connection, exceptionToReport); - } - break; - default: - connection.release(); - setFinalResult(connection, response); - break; - } - } catch (Exception e) { - exceptionToReport = e; - setFinalException(connection, e); - } finally { - if (queriedHost != null && statement != Statement.DEFAULT) { - manager.cluster.manager.reportQuery(queriedHost, statement, exceptionToReport, latency); - } - } + public int retryCount() { + return SpeculativeExecution.this.retryCount(); } - private Connection.ResponseCallback prepareAndRetry(final String toPrepare) { - // do not bother inspecting retry policy at this step, no other decision - // makes sense than retry on the same host if the query was prepared, - // or on another host, if an error/timeout occurred. 
- // The original request hasn't been executed so far, so there is no risk - // of re-executing non-idempotent statements. - return new Connection.ResponseCallback() { - - @Override - public Message.Request request() { - Requests.Prepare request = new Requests.Prepare(toPrepare); - // propagate the original custom payload in the prepare request - request.setCustomPayload(statement.getOutgoingPayload()); - return request; - } - - @Override - public int retryCount() { - return SpeculativeExecution.this.retryCount(); - } - - @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - QueryState queryState = queryStateRef.get(); - if (!queryState.isInProgressAt(retryCount) || - !queryStateRef.compareAndSet(queryState, queryState.complete())) { - logger.debug("onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", - retryCount, queryState, queryStateRef.get()); - return; - } - - connection.release(); - - switch (response.type) { - case RESULT: - if (((Responses.Result) response).kind == Responses.Result.Kind.PREPARED) { - logger.debug("Scheduling retry now that query is prepared"); - retry(true, null); - } else { - logError(connection.address, new DriverException("Got unexpected response to prepare message: " + response)); - retry(false, null); - } - break; - case ERROR: - logError(connection.address, new DriverException("Error preparing query, got " + response)); - if (metricsEnabled()) - metrics().getErrorMetrics().getOthers().inc(); - retry(false, null); - break; - default: - // Something's wrong, so we return but we let setFinalResult propagate the exception - SpeculativeExecution.this.setFinalResult(connection, response); - break; - } - } - - @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - SpeculativeExecution.this.onException(connection, exception, latency, retryCount); - } - - @Override - public boolean onTimeout(Connection connection, long latency, int retryCount) { - QueryState queryState = queryStateRef.get(); - if (!queryState.isInProgressAt(retryCount) || - !queryStateRef.compareAndSet(queryState, queryState.complete())) { - logger.debug("onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", - retryCount, queryState, queryStateRef.get()); - return false; - } - connection.release(); - logError(connection.address, new OperationTimedOutException(connection.address, "Timed out waiting for response to PREPARE message")); - retry(false, null); - return true; - } - }; + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) + || !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug( + "onSet triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, + queryState, + queryStateRef.get()); + return; + } + + connection.release(); + + switch (response.type) { + case RESULT: + if (((Responses.Result) response).kind == Responses.Result.Kind.PREPARED) { + logger.debug("Scheduling retry now that query is prepared"); + retry(true, null); + } else { + logError( + connection.endPoint, + new DriverException("Got unexpected response to prepare message: " + response)); 
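Side note on the write-timeout branch in the hunk above: the retry policy's `onWriteTimeout` is only consulted when the statement is idempotent (via `isIdempotentWithDefault`); otherwise the decision is forced to rethrow. A minimal usage-level sketch of opting into that retry path with the 3.x API follows — the contact point and schema are placeholders, not part of this patch:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class IdempotentRetryExample {
  public static void main(String[] args) {
    // "127.0.0.1" is a placeholder contact point.
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {
      // Marking the statement idempotent lets the driver consult the retry policy on a
      // write timeout; non-idempotent statements are rethrown instead (see the
      // onWriteTimeout branch above). Keyspace and table names are illustrative.
      Statement insert =
          new SimpleStatement("INSERT INTO ks.tbl (id, value) VALUES (1, 'v')")
              .setIdempotent(true);
      session.execute(insert);
    }
  }
}
```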
+ retry(false, null); + } + break; + case ERROR: + logError( + connection.endPoint, + new DriverException("Error preparing query, got " + response)); + if (metricsEnabled()) metrics().getErrorMetrics().getOthers().inc(); + retry(false, null); + break; + default: + // Something's wrong, so we return but we let setFinalResult propagate the exception + SpeculativeExecution.this.setFinalResult(connection, response); + break; + } } @Override - public void onException(Connection connection, Exception exception, long latency, int retryCount) { - QueryState queryState = queryStateRef.get(); - if (!queryState.isInProgressAt(retryCount) || - !queryStateRef.compareAndSet(queryState, queryState.complete())) { - logger.debug("onException triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", - retryCount, queryState, queryStateRef.get()); - return; - } - - Host queriedHost = current; - try { - connection.release(); - - if (exception instanceof ConnectionException) { - RetryPolicy.RetryDecision decision = computeRetryDecisionOnRequestError((ConnectionException) exception); - processRetryDecision(decision, connection, exception); - return; - } - setFinalException(connection, exception); - } catch (Exception e) { - // This shouldn't happen, but if it does, we want to signal the callback, not let it hang indefinitely - setFinalException(null, new DriverInternalError("An unexpected error happened while handling exception " + exception, e)); - } finally { - if (queriedHost != null && statement != Statement.DEFAULT) - manager.cluster.manager.reportQuery(queriedHost, statement, exception, latency); - } + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + SpeculativeExecution.this.onException(connection, exception, latency, retryCount); } @Override public boolean onTimeout(Connection connection, long latency, int retryCount) { - QueryState queryState = queryStateRef.get(); - if (!queryState.isInProgressAt(retryCount) || - !queryStateRef.compareAndSet(queryState, queryState.complete())) { - logger.debug("onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", - retryCount, queryState, queryStateRef.get()); - return false; - } - - Host queriedHost = current; - - OperationTimedOutException timeoutException = new OperationTimedOutException(connection.address, "Timed out waiting for server response"); - - try { - connection.release(); - - RetryPolicy.RetryDecision decision = computeRetryDecisionOnRequestError(timeoutException); - processRetryDecision(decision, connection, timeoutException); - } catch (Exception e) { - // This shouldn't happen, but if it does, we want to signal the callback, not let it hang indefinitely - setFinalException(null, new DriverInternalError("An unexpected error happened while handling timeout", e)); - } finally { - if (queriedHost != null && statement != Statement.DEFAULT) - manager.cluster.manager.reportQuery(queriedHost, statement, timeoutException, latency); - } - return true; + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) + || !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug( + "onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, + queryState, + queryStateRef.get()); + return false; + } + 
connection.release(); + logError( + connection.endPoint, + new OperationTimedOutException( + connection.endPoint, "Timed out waiting for response to PREPARE message")); + retry(false, null); + return true; } + }; + } - @Override - public int retryCount() { - return queryStateRef.get().retryCount; + @Override + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) + || !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug( + "onException triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, + queryState, + queryStateRef.get()); + return; + } + + Host queriedHost = current; + try { + connection.release(); + + if (exception instanceof ConnectionException) { + RetryPolicy.RetryDecision decision = + computeRetryDecisionOnRequestError((ConnectionException) exception); + processRetryDecision(decision, connection, exception); + return; } + setFinalException(connection, exception); + } catch (Exception e) { + // This shouldn't happen, but if it does, we want to signal the callback, not let it hang + // indefinitely + setFinalException( + null, + new DriverInternalError( + "An unexpected error happened while handling exception " + exception, e)); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) + manager.cluster.manager.reportQuery(queriedHost, statement, exception, latency); + } + } - private void setFinalException(Connection connection, Exception exception) { - RequestHandler.this.setFinalException(this, connection, exception); - } + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + QueryState queryState = queryStateRef.get(); + if (!queryState.isInProgressAt(retryCount) + || !queryStateRef.compareAndSet(queryState, queryState.complete())) { + logger.debug( + "onTimeout triggered but the response was completed by another thread, cancelling (retryCount = {}, queryState = {}, queryStateRef = {})", + retryCount, + queryState, + queryStateRef.get()); + return false; + } + + Host queriedHost = current; + + OperationTimedOutException timeoutException = + new OperationTimedOutException( + connection.endPoint, "Timed out waiting for server response"); + + try { + connection.release(); + + RetryPolicy.RetryDecision decision = computeRetryDecisionOnRequestError(timeoutException); + processRetryDecision(decision, connection, timeoutException); + } catch (Exception e) { + // This shouldn't happen, but if it does, we want to signal the callback, not let it hang + // indefinitely + setFinalException( + null, + new DriverInternalError("An unexpected error happened while handling timeout", e)); + } finally { + if (queriedHost != null && statement != Statement.DEFAULT) + manager.cluster.manager.reportQuery(queriedHost, statement, timeoutException, latency); + } + return true; + } - private void setFinalResult(Connection connection, Message.Response response) { - RequestHandler.this.setFinalResult(this, connection, response); - } + @Override + public int retryCount() { + return queryStateRef.get().retryCount; } - /** - * The state of a SpeculativeExecution. - *

- * This is used to prevent races between request completion (either success or error) and timeout. - * A retry is in progress once we have written the request to the connection and until we get back a response (see onSet - * or onException) or a timeout (see onTimeout). - * The count increments on each retry. - */ - static class QueryState { - static final QueryState INITIAL = new QueryState(-1, false); - static final QueryState CANCELLED_WHILE_IN_PROGRESS = new QueryState(Integer.MIN_VALUE, false); - static final QueryState CANCELLED_WHILE_COMPLETE = new QueryState(Integer.MIN_VALUE + 1, false); - - final int retryCount; - final boolean inProgress; - - private QueryState(int count, boolean inProgress) { - this.retryCount = count; - this.inProgress = inProgress; - } + private void setFinalException(Connection connection, Exception exception) { + RequestHandler.this.setFinalException(this, connection, exception); + } - boolean isInProgressAt(int retryCount) { - return inProgress && this.retryCount == retryCount; - } + private void setFinalResult(Connection connection, Message.Response response) { + RequestHandler.this.setFinalResult(this, connection, response); + } + } + + /** + * The state of a SpeculativeExecution. + * + *

This is used to prevent races between request completion (either success or error) and + * timeout. A retry is in progress once we have written the request to the connection and until we + * get back a response (see onSet or onException) or a timeout (see onTimeout). The count + * increments on each retry. + */ + static class QueryState { + static final QueryState INITIAL = new QueryState(-1, false); + static final QueryState CANCELLED_WHILE_IN_PROGRESS = new QueryState(Integer.MIN_VALUE, false); + static final QueryState CANCELLED_WHILE_COMPLETE = new QueryState(Integer.MIN_VALUE + 1, false); + + final int retryCount; + final boolean inProgress; + + private QueryState(int count, boolean inProgress) { + this.retryCount = count; + this.inProgress = inProgress; + } - QueryState complete() { - assert inProgress; - return new QueryState(retryCount, false); - } + boolean isInProgressAt(int retryCount) { + return inProgress && this.retryCount == retryCount; + } - QueryState startNext() { - assert !inProgress; - return new QueryState(retryCount + 1, true); - } + QueryState complete() { + assert inProgress; + return new QueryState(retryCount, false); + } - public boolean isCancelled() { - return this == CANCELLED_WHILE_IN_PROGRESS || this == CANCELLED_WHILE_COMPLETE; - } + QueryState startNext() { + assert !inProgress; + return new QueryState(retryCount + 1, true); + } - @Override - public String toString() { - return String.format("QueryState(count=%d, inProgress=%s, cancelled=%s)", retryCount, inProgress, isCancelled()); - } + public boolean isCancelled() { + return this == CANCELLED_WHILE_IN_PROGRESS || this == CANCELLED_WHILE_COMPLETE; } - /** - * Wraps the iterator return by {@link com.datastax.driver.core.policies.LoadBalancingPolicy} to make it safe for - * concurrent access by multiple threads. - */ - static class QueryPlan { - private final Iterator iterator; + @Override + public String toString() { + return String.format( + "QueryState(count=%d, inProgress=%s, cancelled=%s)", + retryCount, inProgress, isCancelled()); + } + } - QueryPlan(Iterator iterator) { - this.iterator = iterator; - } + /** + * Wraps the iterator return by {@link com.datastax.driver.core.policies.LoadBalancingPolicy} to + * make it safe for concurrent access by multiple threads. + */ + static class QueryPlan { + private final Iterator iterator; - /** - * @return null if there are no more hosts - */ - synchronized Host next() { - return iterator.hasNext() ? iterator.next() : null; - } + QueryPlan(Iterator iterator) { + this.iterator = iterator; + } + + /** @return null if there are no more hosts */ + synchronized Host next() { + return iterator.hasNext() ? iterator.next() : null; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Requests.java b/driver-core/src/main/java/com/datastax/driver/core/Requests.java index a4b69d2e284..04833ce159c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Requests.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Requests.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
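The `QueryState` class above pairs a retry count with an in-progress flag behind an atomic reference, so that whichever of the response, error, or timeout callbacks wins the `compareAndSet` completes the attempt and the others back off. A stripped-down sketch of that guard, with illustrative names that are not part of the driver:

```java
import java.util.concurrent.atomic.AtomicReference;

public class CompletionGuard {

  /** Immutable state: which attempt is outstanding and whether it is still in flight. */
  static final class State {
    final int attempt;
    final boolean inProgress;

    State(int attempt, boolean inProgress) {
      this.attempt = attempt;
      this.inProgress = inProgress;
    }
  }

  private final AtomicReference<State> state = new AtomicReference<State>(new State(0, true));

  /**
   * Called by both the response path and the timeout path for a given attempt. Only the first
   * caller to move the state from "in progress" to "complete" wins; the loser sees false and
   * must not touch the final result.
   */
  boolean tryComplete(int attempt) {
    State current = state.get();
    return current.inProgress
        && current.attempt == attempt
        && state.compareAndSet(current, new State(attempt, false));
  }

  /** Starts the next attempt, mirroring QueryState.startNext(). */
  void startNext() {
    State current = state.get();
    state.set(new State(current.attempt + 1, true));
  }
}
```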
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +20,8 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import io.netty.buffer.ByteBuf; - import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.List; @@ -27,639 +29,726 @@ class Requests { - private Requests() { - } + static final ByteBuffer[] EMPTY_BB_ARRAY = new ByteBuffer[0]; - static class Startup extends Message.Request { - private static final String CQL_VERSION_OPTION = "CQL_VERSION"; - private static final String CQL_VERSION = "3.0.0"; + private Requests() {} - static final String COMPRESSION_OPTION = "COMPRESSION"; + static class Startup extends Message.Request { + private static final String CQL_VERSION_OPTION = "CQL_VERSION"; + private static final String CQL_VERSION = "3.0.0"; + private static final String DRIVER_VERSION_OPTION = "DRIVER_VERSION"; + private static final String DRIVER_NAME_OPTION = "DRIVER_NAME"; + private static final String DRIVER_NAME = "Apache Cassandra Java Driver"; - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Startup msg, ByteBuf dest, ProtocolVersion version) { - CBUtil.writeStringMap(msg.options, dest); - } + static final String COMPRESSION_OPTION = "COMPRESSION"; + static final String NO_COMPACT_OPTION = "NO_COMPACT"; - @Override - public int encodedSize(Startup msg, ProtocolVersion version) { - return CBUtil.sizeOfStringMap(msg.options); - } + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Startup msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeStringMap(msg.options, dest); + } + + @Override + public int encodedSize(Startup msg, ProtocolVersion version) { + return CBUtil.sizeOfStringMap(msg.options); + } }; - private final Map options; - private final ProtocolOptions.Compression compression; + private final Map options; + private final ProtocolOptions.Compression compression; + private final boolean noCompact; - Startup(ProtocolOptions.Compression compression) { - super(Message.Request.Type.STARTUP); - this.compression = compression; + Startup(ProtocolOptions.Compression compression, boolean noCompact) { + super(Message.Request.Type.STARTUP); + this.compression = compression; + this.noCompact = noCompact; - ImmutableMap.Builder map = new ImmutableMap.Builder(); - map.put(CQL_VERSION_OPTION, CQL_VERSION); - if (compression != ProtocolOptions.Compression.NONE) - map.put(COMPRESSION_OPTION, compression.toString()); - this.options = map.build(); - } + ImmutableMap.Builder map = new ImmutableMap.Builder(); + map.put(CQL_VERSION_OPTION, CQL_VERSION); + if (compression != ProtocolOptions.Compression.NONE) + map.put(COMPRESSION_OPTION, compression.toString()); + if (noCompact) map.put(NO_COMPACT_OPTION, "true"); - @Override - protected Request copyInternal() { - return new Startup(compression); - } + map.put(DRIVER_VERSION_OPTION, Cluster.getDriverVersion()); + map.put(DRIVER_NAME_OPTION, DRIVER_NAME); - @Override - public String toString() { - return "STARTUP " + options; - } + 
this.options = map.build(); } - // Only for protocol v1 - static class Credentials extends Message.Request { + @Override + protected Request copyInternal() { + return new Startup(compression, noCompact); + } - static final Message.Coder coder = new Message.Coder() { + @Override + public String toString() { + return "STARTUP " + options; + } + } - @Override - public void encode(Credentials msg, ByteBuf dest, ProtocolVersion version) { - assert version == ProtocolVersion.V1; - CBUtil.writeStringMap(msg.credentials, dest); - } + // Only for protocol v1 + static class Credentials extends Message.Request { - @Override - public int encodedSize(Credentials msg, ProtocolVersion version) { - assert version == ProtocolVersion.V1; - return CBUtil.sizeOfStringMap(msg.credentials); - } + static final Message.Coder coder = + new Message.Coder() { + + @Override + public void encode(Credentials msg, ByteBuf dest, ProtocolVersion version) { + assert version == ProtocolVersion.V1; + CBUtil.writeStringMap(msg.credentials, dest); + } + + @Override + public int encodedSize(Credentials msg, ProtocolVersion version) { + assert version == ProtocolVersion.V1; + return CBUtil.sizeOfStringMap(msg.credentials); + } }; - private final Map credentials; + private final Map credentials; - Credentials(Map credentials) { - super(Message.Request.Type.CREDENTIALS); - this.credentials = credentials; - } + Credentials(Map credentials) { + super(Message.Request.Type.CREDENTIALS); + this.credentials = credentials; + } - @Override - protected Request copyInternal() { - return new Credentials(credentials); - } + @Override + protected Request copyInternal() { + return new Credentials(credentials); } + } - static class Options extends Message.Request { + static class Options extends Message.Request { - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Options msg, ByteBuf dest, ProtocolVersion version) { - } + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Options msg, ByteBuf dest, ProtocolVersion version) {} - @Override - public int encodedSize(Options msg, ProtocolVersion version) { - return 0; - } + @Override + public int encodedSize(Options msg, ProtocolVersion version) { + return 0; + } }; - Options() { - super(Message.Request.Type.OPTIONS); - } + Options() { + super(Message.Request.Type.OPTIONS); + } - @Override - protected Request copyInternal() { - return new Options(); - } + @Override + protected Request copyInternal() { + return new Options(); + } - @Override - public String toString() { - return "OPTIONS"; - } + @Override + public String toString() { + return "OPTIONS"; } + } + + static class Query extends Message.Request { + + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Query msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeLongString(msg.query, dest); + msg.options.encode(dest, version); + } + + @Override + public int encodedSize(Query msg, ProtocolVersion version) { + return CBUtil.sizeOfLongString(msg.query) + msg.options.encodedSize(version); + } + }; - static class Query extends Message.Request { + final String query; + final QueryProtocolOptions options; - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Query msg, ByteBuf dest, ProtocolVersion version) { - CBUtil.writeLongString(msg.query, dest); - msg.options.encode(dest, version); - } + Query(String query) { + this(query, QueryProtocolOptions.DEFAULT, false); + } - @Override - 
public int encodedSize(Query msg, ProtocolVersion version) { - return CBUtil.sizeOfLongString(msg.query) - + msg.options.encodedSize(version); - } - }; + Query(String query, QueryProtocolOptions options, boolean tracingRequested) { + super(Type.QUERY, tracingRequested); + this.query = query; + this.options = options; + } - final String query; - final QueryProtocolOptions options; + @Override + protected Request copyInternal() { + return new Query(this.query, options, isTracingRequested()); + } - Query(String query) { - this(query, QueryProtocolOptions.DEFAULT, false); - } + @Override + protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { + return new Query(this.query, options.copy(newConsistencyLevel), isTracingRequested()); + } - Query(String query, QueryProtocolOptions options, boolean tracingRequested) { - super(Type.QUERY, tracingRequested); - this.query = query; - this.options = options; - } + @Override + public String toString() { + return "QUERY " + query + '(' + options + ')'; + } + } + + static class Execute extends Message.Request { + + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Execute msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeShortBytes(msg.statementId.bytes, dest); + if (ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(version)) + CBUtil.writeShortBytes(msg.resultMetadataId.bytes, dest); + msg.options.encode(dest, version); + } + + @Override + public int encodedSize(Execute msg, ProtocolVersion version) { + int size = CBUtil.sizeOfShortBytes(msg.statementId.bytes); + if (ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(version)) + size += CBUtil.sizeOfShortBytes(msg.resultMetadataId.bytes); + size += msg.options.encodedSize(version); + return size; + } + }; - @Override - protected Request copyInternal() { - return new Query(this.query, options, isTracingRequested()); - } + final MD5Digest statementId; + final MD5Digest resultMetadataId; + final QueryProtocolOptions options; + + Execute( + MD5Digest statementId, + MD5Digest resultMetadataId, + QueryProtocolOptions options, + boolean tracingRequested) { + super(Message.Request.Type.EXECUTE, tracingRequested); + this.statementId = statementId; + this.resultMetadataId = resultMetadataId; + this.options = options; + } - @Override - protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { - return new Query(this.query, options.copy(newConsistencyLevel), isTracingRequested()); - } + @Override + protected Request copyInternal() { + return new Execute(statementId, resultMetadataId, options, isTracingRequested()); + } - @Override - public String toString() { - return "QUERY " + query + '(' + options + ')'; - } + @Override + protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { + return new Execute( + statementId, resultMetadataId, options.copy(newConsistencyLevel), isTracingRequested()); } - static class Execute extends Message.Request { + @Override + public String toString() { + if (resultMetadataId != null) + return "EXECUTE preparedId: " + + statementId + + " resultMetadataId: " + + resultMetadataId + + " (" + + options + + ')'; + else return "EXECUTE preparedId: " + statementId + " (" + options + ')'; + } + } + + enum QueryFlag { + VALUES(0x00000001), + SKIP_METADATA(0x00000002), + PAGE_SIZE(0x00000004), + PAGING_STATE(0x00000008), + SERIAL_CONSISTENCY(0x00000010), + DEFAULT_TIMESTAMP(0x00000020), + VALUE_NAMES(0x00000040), + NOW_IN_SECONDS(0x00000100), + ; + + private int mask; + + QueryFlag(int mask) { 
+ this.mask = mask; + } - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Execute msg, ByteBuf dest, ProtocolVersion version) { - CBUtil.writeBytes(msg.statementId.bytes, dest); - msg.options.encode(dest, version); - } + static EnumSet deserialize(int flags) { + EnumSet set = EnumSet.noneOf(QueryFlag.class); + for (QueryFlag flag : values()) { + if ((flags & flag.mask) != 0) set.add(flag); + } + return set; + } - @Override - public int encodedSize(Execute msg, ProtocolVersion version) { - return CBUtil.sizeOfBytes(msg.statementId.bytes) - + msg.options.encodedSize(version); - } - }; + static void serialize(EnumSet flags, ByteBuf dest, ProtocolVersion version) { + int i = 0; + for (QueryFlag flag : flags) i |= flag.mask; + if (version.compareTo(ProtocolVersion.V5) >= 0) { + dest.writeInt(i); + } else { + dest.writeByte((byte) i); + } + } - final MD5Digest statementId; - final QueryProtocolOptions options; - - Execute(MD5Digest statementId, QueryProtocolOptions options, boolean tracingRequested) { - super(Message.Request.Type.EXECUTE, tracingRequested); - this.statementId = statementId; - this.options = options; - } - - @Override - protected Request copyInternal() { - return new Execute(statementId, options, isTracingRequested()); - } - - @Override - protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { - return new Execute(statementId, options.copy(newConsistencyLevel), isTracingRequested()); - } - - @Override - public String toString() { - return "EXECUTE " + statementId + " (" + options + ')'; - } - } - - enum QueryFlag { - // The order of that enum matters!! - VALUES, - SKIP_METADATA, - PAGE_SIZE, - PAGING_STATE, - SERIAL_CONSISTENCY, - DEFAULT_TIMESTAMP, - VALUE_NAMES; - - static EnumSet deserialize(int flags) { - EnumSet set = EnumSet.noneOf(QueryFlag.class); - QueryFlag[] values = QueryFlag.values(); - for (int n = 0; n < values.length; n++) { - if ((flags & (1 << n)) != 0) - set.add(values[n]); - } - return set; - } + static int serializedSize(ProtocolVersion version) { + return version.compareTo(ProtocolVersion.V5) >= 0 ? 
4 : 1; + } + } + + static class QueryProtocolOptions { + + static final QueryProtocolOptions DEFAULT = + new QueryProtocolOptions( + Message.Request.Type.QUERY, + ConsistencyLevel.ONE, + EMPTY_BB_ARRAY, + Collections.emptyMap(), + false, + -1, + null, + ConsistencyLevel.SERIAL, + Long.MIN_VALUE, + Integer.MIN_VALUE); + + private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); + private final Message.Request.Type requestType; + final ConsistencyLevel consistency; + final ByteBuffer[] positionalValues; + final Map namedValues; + final boolean skipMetadata; + final int pageSize; + final ByteBuffer pagingState; + final ConsistencyLevel serialConsistency; + final long defaultTimestamp; + final int nowInSeconds; + + QueryProtocolOptions( + Message.Request.Type requestType, + ConsistencyLevel consistency, + ByteBuffer[] positionalValues, + Map namedValues, + boolean skipMetadata, + int pageSize, + ByteBuffer pagingState, + ConsistencyLevel serialConsistency, + long defaultTimestamp, + int nowInSeconds) { + + Preconditions.checkArgument(positionalValues.length == 0 || namedValues.isEmpty()); + + this.requestType = requestType; + this.consistency = consistency; + this.positionalValues = positionalValues; + this.namedValues = namedValues; + this.skipMetadata = skipMetadata; + this.pageSize = pageSize; + this.pagingState = pagingState; + this.serialConsistency = serialConsistency; + this.defaultTimestamp = defaultTimestamp; + this.nowInSeconds = nowInSeconds; + + // Populate flags + if (positionalValues.length > 0) { + flags.add(QueryFlag.VALUES); + } + if (!namedValues.isEmpty()) { + flags.add(QueryFlag.VALUES); + flags.add(QueryFlag.VALUE_NAMES); + } + if (skipMetadata) flags.add(QueryFlag.SKIP_METADATA); + if (pageSize >= 0) flags.add(QueryFlag.PAGE_SIZE); + if (pagingState != null) flags.add(QueryFlag.PAGING_STATE); + if (serialConsistency != ConsistencyLevel.SERIAL) flags.add(QueryFlag.SERIAL_CONSISTENCY); + if (defaultTimestamp != Long.MIN_VALUE) flags.add(QueryFlag.DEFAULT_TIMESTAMP); + if (nowInSeconds != Integer.MIN_VALUE) flags.add(QueryFlag.NOW_IN_SECONDS); + } - static void serialize(EnumSet flags, ByteBuf dest, ProtocolVersion version) { - int i = 0; - for (QueryFlag flag : flags) - i |= 1 << flag.ordinal(); - if (version.compareTo(ProtocolVersion.V5) >= 0) { - dest.writeInt(i); + QueryProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { + return new QueryProtocolOptions( + requestType, + newConsistencyLevel, + positionalValues, + namedValues, + skipMetadata, + pageSize, + pagingState, + serialConsistency, + defaultTimestamp, + nowInSeconds); + } + + void encode(ByteBuf dest, ProtocolVersion version) { + switch (version) { + case V1: + // only EXECUTE messages have variables in V1, and their list must be written + // even if it is empty; and they are never named + if (requestType == Message.Request.Type.EXECUTE) + CBUtil.writeValueList(positionalValues, dest); + CBUtil.writeConsistencyLevel(consistency, dest); + break; + case V2: + case V3: + case V4: + case V5: + case V6: + CBUtil.writeConsistencyLevel(consistency, dest); + QueryFlag.serialize(flags, dest, version); + if (flags.contains(QueryFlag.VALUES)) { + if (flags.contains(QueryFlag.VALUE_NAMES)) { + assert version.compareTo(ProtocolVersion.V3) >= 0; + CBUtil.writeNamedValueList(namedValues, dest); } else { - dest.writeByte((byte) i); - } - } - - static int serializedSize(ProtocolVersion version) { - return version.compareTo(ProtocolVersion.V5) >= 0 ? 
4 : 1; - } - } - - static class QueryProtocolOptions { - - static final QueryProtocolOptions DEFAULT = new QueryProtocolOptions( - Message.Request.Type.QUERY, - ConsistencyLevel.ONE, - Collections.emptyList(), - Collections.emptyMap(), - false, - -1, - null, - ConsistencyLevel.SERIAL, Long.MIN_VALUE); - - private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); - private final Message.Request.Type requestType; - final ConsistencyLevel consistency; - final List positionalValues; - final Map namedValues; - final boolean skipMetadata; - final int pageSize; - final ByteBuffer pagingState; - final ConsistencyLevel serialConsistency; - final long defaultTimestamp; - - QueryProtocolOptions(Message.Request.Type requestType, - ConsistencyLevel consistency, - List positionalValues, - Map namedValues, - boolean skipMetadata, - int pageSize, - ByteBuffer pagingState, - ConsistencyLevel serialConsistency, - long defaultTimestamp) { - - Preconditions.checkArgument(positionalValues.isEmpty() || namedValues.isEmpty()); - - this.requestType = requestType; - this.consistency = consistency; - this.positionalValues = positionalValues; - this.namedValues = namedValues; - this.skipMetadata = skipMetadata; - this.pageSize = pageSize; - this.pagingState = pagingState; - this.serialConsistency = serialConsistency; - this.defaultTimestamp = defaultTimestamp; - - // Populate flags - if (!positionalValues.isEmpty()) - flags.add(QueryFlag.VALUES); - if (!namedValues.isEmpty()) { - flags.add(QueryFlag.VALUES); - flags.add(QueryFlag.VALUE_NAMES); - } - if (skipMetadata) - flags.add(QueryFlag.SKIP_METADATA); - if (pageSize >= 0) - flags.add(QueryFlag.PAGE_SIZE); - if (pagingState != null) - flags.add(QueryFlag.PAGING_STATE); - if (serialConsistency != ConsistencyLevel.SERIAL) - flags.add(QueryFlag.SERIAL_CONSISTENCY); - if (defaultTimestamp != Long.MIN_VALUE) - flags.add(QueryFlag.DEFAULT_TIMESTAMP); - } - - QueryProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { - return new QueryProtocolOptions(requestType, newConsistencyLevel, positionalValues, namedValues, skipMetadata, pageSize, pagingState, serialConsistency, defaultTimestamp); - } - - void encode(ByteBuf dest, ProtocolVersion version) { - switch (version) { - case V1: - // only EXECUTE messages have variables in V1, and their list must be written - // even if it is empty; and they are never named - if (requestType == Message.Request.Type.EXECUTE) - CBUtil.writeValueList(positionalValues, dest); - CBUtil.writeConsistencyLevel(consistency, dest); - break; - case V2: - case V3: - case V4: - case V5: - CBUtil.writeConsistencyLevel(consistency, dest); - QueryFlag.serialize(flags, dest, version); - if (flags.contains(QueryFlag.VALUES)) { - if (flags.contains(QueryFlag.VALUE_NAMES)) { - assert version.compareTo(ProtocolVersion.V3) >= 0; - CBUtil.writeNamedValueList(namedValues, dest); - } else { - CBUtil.writeValueList(positionalValues, dest); - } - } - if (flags.contains(QueryFlag.PAGE_SIZE)) - dest.writeInt(pageSize); - if (flags.contains(QueryFlag.PAGING_STATE)) - CBUtil.writeValue(pagingState, dest); - if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) - CBUtil.writeConsistencyLevel(serialConsistency, dest); - if (version.compareTo(ProtocolVersion.V3) >= 0 && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) - dest.writeLong(defaultTimestamp); - break; - default: - throw version.unsupported(); + CBUtil.writeValueList(positionalValues, dest); } - } - - int encodedSize(ProtocolVersion version) { - switch (version) { - case V1: - // only EXECUTE messages have 
variables in V1, and their list must be written - // even if it is empty; and they are never named - return (requestType == Message.Request.Type.EXECUTE ? CBUtil.sizeOfValueList(positionalValues) : 0) - + CBUtil.sizeOfConsistencyLevel(consistency); - case V2: - case V3: - case V4: - case V5: - int size = 0; - size += CBUtil.sizeOfConsistencyLevel(consistency); - size += QueryFlag.serializedSize(version); - if (flags.contains(QueryFlag.VALUES)) { - if (flags.contains(QueryFlag.VALUE_NAMES)) { - assert version.compareTo(ProtocolVersion.V3) >= 0; - size += CBUtil.sizeOfNamedValueList(namedValues); - } else { - size += CBUtil.sizeOfValueList(positionalValues); - } - } - if (flags.contains(QueryFlag.PAGE_SIZE)) - size += 4; - if (flags.contains(QueryFlag.PAGING_STATE)) - size += CBUtil.sizeOfValue(pagingState); - if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) - size += CBUtil.sizeOfConsistencyLevel(serialConsistency); - if (version == ProtocolVersion.V3 && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) - size += 8; - return size; - default: - throw version.unsupported(); - } - } + } + if (flags.contains(QueryFlag.PAGE_SIZE)) dest.writeInt(pageSize); + if (flags.contains(QueryFlag.PAGING_STATE)) CBUtil.writeValue(pagingState, dest); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + CBUtil.writeConsistencyLevel(serialConsistency, dest); + if (version.compareTo(ProtocolVersion.V3) >= 0 + && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) dest.writeLong(defaultTimestamp); + if (version.compareTo(ProtocolVersion.V5) >= 0 + && flags.contains(QueryFlag.NOW_IN_SECONDS)) dest.writeInt(nowInSeconds); + break; + default: + throw version.unsupported(); + } + } - @Override - public String toString() { - return String.format("[cl=%s, positionalVals=%s, namedVals=%s, skip=%b, psize=%d, state=%s, serialCl=%s]", - consistency, positionalValues, namedValues, skipMetadata, pageSize, pagingState, serialConsistency); - } + int encodedSize(ProtocolVersion version) { + switch (version) { + case V1: + // only EXECUTE messages have variables in V1, and their list must be written + // even if it is empty; and they are never named + return (requestType == Message.Request.Type.EXECUTE + ? 
CBUtil.sizeOfValueList(positionalValues) + : 0) + + CBUtil.sizeOfConsistencyLevel(consistency); + case V2: + case V3: + case V4: + case V5: + case V6: + int size = 0; + size += CBUtil.sizeOfConsistencyLevel(consistency); + size += QueryFlag.serializedSize(version); + if (flags.contains(QueryFlag.VALUES)) { + if (flags.contains(QueryFlag.VALUE_NAMES)) { + assert version.compareTo(ProtocolVersion.V3) >= 0; + size += CBUtil.sizeOfNamedValueList(namedValues); + } else { + size += CBUtil.sizeOfValueList(positionalValues); + } + } + if (flags.contains(QueryFlag.PAGE_SIZE)) size += 4; + if (flags.contains(QueryFlag.PAGING_STATE)) size += CBUtil.sizeOfValue(pagingState); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + size += CBUtil.sizeOfConsistencyLevel(serialConsistency); + if (version.compareTo(ProtocolVersion.V3) >= 0 + && flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) size += 8; + if (version.compareTo(ProtocolVersion.V5) >= 0 + && flags.contains(QueryFlag.NOW_IN_SECONDS)) size += 4; + return size; + default: + throw version.unsupported(); + } } - static class Batch extends Message.Request { + @Override + public String toString() { + return String.format( + "[cl=%s, positionalVals=%s, namedVals=%s, skip=%b, psize=%d, state=%s, serialCl=%s]", + consistency, + Arrays.toString(positionalValues), + namedValues, + skipMetadata, + pageSize, + pagingState, + serialConsistency); + } + } - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Batch msg, ByteBuf dest, ProtocolVersion version) { - int queries = msg.queryOrIdList.size(); - assert queries <= 0xFFFF; + static class Batch extends Message.Request { - dest.writeByte(fromType(msg.type)); - dest.writeShort(queries); + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Batch msg, ByteBuf dest, ProtocolVersion version) { + int queries = msg.queryOrIdList.size(); + assert queries <= 0xFFFF; - for (int i = 0; i < queries; i++) { - Object q = msg.queryOrIdList.get(i); - dest.writeByte((byte) (q instanceof String ? 0 : 1)); - if (q instanceof String) - CBUtil.writeLongString((String) q, dest); - else - CBUtil.writeBytes(((MD5Digest) q).bytes, dest); + dest.writeByte(fromType(msg.type)); + dest.writeShort(queries); - CBUtil.writeValueList(msg.values.get(i), dest); - } + for (int i = 0; i < queries; i++) { + Object q = msg.queryOrIdList.get(i); + dest.writeByte((byte) (q instanceof String ? 0 : 1)); + if (q instanceof String) CBUtil.writeLongString((String) q, dest); + else CBUtil.writeShortBytes(((MD5Digest) q).bytes, dest); - msg.options.encode(dest, version); + CBUtil.writeValueList(msg.values[i], dest); } - @Override - public int encodedSize(Batch msg, ProtocolVersion version) { - int size = 3; // type + nb queries - for (int i = 0; i < msg.queryOrIdList.size(); i++) { - Object q = msg.queryOrIdList.get(i); - size += 1 + (q instanceof String - ? CBUtil.sizeOfLongString((String) q) - : CBUtil.sizeOfBytes(((MD5Digest) q).bytes)); - - size += CBUtil.sizeOfValueList(msg.values.get(i)); - } - size += msg.options.encodedSize(version); - return size; + msg.options.encode(dest, version); + } + + @Override + public int encodedSize(Batch msg, ProtocolVersion version) { + int size = 3; // type + nb queries + for (int i = 0; i < msg.queryOrIdList.size(); i++) { + Object q = msg.queryOrIdList.get(i); + size += + 1 + + (q instanceof String + ? 
CBUtil.sizeOfLongString((String) q) + : CBUtil.sizeOfShortBytes(((MD5Digest) q).bytes)); + + size += CBUtil.sizeOfValueList(msg.values[i]); } + size += msg.options.encodedSize(version); + return size; + } - private byte fromType(BatchStatement.Type type) { - switch (type) { - case LOGGED: - return 0; - case UNLOGGED: - return 1; - case COUNTER: - return 2; - default: - throw new AssertionError(); - } + private byte fromType(BatchStatement.Type type) { + switch (type) { + case LOGGED: + return 0; + case UNLOGGED: + return 1; + case COUNTER: + return 2; + default: + throw new AssertionError(); } + } }; - final BatchStatement.Type type; - final List queryOrIdList; - final List> values; - final BatchProtocolOptions options; - - Batch(BatchStatement.Type type, List queryOrIdList, List> values, BatchProtocolOptions options, boolean tracingRequested) { - super(Message.Request.Type.BATCH, tracingRequested); - this.type = type; - this.queryOrIdList = queryOrIdList; - this.values = values; - this.options = options; - } - - @Override - protected Request copyInternal() { - return new Batch(type, queryOrIdList, values, options, isTracingRequested()); - } - - @Override - protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { - return new Batch(type, queryOrIdList, values, options.copy(newConsistencyLevel), isTracingRequested()); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("BATCH of ["); - for (int i = 0; i < queryOrIdList.size(); i++) { - if (i > 0) sb.append(", "); - sb.append(queryOrIdList.get(i)).append(" with ").append(values.get(i).size()).append(" values"); - } - sb.append("] with options ").append(options); - return sb.toString(); - } - } - - static class BatchProtocolOptions { - private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); - final ConsistencyLevel consistency; - final ConsistencyLevel serialConsistency; - final long defaultTimestamp; - - BatchProtocolOptions(ConsistencyLevel consistency, ConsistencyLevel serialConsistency, long defaultTimestamp) { - this.consistency = consistency; - this.serialConsistency = serialConsistency; - this.defaultTimestamp = defaultTimestamp; - - if (serialConsistency != ConsistencyLevel.SERIAL) - flags.add(QueryFlag.SERIAL_CONSISTENCY); - if (defaultTimestamp != Long.MIN_VALUE) - flags.add(QueryFlag.DEFAULT_TIMESTAMP); - } - - BatchProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { - return new BatchProtocolOptions(newConsistencyLevel, serialConsistency, defaultTimestamp); - } - - void encode(ByteBuf dest, ProtocolVersion version) { - switch (version) { - case V2: - CBUtil.writeConsistencyLevel(consistency, dest); - break; - case V3: - case V4: - case V5: - CBUtil.writeConsistencyLevel(consistency, dest); - QueryFlag.serialize(flags, dest, version); - if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) - CBUtil.writeConsistencyLevel(serialConsistency, dest); - if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) - dest.writeLong(defaultTimestamp); - break; - default: - throw version.unsupported(); - } - } - - int encodedSize(ProtocolVersion version) { - switch (version) { - case V2: - return CBUtil.sizeOfConsistencyLevel(consistency); - case V3: - case V4: - case V5: - int size = 0; - size += CBUtil.sizeOfConsistencyLevel(consistency); - size += QueryFlag.serializedSize(version); - if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) - size += CBUtil.sizeOfConsistencyLevel(serialConsistency); - if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) - size += 8; - return size; - 
default: - throw version.unsupported(); - } - } - - @Override - public String toString() { - return String.format("[cl=%s, serialCl=%s, defaultTs=%d]", - consistency, serialConsistency, defaultTimestamp); - } + final BatchStatement.Type type; + final List queryOrIdList; + final ByteBuffer[][] values; + final BatchProtocolOptions options; + + Batch( + BatchStatement.Type type, + List queryOrIdList, + ByteBuffer[][] values, + BatchProtocolOptions options, + boolean tracingRequested) { + super(Message.Request.Type.BATCH, tracingRequested); + this.type = type; + this.queryOrIdList = queryOrIdList; + this.values = values; + this.options = options; } - static class Prepare extends Message.Request { + @Override + protected Request copyInternal() { + return new Batch(type, queryOrIdList, values, options, isTracingRequested()); + } - static final Message.Coder coder = new Message.Coder() { + @Override + protected Request copyInternal(ConsistencyLevel newConsistencyLevel) { + return new Batch( + type, queryOrIdList, values, options.copy(newConsistencyLevel), isTracingRequested()); + } - @Override - public void encode(Prepare msg, ByteBuf dest, ProtocolVersion version) { - CBUtil.writeLongString(msg.query, dest); + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("BATCH of ["); + for (int i = 0; i < queryOrIdList.size(); i++) { + if (i > 0) sb.append(", "); + sb.append(queryOrIdList.get(i)).append(" with ").append(values[i].length).append(" values"); + } + sb.append("] with options ").append(options); + return sb.toString(); + } + } + + static class BatchProtocolOptions { + private final EnumSet flags = EnumSet.noneOf(QueryFlag.class); + final ConsistencyLevel consistency; + final ConsistencyLevel serialConsistency; + final long defaultTimestamp; + final int nowInSeconds; + + BatchProtocolOptions( + ConsistencyLevel consistency, + ConsistencyLevel serialConsistency, + long defaultTimestamp, + int nowInSeconds) { + this.consistency = consistency; + this.serialConsistency = serialConsistency; + this.defaultTimestamp = defaultTimestamp; + this.nowInSeconds = nowInSeconds; + + if (serialConsistency != ConsistencyLevel.SERIAL) flags.add(QueryFlag.SERIAL_CONSISTENCY); + if (defaultTimestamp != Long.MIN_VALUE) flags.add(QueryFlag.DEFAULT_TIMESTAMP); + if (nowInSeconds != Integer.MIN_VALUE) flags.add(QueryFlag.NOW_IN_SECONDS); + } - if (version.compareTo(ProtocolVersion.V5) >= 0) { - // Write empty flags for now, to communicate that no keyspace is being set. 
- dest.writeInt(0); - } - } + BatchProtocolOptions copy(ConsistencyLevel newConsistencyLevel) { + return new BatchProtocolOptions( + newConsistencyLevel, serialConsistency, defaultTimestamp, nowInSeconds); + } - @Override - public int encodedSize(Prepare msg, ProtocolVersion version) { - return CBUtil.sizeOfLongString(msg.query); - } - }; + void encode(ByteBuf dest, ProtocolVersion version) { + switch (version) { + case V2: + CBUtil.writeConsistencyLevel(consistency, dest); + break; + case V3: + case V4: + case V5: + case V6: + CBUtil.writeConsistencyLevel(consistency, dest); + QueryFlag.serialize(flags, dest, version); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + CBUtil.writeConsistencyLevel(serialConsistency, dest); + if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) dest.writeLong(defaultTimestamp); + if (version.compareTo(ProtocolVersion.V5) >= 0 + && flags.contains(QueryFlag.NOW_IN_SECONDS)) dest.writeInt(nowInSeconds); + break; + default: + throw version.unsupported(); + } + } - private final String query; + int encodedSize(ProtocolVersion version) { + switch (version) { + case V2: + return CBUtil.sizeOfConsistencyLevel(consistency); + case V3: + case V4: + case V5: + case V6: + int size = 0; + size += CBUtil.sizeOfConsistencyLevel(consistency); + size += QueryFlag.serializedSize(version); + if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) + size += CBUtil.sizeOfConsistencyLevel(serialConsistency); + if (flags.contains(QueryFlag.DEFAULT_TIMESTAMP)) size += 8; + if (version.compareTo(ProtocolVersion.V5) >= 0 + && flags.contains(QueryFlag.NOW_IN_SECONDS)) size += 4; + return size; + default: + throw version.unsupported(); + } + } - Prepare(String query) { - super(Message.Request.Type.PREPARE); - this.query = query; - } + @Override + public String toString() { + return String.format( + "[cl=%s, serialCl=%s, defaultTs=%d]", consistency, serialConsistency, defaultTimestamp); + } + } - @Override - protected Request copyInternal() { - return new Prepare(query); - } + static class Prepare extends Message.Request { - @Override - public String toString() { - return "PREPARE " + query; - } - } + static final Message.Coder coder = + new Message.Coder() { - static class Register extends Message.Request { + @Override + public void encode(Prepare msg, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeLongString(msg.query, dest); - static final Message.Coder coder = new Message.Coder() { - @Override - public void encode(Register msg, ByteBuf dest, ProtocolVersion version) { - dest.writeShort(msg.eventTypes.size()); - for (ProtocolEvent.Type type : msg.eventTypes) - CBUtil.writeEnumValue(type, dest); + if (version.compareTo(ProtocolVersion.V5) >= 0) { + // Write empty flags for now, to communicate that no keyspace is being set. 
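For reference, `QueryFlag` earlier in this file now carries explicit bit masks, serialized as a single byte before protocol v5 and as a full int from v5 onward. A self-contained sketch of that mask-based encode/decode round trip, using a stand-in enum rather than the driver's own class:

```java
import java.util.EnumSet;

public class FlagMaskExample {

  enum Flag {
    VALUES(0x01),
    SKIP_METADATA(0x02),
    PAGE_SIZE(0x04),
    PAGING_STATE(0x08);

    final int mask;

    Flag(int mask) {
      this.mask = mask;
    }
  }

  static int serialize(EnumSet<Flag> flags) {
    int bits = 0;
    for (Flag f : flags) bits |= f.mask;
    return bits; // written as one byte pre-v5, as an int from v5 on
  }

  static EnumSet<Flag> deserialize(int bits) {
    EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
    for (Flag f : Flag.values()) {
      if ((bits & f.mask) != 0) flags.add(f);
    }
    return flags;
  }

  public static void main(String[] args) {
    EnumSet<Flag> flags = EnumSet.of(Flag.VALUES, Flag.PAGE_SIZE);
    int bits = serialize(flags); // 0x05
    System.out.println(deserialize(bits).equals(flags)); // true
  }
}
```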
+ dest.writeInt(0); } + } - @Override - public int encodedSize(Register msg, ProtocolVersion version) { - int size = 2; - for (ProtocolEvent.Type type : msg.eventTypes) - size += CBUtil.sizeOfEnumValue(type); - return size; + @Override + public int encodedSize(Prepare msg, ProtocolVersion version) { + int size = CBUtil.sizeOfLongString(msg.query); + + if (version.compareTo(ProtocolVersion.V5) >= 0) { + size += 4; // flags } + return size; + } + }; + + private final String query; + + Prepare(String query) { + super(Message.Request.Type.PREPARE); + this.query = query; + } + + @Override + protected Request copyInternal() { + return new Prepare(query); + } + + @Override + public String toString() { + return "PREPARE " + query; + } + } + + static class Register extends Message.Request { + + static final Message.Coder coder = + new Message.Coder() { + @Override + public void encode(Register msg, ByteBuf dest, ProtocolVersion version) { + dest.writeShort(msg.eventTypes.size()); + for (ProtocolEvent.Type type : msg.eventTypes) CBUtil.writeEnumValue(type, dest); + } + + @Override + public int encodedSize(Register msg, ProtocolVersion version) { + int size = 2; + for (ProtocolEvent.Type type : msg.eventTypes) size += CBUtil.sizeOfEnumValue(type); + return size; + } }; - private final List eventTypes; + private final List eventTypes; - Register(List eventTypes) { - super(Message.Request.Type.REGISTER); - this.eventTypes = eventTypes; - } + Register(List eventTypes) { + super(Message.Request.Type.REGISTER); + this.eventTypes = eventTypes; + } - @Override - protected Request copyInternal() { - return new Register(eventTypes); - } + @Override + protected Request copyInternal() { + return new Register(eventTypes); + } - @Override - public String toString() { - return "REGISTER " + eventTypes; - } + @Override + public String toString() { + return "REGISTER " + eventTypes; } + } - static class AuthResponse extends Message.Request { + static class AuthResponse extends Message.Request { - static final Message.Coder coder = new Message.Coder() { + static final Message.Coder coder = + new Message.Coder() { - @Override - public void encode(AuthResponse response, ByteBuf dest, ProtocolVersion version) { - CBUtil.writeValue(response.token, dest); - } + @Override + public void encode(AuthResponse response, ByteBuf dest, ProtocolVersion version) { + CBUtil.writeValue(response.token, dest); + } - @Override - public int encodedSize(AuthResponse response, ProtocolVersion version) { - return CBUtil.sizeOfValue(response.token); - } + @Override + public int encodedSize(AuthResponse response, ProtocolVersion version) { + return CBUtil.sizeOfValue(response.token); + } }; - private final byte[] token; + private final byte[] token; - AuthResponse(byte[] token) { - super(Message.Request.Type.AUTH_RESPONSE); - this.token = token; - } + AuthResponse(byte[] token) { + super(Message.Request.Type.AUTH_RESPONSE); + this.token = token; + } - @Override - protected Request copyInternal() { - return new AuthResponse(token); - } + @Override + protected Request copyInternal() { + return new AuthResponse(token); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Responses.java b/driver-core/src/main/java/com/datastax/driver/core/Responses.java index cecd1e1ae9f..f17018cc2c7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Responses.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Responses.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,671 +17,795 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.SchemaElement.AGGREGATE; +import static com.datastax.driver.core.SchemaElement.FUNCTION; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; + import com.datastax.driver.core.Responses.Result.Rows.Metadata; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.CASWriteUnknownException; +import com.datastax.driver.core.exceptions.CDCWriteException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.FunctionExecutionException; +import com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.ProtocolError; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.TruncateException; +import com.datastax.driver.core.exceptions.UnauthorizedException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.UnpreparedException; +import com.datastax.driver.core.exceptions.WriteFailureException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import com.datastax.driver.core.utils.Bytes; import io.netty.buffer.ByteBuf; - import java.net.InetAddress; -import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.ProtocolVersion.V4; -import static com.datastax.driver.core.SchemaElement.*; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; class Responses { - private Responses() { - } + private Responses() {} - static class Error extends Message.Response { + static class Error extends Message.Response { - static final Message.Decoder decoder = new Message.Decoder() { - @Override - public Error decode(ByteBuf body, 
ProtocolVersion version, CodecRegistry codecRegistry) { - ExceptionCode code = ExceptionCode.fromValue(body.readInt()); - String msg = CBUtil.readString(body); - Object infos = null; - ConsistencyLevel clt; - int received, blockFor; - switch (code) { - case UNAVAILABLE: - ConsistencyLevel clu = CBUtil.readConsistencyLevel(body); - int required = body.readInt(); - int alive = body.readInt(); - infos = new UnavailableException(clu, required, alive); - break; - case WRITE_TIMEOUT: - case READ_TIMEOUT: - clt = CBUtil.readConsistencyLevel(body); - received = body.readInt(); - blockFor = body.readInt(); - if (code == ExceptionCode.WRITE_TIMEOUT) { - WriteType writeType = Enum.valueOf(WriteType.class, CBUtil.readString(body)); - infos = new WriteTimeoutException(clt, writeType, received, blockFor); - } else { - byte dataPresent = body.readByte(); - infos = new ReadTimeoutException(clt, received, blockFor, dataPresent != 0); - } - break; - case WRITE_FAILURE: - case READ_FAILURE: - clt = CBUtil.readConsistencyLevel(body); - received = body.readInt(); - blockFor = body.readInt(); - int failures = body.readInt(); - Map failuresMap; - if (version.compareTo(ProtocolVersion.V5) < 0) { - failuresMap = Collections.emptyMap(); - } else { - failuresMap = new HashMap(); - for (int i = 0; i < failures; i++) { - InetAddress address = CBUtil.readInetWithoutPort(body); - int reasonCode = body.readUnsignedShort(); - failuresMap.put(address, reasonCode); - } - } - if (code == ExceptionCode.WRITE_FAILURE) { - WriteType writeType = Enum.valueOf(WriteType.class, CBUtil.readString(body)); - infos = new WriteFailureException(clt, writeType, received, blockFor, failures, failuresMap); - } else { - byte dataPresent = body.readByte(); - infos = new ReadFailureException(clt, received, blockFor, failures, failuresMap, dataPresent != 0); - } - break; - case UNPREPARED: - infos = MD5Digest.wrap(CBUtil.readBytes(body)); - break; - case ALREADY_EXISTS: - String ksName = CBUtil.readString(body); - String cfName = CBUtil.readString(body); - infos = new AlreadyExistsException(ksName, cfName); - break; + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Error decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + ExceptionCode code = ExceptionCode.fromValue(body.readInt()); + String msg = CBUtil.readString(body); + Object infos = null; + ConsistencyLevel clt; + int received, blockFor; + switch (code) { + case UNAVAILABLE: + ConsistencyLevel clu = CBUtil.readConsistencyLevel(body); + int required = body.readInt(); + int alive = body.readInt(); + infos = new UnavailableException(clu, required, alive); + break; + case WRITE_TIMEOUT: + case READ_TIMEOUT: + clt = CBUtil.readConsistencyLevel(body); + received = body.readInt(); + blockFor = body.readInt(); + if (code == ExceptionCode.WRITE_TIMEOUT) { + WriteType writeType = Enum.valueOf(WriteType.class, CBUtil.readString(body)); + infos = new WriteTimeoutException(clt, writeType, received, blockFor); + } else { + byte dataPresent = body.readByte(); + infos = new ReadTimeoutException(clt, received, blockFor, dataPresent != 0); + } + break; + case WRITE_FAILURE: + case READ_FAILURE: + clt = CBUtil.readConsistencyLevel(body); + received = body.readInt(); + blockFor = body.readInt(); + int failures = body.readInt(); + Map failuresMap; + if (version.compareTo(ProtocolVersion.V5) < 0) { + failuresMap = Collections.emptyMap(); + } else { + failuresMap = new HashMap(); + for (int i = 0; i < failures; i++) { + InetAddress address = 
CBUtil.readInetWithoutPort(body); + int reasonCode = body.readUnsignedShort(); + failuresMap.put(address, reasonCode); + } } - return new Error(version, code, msg, infos); + if (code == ExceptionCode.WRITE_FAILURE) { + WriteType writeType = Enum.valueOf(WriteType.class, CBUtil.readString(body)); + infos = + new WriteFailureException( + clt, writeType, received, blockFor, failures, failuresMap); + } else { + byte dataPresent = body.readByte(); + infos = + new ReadFailureException( + clt, received, blockFor, failures, failuresMap, dataPresent != 0); + } + break; + case CAS_WRITE_UNKNOWN: + clt = CBUtil.readConsistencyLevel(body); + received = body.readInt(); + blockFor = body.readInt(); + infos = new CASWriteUnknownException(clt, received, blockFor); + break; + case UNPREPARED: + infos = MD5Digest.wrap(CBUtil.readBytes(body)); + break; + case ALREADY_EXISTS: + String ksName = CBUtil.readString(body); + String cfName = CBUtil.readString(body); + infos = new AlreadyExistsException(ksName, cfName); + break; } + return new Error(version, code, msg, infos); + } }; - final ProtocolVersion serverProtocolVersion; - final ExceptionCode code; - final String message; - final Object infos; // can be null - - private Error(ProtocolVersion serverProtocolVersion, ExceptionCode code, String message, Object infos) { - super(Message.Response.Type.ERROR); - this.serverProtocolVersion = serverProtocolVersion; - this.code = code; - this.message = message; - this.infos = infos; - } - - DriverException asException(InetSocketAddress host) { - switch (code) { - case SERVER_ERROR: - return new ServerError(host, message); - case PROTOCOL_ERROR: - return new ProtocolError(host, message); - case BAD_CREDENTIALS: - return new AuthenticationException(host, message); - case UNAVAILABLE: - return ((UnavailableException) infos).copy(host); // We copy to have a nice stack trace - case OVERLOADED: - return new OverloadedException(host, message); - case IS_BOOTSTRAPPING: - return new BootstrappingException(host, message); - case TRUNCATE_ERROR: - return new TruncateException(host, message); - case WRITE_TIMEOUT: - return ((WriteTimeoutException) infos).copy(host); - case READ_TIMEOUT: - return ((ReadTimeoutException) infos).copy(host); - case WRITE_FAILURE: - return ((WriteFailureException) infos).copy(host); - case READ_FAILURE: - return ((ReadFailureException) infos).copy(host); - case FUNCTION_FAILURE: - return new FunctionExecutionException(host, message); - case SYNTAX_ERROR: - return new SyntaxError(host, message); - case UNAUTHORIZED: - return new UnauthorizedException(host, message); - case INVALID: - return new InvalidQueryException(host, message); - case CONFIG_ERROR: - return new InvalidConfigurationInQueryException(host, message); - case ALREADY_EXISTS: - return ((AlreadyExistsException) infos).copy(host); - case UNPREPARED: - return new UnpreparedException(host, message); - default: - return new DriverInternalError(String.format("Unknown protocol error code %s returned by %s. 
The error message was: %s", code, host, message)); - } - } - - @Override - public String toString() { - return "ERROR " + code + ": " + message; - } + final ProtocolVersion serverProtocolVersion; + final ExceptionCode code; + final String message; + final Object infos; // can be null + + private Error( + ProtocolVersion serverProtocolVersion, ExceptionCode code, String message, Object infos) { + super(Message.Response.Type.ERROR); + this.serverProtocolVersion = serverProtocolVersion; + this.code = code; + this.message = message; + this.infos = infos; } - static class Ready extends Message.Response { + DriverException asException(EndPoint endPoint) { + switch (code) { + case SERVER_ERROR: + return new ServerError(endPoint, message); + case PROTOCOL_ERROR: + return new ProtocolError(endPoint, message); + case BAD_CREDENTIALS: + return new AuthenticationException(endPoint, message); + case UNAVAILABLE: + return ((UnavailableException) infos) + .copy(endPoint); // We copy to have a nice stack trace + case OVERLOADED: + return new OverloadedException(endPoint, message); + case IS_BOOTSTRAPPING: + return new BootstrappingException(endPoint, message); + case TRUNCATE_ERROR: + return new TruncateException(endPoint, message); + case WRITE_TIMEOUT: + return ((WriteTimeoutException) infos).copy(endPoint); + case READ_TIMEOUT: + return ((ReadTimeoutException) infos).copy(endPoint); + case WRITE_FAILURE: + return ((WriteFailureException) infos).copy(endPoint); + case READ_FAILURE: + return ((ReadFailureException) infos).copy(endPoint); + case FUNCTION_FAILURE: + return new FunctionExecutionException(endPoint, message); + case CDC_WRITE_FAILURE: + return new CDCWriteException(endPoint, message); + case CAS_WRITE_UNKNOWN: + return ((CASWriteUnknownException) infos).copy(endPoint); + case SYNTAX_ERROR: + return new SyntaxError(endPoint, message); + case UNAUTHORIZED: + return new UnauthorizedException(endPoint, message); + case INVALID: + return new InvalidQueryException(endPoint, message); + case CONFIG_ERROR: + return new InvalidConfigurationInQueryException(endPoint, message); + case ALREADY_EXISTS: + return ((AlreadyExistsException) infos).copy(endPoint); + case UNPREPARED: + return new UnpreparedException(endPoint, message); + + default: + return new DriverInternalError( + String.format( + "Unknown protocol error code %s returned by %s. The error message was: %s", + code, endPoint, message)); + } + } - static final Message.Decoder decoder = new Message.Decoder() { - @Override - public Ready decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - // TODO: Would it be cool to return a singleton? Check we don't need to - // set the streamId or something - return new Ready(); - } + @Override + public String toString() { + return "ERROR " + code + ": " + message; + } + } + + static class Ready extends Message.Response { + + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Ready decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + // TODO: Would it be cool to return a singleton? 
Check we don't need to + // set the streamId or something + return new Ready(); + } }; - Ready() { - super(Message.Response.Type.READY); - } - - @Override - public String toString() { - return "READY"; - } + Ready() { + super(Message.Response.Type.READY); } - static class Authenticate extends Message.Response { - - static final Message.Decoder decoder = new Message.Decoder() { - @Override - public Authenticate decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - String authenticator = CBUtil.readString(body); - return new Authenticate(authenticator); - } + @Override + public String toString() { + return "READY"; + } + } + + static class Authenticate extends Message.Response { + + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Authenticate decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + String authenticator = CBUtil.readString(body); + return new Authenticate(authenticator); + } }; - final String authenticator; + final String authenticator; - Authenticate(String authenticator) { - super(Message.Response.Type.AUTHENTICATE); - this.authenticator = authenticator; - } + Authenticate(String authenticator) { + super(Message.Response.Type.AUTHENTICATE); + this.authenticator = authenticator; + } - @Override - public String toString() { - return "AUTHENTICATE " + authenticator; - } + @Override + public String toString() { + return "AUTHENTICATE " + authenticator; } + } - static class Supported extends Message.Response { + static class Supported extends Message.Response { - static final Message.Decoder decoder = new Message.Decoder() { - @Override - public Supported decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - return new Supported(CBUtil.readStringToStringListMap(body)); - } + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Supported decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + return new Supported(CBUtil.readStringToStringListMap(body)); + } }; - final Map> supported; - final Set supportedCompressions = EnumSet.noneOf(ProtocolOptions.Compression.class); + final Map> supported; + final Set supportedCompressions = + EnumSet.noneOf(ProtocolOptions.Compression.class); - Supported(Map> supported) { - super(Message.Response.Type.SUPPORTED); - this.supported = supported; + Supported(Map> supported) { + super(Message.Response.Type.SUPPORTED); + this.supported = supported; - parseCompressions(); - } + parseCompressions(); + } - private void parseCompressions() { - List compList = supported.get(Requests.Startup.COMPRESSION_OPTION); - if (compList == null) - return; + private void parseCompressions() { + List compList = supported.get(Requests.Startup.COMPRESSION_OPTION); + if (compList == null) return; - for (String compStr : compList) { - ProtocolOptions.Compression compr = ProtocolOptions.Compression.fromString(compStr); - if (compr != null) - supportedCompressions.add(compr); - } - } + for (String compStr : compList) { + ProtocolOptions.Compression compr = ProtocolOptions.Compression.fromString(compStr); + if (compr != null) supportedCompressions.add(compr); + } + } - @Override - public String toString() { - return "SUPPORTED " + supported; - } + @Override + public String toString() { + return "SUPPORTED " + supported; } + } - static abstract class Result extends Message.Response { + abstract static class Result extends Message.Response { - static final Message.Decoder decoder = new Message.Decoder() { 
- @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - Kind kind = Kind.fromId(body.readInt()); - return kind.subDecoder.decode(body, version, codecRegistry); - } + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + Kind kind = Kind.fromId(body.readInt()); + return kind.subDecoder.decode(body, version, codecRegistry); + } }; - enum Kind { - VOID(1, Void.subcodec), - ROWS(2, Rows.subcodec), - SET_KEYSPACE(3, SetKeyspace.subcodec), - PREPARED(4, Prepared.subcodec), - SCHEMA_CHANGE(5, SchemaChange.subcodec); - - private final int id; - final Message.Decoder subDecoder; - - private static final Kind[] ids; - - static { - int maxId = -1; - for (Kind k : Kind.values()) - maxId = Math.max(maxId, k.id); - ids = new Kind[maxId + 1]; - for (Kind k : Kind.values()) { - if (ids[k.id] != null) - throw new IllegalStateException("Duplicate kind id"); - ids[k.id] = k; - } - } - - Kind(int id, Message.Decoder subDecoder) { - this.id = id; - this.subDecoder = subDecoder; - } - - static Kind fromId(int id) { - Kind k = ids[id]; - if (k == null) - throw new DriverInternalError(String.format("Unknown kind id %d in RESULT message", id)); - return k; - } + enum Kind { + VOID(1, Void.subcodec), + ROWS(2, Rows.subcodec), + SET_KEYSPACE(3, SetKeyspace.subcodec), + PREPARED(4, Prepared.subcodec), + SCHEMA_CHANGE(5, SchemaChange.subcodec); + + private final int id; + final Message.Decoder subDecoder; + + private static final Kind[] ids; + + static { + int maxId = -1; + for (Kind k : Kind.values()) maxId = Math.max(maxId, k.id); + ids = new Kind[maxId + 1]; + for (Kind k : Kind.values()) { + if (ids[k.id] != null) throw new IllegalStateException("Duplicate kind id"); + ids[k.id] = k; } + } + + Kind(int id, Message.Decoder subDecoder) { + this.id = id; + this.subDecoder = subDecoder; + } + + static Kind fromId(int id) { + Kind k = ids[id]; + if (k == null) + throw new DriverInternalError(String.format("Unknown kind id %d in RESULT message", id)); + return k; + } + } - final Kind kind; - - protected Result(Kind kind) { - super(Message.Response.Type.RESULT); - this.kind = kind; - } + final Kind kind; - static class Void extends Result { - // Even though we have no specific information here, don't make a - // singleton since as each message it has in fact a streamid and connection. - Void() { - super(Kind.VOID); - } + protected Result(Kind kind) { + super(Message.Response.Type.RESULT); + this.kind = kind; + } - static final Message.Decoder subcodec = new Message.Decoder() { - @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - return new Void(); - } - }; + static class Void extends Result { + // Even though we have no specific information here, don't make a + // singleton since as each message it has in fact a streamid and connection. 
+ Void() { + super(Kind.VOID); + } + static final Message.Decoder subcodec = + new Message.Decoder() { @Override - public String toString() { - return "EMPTY RESULT"; + public Result decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + return new Void(); } - } + }; - static class SetKeyspace extends Result { - final String keyspace; + @Override + public String toString() { + return "EMPTY RESULT"; + } + } - private SetKeyspace(String keyspace) { - super(Kind.SET_KEYSPACE); - this.keyspace = keyspace; - } + static class SetKeyspace extends Result { + final String keyspace; - static final Message.Decoder subcodec = new Message.Decoder() { - @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - return new SetKeyspace(CBUtil.readString(body)); - } - }; + private SetKeyspace(String keyspace) { + super(Kind.SET_KEYSPACE); + this.keyspace = keyspace; + } + static final Message.Decoder subcodec = + new Message.Decoder() { @Override - public String toString() { - return "RESULT set keyspace " + keyspace; + public Result decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + return new SetKeyspace(CBUtil.readString(body)); } - } - - static class Rows extends Result { - - static class Metadata { - - private enum Flag { - // The order of that enum matters!! - GLOBAL_TABLES_SPEC, - HAS_MORE_PAGES, - NO_METADATA; - - static EnumSet deserialize(int flags) { - EnumSet set = EnumSet.noneOf(Flag.class); - Flag[] values = Flag.values(); - for (int n = 0; n < values.length; n++) { - if ((flags & (1 << n)) != 0) - set.add(values[n]); - } - return set; - } - - static int serialize(EnumSet flags) { - int i = 0; - for (Flag flag : flags) - i |= 1 << flag.ordinal(); - return i; - } - } - - static final Metadata EMPTY = new Metadata(0, null, null, null); + }; - final int columnCount; - final ColumnDefinitions columns; // Can be null if no metadata was asked by the query - final ByteBuffer pagingState; - final int[] pkIndices; + @Override + public String toString() { + return "RESULT set keyspace " + keyspace; + } + } - private Metadata(int columnCount, ColumnDefinitions columns, ByteBuffer pagingState, int[] pkIndices) { - this.columnCount = columnCount; - this.columns = columns; - this.pagingState = pagingState; - this.pkIndices = pkIndices; - } + static class Rows extends Result { - static Metadata decode(ByteBuf body, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return decode(body, false, protocolVersion, codecRegistry); - } + static class Metadata { - static Metadata decode(ByteBuf body, boolean withPkIndices, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - - // flags & column count - EnumSet flags = Flag.deserialize(body.readInt()); - int columnCount = body.readInt(); - - int[] pkIndices = null; - int pkCount; - if (withPkIndices && (pkCount = body.readInt()) > 0) { - pkIndices = new int[pkCount]; - for (int i = 0; i < pkCount; i++) - pkIndices[i] = (int) body.readShort(); - } - - ByteBuffer state = null; - if (flags.contains(Flag.HAS_MORE_PAGES)) - state = CBUtil.readValue(body); - - if (flags.contains(Flag.NO_METADATA)) - return new Metadata(columnCount, null, state, pkIndices); - - boolean globalTablesSpec = flags.contains(Flag.GLOBAL_TABLES_SPEC); - - String globalKsName = null; - String globalCfName = null; - if (globalTablesSpec) { - globalKsName = CBUtil.readString(body); - globalCfName = CBUtil.readString(body); - } - - // metadata (names/types) - 
ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[columnCount]; - for (int i = 0; i < columnCount; i++) { - String ksName = globalTablesSpec ? globalKsName : CBUtil.readString(body); - String cfName = globalTablesSpec ? globalCfName : CBUtil.readString(body); - String name = CBUtil.readString(body); - DataType type = DataType.decode(body, protocolVersion, codecRegistry); - defs[i] = new ColumnDefinitions.Definition(ksName, cfName, name, type); - } - - return new Metadata(columnCount, new ColumnDefinitions(defs, codecRegistry), state, pkIndices); - } + private enum Flag { + // The order of that enum matters!! + GLOBAL_TABLES_SPEC, + HAS_MORE_PAGES, + NO_METADATA, + METADATA_CHANGED; - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - if (columns == null) { - sb.append('[').append(columnCount).append(" columns]"); - } else { - for (ColumnDefinitions.Definition column : columns) { - sb.append('[').append(column.getName()); - sb.append(" (").append(column.getType()).append(")]"); - } - } - if (pagingState != null) - sb.append(" (to be continued)"); - return sb.toString(); - } + static EnumSet deserialize(int flags) { + EnumSet set = EnumSet.noneOf(Flag.class); + Flag[] values = Flag.values(); + for (int n = 0; n < values.length; n++) { + if ((flags & (1 << n)) != 0) set.add(values[n]); } + return set; + } + + static int serialize(EnumSet flags) { + int i = 0; + for (Flag flag : flags) i |= 1 << flag.ordinal(); + return i; + } + } - static final Message.Decoder subcodec = new Message.Decoder() { - @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - - Metadata metadata = Metadata.decode(body, version, codecRegistry); - - int rowCount = body.readInt(); - int columnCount = metadata.columnCount; - - Queue> data = new ArrayDeque>(rowCount); - for (int i = 0; i < rowCount; i++) { - List row = new ArrayList(columnCount); - for (int j = 0; j < columnCount; j++) - row.add(CBUtil.readValue(body)); - data.add(row); - } - - return new Rows(metadata, data, version); - } - }; + static final Metadata EMPTY = new Metadata(null, 0, null, null, null); + + final int columnCount; + final ColumnDefinitions columns; // Can be null if no metadata was asked by the query + final ByteBuffer pagingState; + final int[] pkIndices; + final MD5Digest + metadataId; // only present if the flag METADATA_CHANGED is set (ROWS response only) + + private Metadata( + MD5Digest metadataId, + int columnCount, + ColumnDefinitions columns, + ByteBuffer pagingState, + int[] pkIndices) { + this.metadataId = metadataId; + this.columnCount = columnCount; + this.columns = columns; + this.pagingState = pagingState; + this.pkIndices = pkIndices; + } - final Metadata metadata; - final Queue> data; - private final ProtocolVersion version; + static Metadata decode( + ByteBuf body, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return decode(body, false, protocolVersion, codecRegistry); + } - private Rows(Metadata metadata, Queue> data, ProtocolVersion version) { - super(Kind.ROWS); - this.metadata = metadata; - this.data = data; - this.version = version; - } + static Metadata decode( + ByteBuf body, + boolean withPkIndices, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry) { + + // flags & column count + EnumSet flags = Flag.deserialize(body.readInt()); + int columnCount = body.readInt(); + + ByteBuffer state = null; + if (flags.contains(Flag.HAS_MORE_PAGES)) state = CBUtil.readValue(body); + + MD5Digest 
resultMetadataId = null; + if (flags.contains(Flag.METADATA_CHANGED)) { + assert ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(protocolVersion) + : "METADATA_CHANGED flag is not supported in protocol version " + protocolVersion; + assert !flags.contains(Flag.NO_METADATA) + : "METADATA_CHANGED and NO_METADATA are mutually exclusive flags"; + resultMetadataId = MD5Digest.wrap(CBUtil.readBytes(body)); + } + + int[] pkIndices = null; + int pkCount; + if (withPkIndices && (pkCount = body.readInt()) > 0) { + pkIndices = new int[pkCount]; + for (int i = 0; i < pkCount; i++) pkIndices[i] = (int) body.readShort(); + } + + if (flags.contains(Flag.NO_METADATA)) + return new Metadata(resultMetadataId, columnCount, null, state, pkIndices); + + boolean globalTablesSpec = flags.contains(Flag.GLOBAL_TABLES_SPEC); + + String globalKsName = null; + String globalCfName = null; + if (globalTablesSpec) { + globalKsName = CBUtil.readString(body); + globalCfName = CBUtil.readString(body); + } + + // metadata (names/types) + ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[columnCount]; + for (int i = 0; i < columnCount; i++) { + String ksName = globalTablesSpec ? globalKsName : CBUtil.readString(body); + String cfName = globalTablesSpec ? globalCfName : CBUtil.readString(body); + String name = CBUtil.readString(body); + DataType type = DataType.decode(body, protocolVersion, codecRegistry); + defs[i] = new ColumnDefinitions.Definition(ksName, cfName, name, type); + } + + return new Metadata( + resultMetadataId, + columnCount, + new ColumnDefinitions(defs, codecRegistry), + state, + pkIndices); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("ROWS ").append(metadata).append('\n'); - for (List row : data) { - for (int i = 0; i < row.size(); i++) { - ByteBuffer v = row.get(i); - if (v == null) { - sb.append(" | null"); - } else { - sb.append(" | "); - if (metadata.columns != null) { - DataType dataType = metadata.columns.getType(i); - sb.append(dataType); - sb.append(" "); - TypeCodec codec = metadata.columns.codecRegistry.codecFor(dataType); - Object o = codec.deserialize(v, version); - String s = codec.format(o); - if (s.length() > 100) - s = s.substring(0, 100) + "..."; - sb.append(s); - } else { - sb.append(Bytes.toHexString(v)); - } - } - } - sb.append('\n'); - } - sb.append("---"); - return sb.toString(); + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + if (columns == null) { + sb.append('[').append(columnCount).append(" columns]"); + } else { + for (ColumnDefinitions.Definition column : columns) { + sb.append('[').append(column.getName()); + sb.append(" (").append(column.getType()).append(")]"); } + } + if (pagingState != null) sb.append(" (to be continued)"); + return sb.toString(); } + } - static class Prepared extends Result { - - static final Message.Decoder subcodec = new Message.Decoder() { - @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - MD5Digest id = MD5Digest.wrap(CBUtil.readBytes(body)); - boolean withPkIndices = version.compareTo(V4) >= 0; - Rows.Metadata metadata = Rows.Metadata.decode(body, withPkIndices, version, codecRegistry); - Rows.Metadata resultMetadata = decodeResultMetadata(body, version, codecRegistry); - return new Prepared(id, metadata, resultMetadata); - } - - private Metadata decodeResultMetadata(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - switch (version) { - case V1: - return 
Rows.Metadata.EMPTY; - case V2: - case V3: - case V4: - case V5: - return Rows.Metadata.decode(body, version, codecRegistry); - default: - throw version.unsupported(); - } - } - }; + static final Message.Decoder subcodec = + new Message.Decoder() { + @Override + public Result decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - final MD5Digest statementId; - final Rows.Metadata metadata; - final Rows.Metadata resultMetadata; + Metadata metadata = Metadata.decode(body, version, codecRegistry); - private Prepared(MD5Digest statementId, Rows.Metadata metadata, Rows.Metadata resultMetadata) { - super(Kind.PREPARED); - this.statementId = statementId; - this.metadata = metadata; - this.resultMetadata = resultMetadata; - } + int rowCount = body.readInt(); + int columnCount = metadata.columnCount; - @Override - public String toString() { - return "RESULT PREPARED " + statementId + ' ' + metadata + " (resultMetadata=" + resultMetadata + ')'; - } - } + Queue> data = new ArrayDeque>(rowCount); + for (int i = 0; i < rowCount; i++) { + List row = new ArrayList(columnCount); + for (int j = 0; j < columnCount; j++) row.add(CBUtil.readValue(body)); + data.add(row); + } - static class SchemaChange extends Result { - - enum Change {CREATED, UPDATED, DROPPED} - - final Change change; - final SchemaElement targetType; - final String targetKeyspace; - final String targetName; - final List targetSignature; - - static final Message.Decoder subcodec = new Message.Decoder() { - @Override - public Result decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - // Note: the CREATE KEYSPACE/TABLE/TYPE SCHEMA_CHANGE response is different from the SCHEMA_CHANGE EVENT type - Change change; - SchemaElement targetType; - String targetKeyspace, targetName; - List targetSignature; - switch (version) { - case V1: - case V2: - change = CBUtil.readEnumValue(Change.class, body); - targetKeyspace = CBUtil.readString(body); - targetName = CBUtil.readString(body); - targetType = targetName.isEmpty() ? KEYSPACE : TABLE; - targetSignature = Collections.emptyList(); - return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); - case V3: - case V4: - case V5: - change = CBUtil.readEnumValue(Change.class, body); - targetType = CBUtil.readEnumValue(SchemaElement.class, body); - targetKeyspace = CBUtil.readString(body); - targetName = (targetType == KEYSPACE) ? "" : CBUtil.readString(body); - targetSignature = (targetType == FUNCTION || targetType == AGGREGATE) - ? CBUtil.readStringList(body) - : Collections.emptyList(); - return new SchemaChange(change, targetType, targetKeyspace, targetName, targetSignature); - default: - throw version.unsupported(); - } - } - }; - - private SchemaChange(Change change, SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature) { - super(Kind.SCHEMA_CHANGE); - this.change = change; - this.targetType = targetType; - this.targetKeyspace = targetKeyspace; - this.targetName = targetName; - this.targetSignature = targetSignature; + return new Rows(metadata, data, version); } - - @Override - public String toString() { - return "RESULT schema change " + change + " on " + targetType + ' ' + targetKeyspace + (targetName.isEmpty() ? "" : '.' 
+ targetName); + }; + + final Metadata metadata; + final Queue> data; + private final ProtocolVersion version; + + private Rows(Metadata metadata, Queue> data, ProtocolVersion version) { + super(Kind.ROWS); + this.metadata = metadata; + this.data = data; + this.version = version; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ROWS ").append(metadata).append('\n'); + for (List row : data) { + for (int i = 0; i < row.size(); i++) { + ByteBuffer v = row.get(i); + if (v == null) { + sb.append(" | null"); + } else { + sb.append(" | "); + if (metadata.columns != null) { + DataType dataType = metadata.columns.getType(i); + sb.append(dataType); + sb.append(" "); + TypeCodec codec = metadata.columns.codecRegistry.codecFor(dataType); + Object o = codec.deserialize(v, version); + String s = codec.format(o); + if (s.length() > 100) s = s.substring(0, 100) + "..."; + sb.append(s); + } else { + sb.append(Bytes.toHexString(v)); + } } + } + sb.append('\n'); } + sb.append("---"); + return sb.toString(); + } } - static class Event extends Message.Response { + static class Prepared extends Result { - static final Message.Decoder decoder = new Message.Decoder() { + static final Message.Decoder subcodec = + new Message.Decoder() { @Override - public Event decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - return new Event(ProtocolEvent.deserialize(body, version)); + public Result decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + MD5Digest id = MD5Digest.wrap(CBUtil.readBytes(body)); + MD5Digest resultMetadataId = null; + if (ProtocolFeature.PREPARED_METADATA_CHANGES.isSupportedBy(version)) + resultMetadataId = MD5Digest.wrap(CBUtil.readBytes(body)); + boolean withPkIndices = version.compareTo(ProtocolVersion.V4) >= 0; + Rows.Metadata metadata = + Rows.Metadata.decode(body, withPkIndices, version, codecRegistry); + Rows.Metadata resultMetadata = decodeResultMetadata(body, version, codecRegistry); + return new Prepared(id, resultMetadataId, metadata, resultMetadata); } - }; - final ProtocolEvent event; + private Metadata decodeResultMetadata( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + switch (version) { + case V1: + return Rows.Metadata.EMPTY; + case V2: + case V3: + case V4: + case V5: + case V6: + return Rows.Metadata.decode(body, version, codecRegistry); + default: + throw version.unsupported(); + } + } + }; + + final MD5Digest statementId; + final MD5Digest resultMetadataId; + final Rows.Metadata metadata; + final Rows.Metadata resultMetadata; + + private Prepared( + MD5Digest statementId, + MD5Digest resultMetadataId, + Rows.Metadata metadata, + Rows.Metadata resultMetadata) { + super(Kind.PREPARED); + this.statementId = statementId; + this.resultMetadataId = resultMetadataId; + this.metadata = metadata; + this.resultMetadata = resultMetadata; + } + + @Override + public String toString() { + return "RESULT PREPARED " + + statementId + + ' ' + + metadata + + " (resultMetadata=" + + resultMetadata + + ')'; + } + } - Event(ProtocolEvent event) { - super(Message.Response.Type.EVENT); - this.event = event; - } + static class SchemaChange extends Result { - @Override - public String toString() { - return "EVENT " + event; - } - } + enum Change { + CREATED, + UPDATED, + DROPPED + } - static class AuthChallenge extends Message.Response { + final Change change; + final SchemaElement targetType; + final String targetKeyspace; + final String targetName; + final List 
targetSignature; - static final Message.Decoder decoder = new Message.Decoder() { + static final Message.Decoder subcodec = + new Message.Decoder() { @Override - public AuthChallenge decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - ByteBuffer b = CBUtil.readValue(body); - if (b == null) - return new AuthChallenge(null); - - byte[] token = new byte[b.remaining()]; - b.get(token); - return new AuthChallenge(token); + public Result decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + // Note: the CREATE KEYSPACE/TABLE/TYPE SCHEMA_CHANGE response is different from the + // SCHEMA_CHANGE EVENT type + Change change; + SchemaElement targetType; + String targetKeyspace, targetName; + List targetSignature; + switch (version) { + case V1: + case V2: + change = CBUtil.readEnumValue(Change.class, body); + targetKeyspace = CBUtil.readString(body); + targetName = CBUtil.readString(body); + targetType = targetName.isEmpty() ? KEYSPACE : TABLE; + targetSignature = Collections.emptyList(); + return new SchemaChange( + change, targetType, targetKeyspace, targetName, targetSignature); + case V3: + case V4: + case V5: + case V6: + change = CBUtil.readEnumValue(Change.class, body); + targetType = CBUtil.readEnumValue(SchemaElement.class, body); + targetKeyspace = CBUtil.readString(body); + targetName = (targetType == KEYSPACE) ? "" : CBUtil.readString(body); + targetSignature = + (targetType == FUNCTION || targetType == AGGREGATE) + ? CBUtil.readStringList(body) + : Collections.emptyList(); + return new SchemaChange( + change, targetType, targetKeyspace, targetName, targetSignature); + default: + throw version.unsupported(); + } } + }; + + private SchemaChange( + Change change, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature) { + super(Kind.SCHEMA_CHANGE); + this.change = change; + this.targetType = targetType; + this.targetKeyspace = targetKeyspace; + this.targetName = targetName; + this.targetSignature = targetSignature; + } + + @Override + public String toString() { + return "RESULT schema change " + + change + + " on " + + targetType + + ' ' + + targetKeyspace + + (targetName.isEmpty() ? "" : '.' 
+ targetName); + } + } + } + + static class Event extends Message.Response { + + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public Event decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + return new Event(ProtocolEvent.deserialize(body, version)); + } }; - final byte[] token; + final ProtocolEvent event; - private AuthChallenge(byte[] token) { - super(Message.Response.Type.AUTH_CHALLENGE); - this.token = token; - } + Event(ProtocolEvent event) { + super(Message.Response.Type.EVENT); + this.event = event; } - static class AuthSuccess extends Message.Response { + @Override + public String toString() { + return "EVENT " + event; + } + } + + static class AuthChallenge extends Message.Response { + + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public AuthChallenge decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + ByteBuffer b = CBUtil.readValue(body); + if (b == null) return new AuthChallenge(null); + + byte[] token = new byte[b.remaining()]; + b.get(token); + return new AuthChallenge(token); + } + }; - static final Message.Decoder decoder = new Message.Decoder() { - @Override - public AuthSuccess decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { - ByteBuffer b = CBUtil.readValue(body); - if (b == null) - return new AuthSuccess(null); - - byte[] token = new byte[b.remaining()]; - b.get(token); - return new AuthSuccess(token); - } + final byte[] token; + + private AuthChallenge(byte[] token) { + super(Message.Response.Type.AUTH_CHALLENGE); + this.token = token; + } + } + + static class AuthSuccess extends Message.Response { + + static final Message.Decoder decoder = + new Message.Decoder() { + @Override + public AuthSuccess decode( + ByteBuf body, ProtocolVersion version, CodecRegistry codecRegistry) { + ByteBuffer b = CBUtil.readValue(body); + if (b == null) return new AuthSuccess(null); + + byte[] token = new byte[b.remaining()]; + b.get(token); + return new AuthSuccess(token); + } }; - final byte[] token; + final byte[] token; - private AuthSuccess(byte[] token) { - super(Message.Response.Type.AUTH_SUCCESS); - this.token = token; - } + private AuthSuccess(byte[] token) { + super(Message.Response.Type.AUTH_SUCCESS); + this.token = token; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 3d058968ab2..78083f9ccc9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
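The Error decoder above maps the newly handled CAS_WRITE_UNKNOWN and CDC_WRITE_FAILURE codes to CASWriteUnknownException and CDCWriteException, which application code can catch like any other DriverException. A rough sketch, assuming an invented keyspace/table and a server and protocol version recent enough to emit these errors:

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.CASWriteUnknownException;
import com.datastax.driver.core.exceptions.CDCWriteException;
import com.datastax.driver.core.exceptions.WriteTimeoutException;

public class ErrorHandlingExample {
  static void insertUser(Session session) {
    try {
      session.execute("INSERT INTO my_keyspace.users (name) VALUES ('alice') IF NOT EXISTS");
    } catch (CASWriteUnknownException e) {
      // The Paxos round ended in an unknown state: the write may or may not have been applied.
      System.err.println("LWT outcome unknown: " + e.getMessage());
    } catch (CDCWriteException e) {
      // The coordinator rejected the mutation because CDC log space was exhausted.
      System.err.println("CDC write rejected: " + e.getMessage());
    } catch (WriteTimeoutException e) {
      System.err.println("write timed out at " + e.getConsistencyLevel());
    }
  }
}
```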
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,61 +17,61 @@ */ package com.datastax.driver.core; - /** * The result of a query. - *

- * The retrieval of the rows of a ResultSet is generally paged (a first page - * of result is fetched and the next one is only fetched once all the results - * of the first one has been consumed). The size of the pages can be configured - * either globally through {@link QueryOptions#setFetchSize} or per-statement - * with {@link Statement#setFetchSize}. Though new pages are automatically (and - * transparently) fetched when needed, it is possible to force the retrieval - * of the next page early through {@link #fetchMoreResults}. Please note however - * that this ResultSet paging is not available with the version 1 of the native - * protocol (i.e. with Cassandra 1.2 or if version 1 has been explicitly requested - * through {@link Cluster.Builder#withProtocolVersion}). If the protocol version 1 - * is in use, a ResultSet is always fetched in it's entirely and it's up to the - * client to make sure that no query can yield ResultSet that won't hold in memory. - *

- * Note that this class is not thread-safe. + * + *

The retrieval of the rows of a ResultSet is generally paged (a first page of results is fetched + * and the next one is only fetched once all the results of the first one have been consumed). The + * size of the pages can be configured either globally through {@link QueryOptions#setFetchSize} or + * per-statement with {@link Statement#setFetchSize}. Though new pages are automatically (and + * transparently) fetched when needed, it is possible to force the retrieval of the next page early + * through {@link #fetchMoreResults}. Please note, however, that this ResultSet paging is not + * available with version 1 of the native protocol (i.e. with Cassandra 1.2 or if version 1 has + * been explicitly requested through {@link Cluster.Builder#withProtocolVersion}). If protocol + * version 1 is in use, a ResultSet is always fetched in its entirety and it is up to the client to + * make sure that no query can yield a ResultSet that won't fit in memory. + * + *
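As a usage-level illustration of the paging behaviour described in this Javadoc, here is a minimal sketch against the 3.x API; the contact point, keyspace, table and column names are invented:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class PagingExample {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {
      // Global default page size (applies to all statements unless overridden).
      cluster.getConfiguration().getQueryOptions().setFetchSize(5000);

      // Per-statement page size; rows are fetched 100 at a time as the iteration advances.
      Statement stmt =
          new SimpleStatement("SELECT k, v FROM my_keyspace.my_table").setFetchSize(100);
      ResultSet rs = session.execute(stmt);
      for (Row row : rs) {
        // Optionally pre-fetch the next page before the current one is exhausted.
        if (rs.getAvailableWithoutFetching() == 10 && !rs.isFullyFetched()) {
          rs.fetchMoreResults(); // asynchronous; returns a ListenableFuture
        }
        System.out.println(row.getString("k"));
      }
    }
  }
}
```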

Note that this class is not thread-safe. */ public interface ResultSet extends PagingIterable { - // redeclared only to make clirr happy - @Override - Row one(); + // redeclared only to make clirr happy + @Override + Row one(); - /** - * Returns the columns returned in this ResultSet. - * - * @return the columns returned in this ResultSet. - */ - public ColumnDefinitions getColumnDefinitions(); + /** + * Returns the columns returned in this ResultSet. + * + * @return the columns returned in this ResultSet. + */ + public ColumnDefinitions getColumnDefinitions(); - /** - * If the query that produced this ResultSet was a conditional update, - * return whether it was successfully applied. - *

- * This is equivalent to calling: - *

- *

-     * rs.one().getBool("[applied]");
-     * 
- *

- * For consistency, this method always returns {@code true} for - * non-conditional queries (although there is no reason to call the method - * in that case). This is also the case for conditional DDL statements - * ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF NOT EXISTS}), - * for which Cassandra doesn't return an {@code [applied]} column. - *

- * Note that, for versions of Cassandra strictly lower than 2.0.9 and 2.1.0-rc2, - * a server-side bug (CASSANDRA-7337) causes this method to always return - * {@code true} for batches containing conditional queries. - * - * @return if the query was a conditional update, whether it was applied. - * {@code true} for other types of queries. - * @see CASSANDRA-7337 - */ - public boolean wasApplied(); + /** + * If the query that produced this ResultSet was a conditional update, return whether it was + * successfully applied. + * + *

This is equivalent to calling: + * + *

+ * + *

+   * rs.one().getBool("[applied]");
+   * 
+ * + * Except that this method peeks at the next row without consuming it. + * + *

For consistency, this method always returns {@code true} for non-conditional queries + * (although there is no reason to call the method in that case). This is also the case for + * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF + * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. + * + *
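A small, hypothetical example of checking a conditional (LWT) update with wasApplied(); the keyspace, table and columns are invented:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class LwtExample {
  // Tries to register a user name; returns true only if the row did not exist yet.
  static boolean register(Session session, String name, String email) {
    ResultSet rs =
        session.execute(
            "INSERT INTO my_keyspace.users (name, email) VALUES (?, ?) IF NOT EXISTS",
            name,
            email);
    if (!rs.wasApplied()) {
      // The LWT was rejected; the [applied] row also carries the existing values.
      Row existing = rs.one();
      System.out.println("name already taken by " + existing.getString("email"));
      return false;
    }
    return true;
  }
}
```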

Note that, for versions of Cassandra strictly lower than 2.0.9 and 2.1.0-rc2, a server-side + * bug (CASSANDRA-7337) causes this method to always return {@code true} for batches containing + * conditional queries. + * + * @return if the query was a conditional update, whether it was applied. {@code true} for other + * types of queries. + * @see CASSANDRA-7337 + */ + public boolean wasApplied(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java index 6a246f69343..09a227ed5f8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,102 +21,97 @@ import com.datastax.driver.core.exceptions.QueryExecutionException; import com.datastax.driver.core.exceptions.QueryValidationException; import com.google.common.util.concurrent.ListenableFuture; - import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; /** * A future on a {@link ResultSet}. - *

- * Note that this class implements Guava's {@code + * + *

Note that this class implements Guava's {@code * ListenableFuture} and can thus be used with Guava's future utilities. */ public interface ResultSetFuture extends ListenableFuture { - /** - * Waits for the query to return and return its result. - *
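Because ResultSetFuture is a Guava ListenableFuture (as the class Javadoc above notes), results can also be consumed with a callback rather than by blocking. A sketch, assuming the Guava version shipped with the driver and an invented query:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;

public class CallbackExample {
  static void queryAsync(Session session) {
    ResultSetFuture future =
        session.executeAsync("SELECT v FROM my_keyspace.my_table WHERE k = 1");
    Futures.addCallback(
        future,
        new FutureCallback<ResultSet>() {
          @Override
          public void onSuccess(ResultSet rs) {
            System.out.println("first row: " + rs.one());
          }

          @Override
          public void onFailure(Throwable t) {
            System.err.println("query failed: " + t);
          }
        },
        MoreExecutors.directExecutor()); // use a real executor for non-trivial callbacks
  }
}
```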

- * This method is usually more convenient than {@link #get} because it: - *

    - *
  • Waits for the result uninterruptibly, and so doesn't throw - * {@link InterruptedException}.
  • - *
  • Returns meaningful exceptions, instead of having to deal - * with ExecutionException.
  • - *
- * As such, it is the preferred way to get the future result. - * - * @return the query result set. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, that is an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query is invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet getUninterruptibly(); + /** + * Waits for the query to return and returns its result. + * + *

This method is usually more convenient than {@link #get} because it: + * + *

    + *
  • Waits for the result uninterruptibly, and so doesn't throw {@link InterruptedException}. + *
  • Returns meaningful exceptions, instead of having to deal with ExecutionException. + *
+ * + * As such, it is the preferred way to get the future result. + * + * @return the query result set. + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, that is an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + */ + public ResultSet getUninterruptibly(); - /** - * Waits for the provided time for the query to return and return its - * result if available. - *
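A minimal sketch of the blocking pattern this method supports (query text and exception handling are illustrative only):

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import com.datastax.driver.core.exceptions.QueryExecutionException;

public class GetUninterruptiblyExample {
  static ResultSet fetch(Session session) {
    ResultSetFuture future =
        session.executeAsync("SELECT v FROM my_keyspace.my_table WHERE k = 1");
    try {
      // Unlike Future.get(), this neither throws InterruptedException nor wraps
      // driver errors in ExecutionException.
      return future.getUninterruptibly();
    } catch (NoHostAvailableException e) {
      System.err.println("no coordinator reachable: " + e.getErrors());
      throw e;
    } catch (QueryExecutionException e) {
      System.err.println("server-side execution problem: " + e.getMessage());
      throw e;
    }
  }
}
```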

- * This method is usually more convenient than {@link #get} because it: - *

    - *
  • Waits for the result uninterruptibly, and so doesn't throw - * {@link InterruptedException}.
  • - *
  • Returns meaningful exceptions, instead of having to deal - * with ExecutionException.
  • - *
- * As such, it is the preferred way to get the future result. - * - * @param timeout the time to wait for the query to return. - * @param unit the unit for {@code timeout}. - * @return the query result set. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, that is an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws TimeoutException if the wait timed out (Note that this is - * different from a Cassandra timeout, which is a {@code - * QueryExecutionException}). - */ - public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException; + /** + * Waits for the provided time for the query to return and return its result if available. + * + *

This method is usually more convenient than {@link #get} because it: + * + *

    + *
  • Waits for the result uninterruptibly, and so doesn't throw {@link InterruptedException}. + *
  • Returns meaningful exceptions, instead of having to deal with ExecutionException. + *
+ * + * As such, it is the preferred way to get the future result. + * + * @param timeout the time to wait for the query to return. + * @param unit the unit for {@code timeout}. + * @return the query result set. + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, that is an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + * @throws TimeoutException if the wait timed out (Note that this is different from a Cassandra + * timeout, which is a {@code QueryExecutionException}). + */ + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws TimeoutException; - /** - * Attempts to cancel the execution of the request corresponding to this - * future. This attempt will fail if the request has already returned. - *
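The timed variant is typically combined with cancellation when the deadline expires, as the cancel() documentation further below also illustrates. A hedged sketch with an invented query:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedGetExample {
  static ResultSet fetchWithDeadline(Session session) {
    ResultSetFuture future =
        session.executeAsync("SELECT v FROM my_keyspace.my_table WHERE k = 1");
    try {
      // Waits at most one second; a TimeoutException here is purely client-side.
      return future.getUninterruptibly(1, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      // Give up on the request and release driver-side resources (see cancel() below).
      future.cancel(true);
      return null;
    }
  }
}
```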

- * Please note that this only cancel the request driver side, but nothing - * is done to interrupt the execution of the request Cassandra side (and that even - * if {@code mayInterruptIfRunning} is true) since Cassandra does not - * support such interruption. - *

- * This method can be used to ensure no more work is performed driver side - * (which, while it doesn't include stopping a request already submitted - * to a Cassandra node, may include not retrying another Cassandra host on - * failure/timeout) if the ResultSet is not going to be retried. Typically, - * the code to wait for a request result for a maximum of 1 second could - * look like: - *

-     *   ResultSetFuture future = session.executeAsync(...some query...);
-     *   try {
-     *       ResultSet result = future.get(1, TimeUnit.SECONDS);
-     *       ... process result ...
-     *   } catch (TimeoutException e) {
-     *       future.cancel(true); // Ensure any resource used by this query driver
-     *                            // side is released immediately
-     *       ... handle timeout ...
-     *   }
-     * 
- * - * @param mayInterruptIfRunning the value of this parameter is currently - * ignored. - * @return {@code false} if the future could not be cancelled (it has already - * completed normally); {@code true} otherwise. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning); + /** + * Attempts to cancel the execution of the request corresponding to this future. This attempt will + * fail if the request has already returned. + * + *

Please note that this only cancels the request on the driver side, but nothing is done to interrupt + the execution of the request on the Cassandra side (and that even if {@code mayInterruptIfRunning} is + true) since Cassandra does not support such interruption. + * + *

This method can be used to ensure no more work is performed on the driver side (which, while it + * doesn't include stopping a request already submitted to a Cassandra node, may include not + * retrying another Cassandra host on failure/timeout) if the ResultSet is not going to be + * retried. Typically, the code to wait for a request result for a maximum of 1 second could look + * like: + * + *

+   *   ResultSetFuture future = session.executeAsync(...some query...);
+   *   try {
+   *       ResultSet result = future.get(1, TimeUnit.SECONDS);
+   *       ... process result ...
+   *   } catch (TimeoutException e) {
+   *       future.cancel(true); // Ensure any resource used by this query driver
+   *                            // side is released immediately
+   *       ... handle timeout ...
+   *   }
+   * 
+ * + * @param mayInterruptIfRunning the value of this parameter is currently ignored. + * @return {@code false} if the future could not be cancelled (it has already completed normally); + * {@code true} otherwise. + */ + @Override + public boolean cancel(boolean mayInterruptIfRunning); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Row.java b/driver-core/src/main/java/com/datastax/driver/core/Row.java index 69b935ce977..57dd5870c0e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Row.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Row.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,69 +21,67 @@ /** * A CQL Row returned in a {@link ResultSet}. - *

- * The values of a CQL Row can be retrieved by either index (index starts at zero) - * or name. When getting them by name, names follow the case insensitivity - * rules explained in {@link ColumnDefinitions}. + * + *

The values of a CQL Row can be retrieved by either index (index starts at zero) or name. When + * getting them by name, names follow the case insensitivity rules explained in {@link + * ColumnDefinitions}. */ public interface Row extends GettableData { - /** - * Returns the columns contained in this Row. - * - * @return the columns contained in this Row. - */ - public ColumnDefinitions getColumnDefinitions(); - - /** - * Returns the {@code i}th value of this row as a {@link Token}. - *

- * {@link #getPartitionKeyToken()} should generally be preferred to this method (unless the - * token column is aliased). - * - * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. - * @return the value of the {@code i}th column in this row as an Token. - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. - * @throws InvalidTypeException if column {@code i} is not of the type of token values - * for this cluster (this depends on the configured partitioner). - */ - public Token getToken(int i); + /** + * Returns the columns contained in this Row. + * + * @return the columns contained in this Row. + */ + public ColumnDefinitions getColumnDefinitions(); - /** - * Returns the value of column {@code name} as a {@link Token}. - *

- * {@link #getPartitionKeyToken()} should generally be preferred to this method (unless the - * token column is aliased). - * - * @param name the name of the column to retrieve. - * @return the value of column {@code name} as a Token. - * @throws IllegalArgumentException if {@code name} is not part of the - * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is not of the type of token values - * for this cluster (this depends on the configured partitioner). - */ - public Token getToken(String name); + /** + * Returns the {@code i}th value of this row as a {@link Token}. + * + *

{@link #getPartitionKeyToken()} should generally be preferred to this method (unless the + * token column is aliased). + * + * @param i the index ({@code 0 <= i < size()}) of the column to retrieve. + * @return the value of the {@code i}th column in this row as a Token. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. + * @throws InvalidTypeException if column {@code i} is not of the type of token values for this + * cluster (this depends on the configured partitioner). + */ + public Token getToken(int i); - /** - * Returns the value of the first column containing a {@link Token}. - *

- * This method is a shorthand for queries returning a single token in an unaliased - * column. It will look for the first name matching {@code token(...)}: - *

-     * {@code
-     * ResultSet rs = session.execute("SELECT token(k) FROM my_table WHERE k = 1");
-     * Token token = rs.one().getPartitionKeyToken(); // retrieves token(k)
-     * }
-     * 
- * If that doesn't work for you (for example, if you're using an alias), use - * {@link #getToken(int)} or {@link #getToken(String)}. - * - * @return the value of column {@code name} as a Token. - * @throws IllegalStateException if no column named {@code token(...)} exists in this - * ResultSet. - * @throws InvalidTypeException if the first column named {@code token(...)} is not of - * the type of token values for this cluster (this depends on the configured partitioner). - */ - public Token getPartitionKeyToken(); + /** + * Returns the value of column {@code name} as a {@link Token}. + * + *

{@link #getPartitionKeyToken()} should generally be preferred to this method (unless the + * token column is aliased). + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a Token. + * @throws IllegalArgumentException if {@code name} is not part of the ResultSet this row is part + * of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of the type of token values for this + * cluster (this depends on the configured partitioner). + */ + public Token getToken(String name); + /** + * Returns the value of the first column containing a {@link Token}. + * + *

This method is a shorthand for queries returning a single token in an unaliased column. It + * will look for the first name matching {@code token(...)}: + * + *

{@code
+   * ResultSet rs = session.execute("SELECT token(k) FROM my_table WHERE k = 1");
+   * Token token = rs.one().getPartitionKeyToken(); // retrieves token(k)
+   * }
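// Editor's sketch (not part of this patch): contrasts the getPartitionKeyToken() shorthand shown
// in the example above with explicit getToken(int)/getToken(String) calls. Assumes an open
// Session and a table "my_table" with partition key "k"; all names here are illustrative.
// Requires: com.datastax.driver.core.Row, Session and Token.
static void tokenExamples(Session session) {
  Row unaliased = session.execute("SELECT token(k) FROM my_table WHERE k = 1").one();
  Token t1 = unaliased.getPartitionKeyToken(); // finds the first token(...) column
  Token t2 = unaliased.getToken(0);            // same value, addressed by index

  // With an alias, getPartitionKeyToken() cannot match the column name, so retrieve it explicitly:
  Row aliased = session.execute("SELECT token(k) AS tk FROM my_table WHERE k = 1").one();
  Token t3 = aliased.getToken("tk");
}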
+ * + * If that doesn't work for you (for example, if you're using an alias), use {@link + * #getToken(int)} or {@link #getToken(String)}. + * + * @return the value of the first column containing a token. + * @throws IllegalStateException if no column named {@code token(...)} exists in this ResultSet. + * @throws InvalidTypeException if the first column named {@code token(...)} is not of the type of + * token values for this cluster (this depends on the configured partitioner). + */ + public Token getPartitionKeyToken(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java index e621854a21f..f97b56c7313 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SSLOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,13 +20,11 @@ import io.netty.channel.socket.SocketChannel; import io.netty.handler.ssl.SslHandler; -import java.net.InetSocketAddress; - /** * Defines how the driver configures SSL connections. - *

- * Note: since version 3.2.0, users are encouraged to implement - * {@link RemoteEndpointAwareSSLOptions} instead. + * + *

Note: since version 3.2.0, users are encouraged to implement {@link + * RemoteEndpointAwareSSLOptions} instead. * * @see RemoteEndpointAwareSSLOptions * @see JdkSSLOptions @@ -33,21 +33,22 @@ @SuppressWarnings("deprecation") public interface SSLOptions { - /** - * Creates a new SSL handler for the given Netty channel. - *

- * This gets called each time the driver opens a new connection to a Cassandra host. The newly created handler will be added - * to the channel's pipeline to provide SSL support for the connection. - *

- * You don't necessarily need to implement this method directly; see the provided implementations: {@link JdkSSLOptions} and - * {@link NettySSLOptions}. - * - * @param channel the channel. - * @return the handler. - * @deprecated use {@link RemoteEndpointAwareSSLOptions#newSSLHandler(SocketChannel, InetSocketAddress)} instead. - * - */ - @SuppressWarnings("DeprecatedIsStillUsed") - @Deprecated - SslHandler newSSLHandler(SocketChannel channel); + /** + * Creates a new SSL handler for the given Netty channel. + * + *

This gets called each time the driver opens a new connection to a Cassandra host. The newly + * created handler will be added to the channel's pipeline to provide SSL support for the + * connection. + * + *
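// Editor's sketch (not part of this patch): instead of implementing newSSLHandler() directly,
// most 3.x applications configure SSL through one of the provided SSLOptions implementations.
// RemoteEndpointAwareJdkSSLOptions is assumed here (available since 3.2.0); the contact point
// and the default SSLContext are placeholders for a real deployment.
// Requires: javax.net.ssl.SSLContext and com.datastax.driver.core.Cluster.
static Cluster buildSslCluster() throws Exception {
  SSLContext sslContext = SSLContext.getDefault(); // or one built from your keystore/truststore
  SSLOptions sslOptions =
      RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(sslContext).build();
  return Cluster.builder().addContactPoint("127.0.0.1").withSSL(sslOptions).build();
}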

You don't necessarily need to implement this method directly; see the provided + * implementations: {@link JdkSSLOptions} and {@link NettySSLOptions}. + * + * @param channel the channel. + * @return the handler. + * @deprecated use {@link RemoteEndpointAwareSSLOptions#newSSLHandler(SocketChannel, EndPoint)} + * instead. + */ + @SuppressWarnings("DeprecatedIsStillUsed") + @Deprecated + SslHandler newSSLHandler(SocketChannel channel); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListener.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListener.java index 47cf82d1cbf..2989cb98e3c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListener.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListener.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,161 +19,163 @@ /** * Interface for objects that are interested in tracking schema change events in the cluster. - *

- * An implementation of this interface can be registered against a Cluster - * object through the {@link com.datastax.driver.core.Cluster#register(SchemaChangeListener)} method. - *

- * Note that the methods defined by this interface will be executed by internal driver threads, and are - * therefore expected to have short execution times. If you need to perform long computations or blocking - * calls in response to schema change events, it is strongly recommended to schedule them asynchronously - * on a separate thread provided by your application code. + * + *

An implementation of this interface can be registered against a Cluster object through the + * {@link com.datastax.driver.core.Cluster#register(SchemaChangeListener)} method. + * + *
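// Editor's sketch (not part of this patch): a minimal listener based on SchemaChangeListenerBase,
// registered through Cluster#register(SchemaChangeListener). The single-thread executor is an
// illustrative way to honor the note below about keeping driver threads free of slow work.
// Requires: java.util.concurrent.Executor and Executors.
class LoggingSchemaListener extends SchemaChangeListenerBase {
  private final Executor worker = Executors.newSingleThreadExecutor();

  @Override
  public void onTableAdded(final TableMetadata table) {
    worker.execute(new Runnable() {
      @Override
      public void run() {
        // Any slow or blocking reaction happens off the driver's internal thread.
        System.out.println("Table added: " + table.getKeyspace().getName() + "." + table.getName());
      }
    });
  }
}
// Usage (sketch): cluster.register(new LoggingSchemaListener());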

Note that the methods defined by this interface will be executed by internal driver threads, + * and are therefore expected to have short execution times. If you need to perform long + * computations or blocking calls in response to schema change events, it is strongly recommended to + * schedule them asynchronously on a separate thread provided by your application code. */ public interface SchemaChangeListener { - /** - * Called when a keyspace has been added. - * - * @param keyspace the keyspace that has been added. - */ - void onKeyspaceAdded(KeyspaceMetadata keyspace); - - /** - * Called when a keyspace has been removed. - * - * @param keyspace the keyspace that has been removed. - */ - void onKeyspaceRemoved(KeyspaceMetadata keyspace); - - /** - * Called when a keyspace has changed. - * - * @param current the keyspace that has changed, in its current form (after the change). - * @param previous the keyspace that has changed, in its previous form (before the change). - */ - void onKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous); - - /** - * Called when a table has been added. - * - * @param table the table that has been newly added. - */ - void onTableAdded(TableMetadata table); - - /** - * Called when a table has been removed. - * - * @param table the table that has been removed. - */ - void onTableRemoved(TableMetadata table); - - /** - * Called when a table has changed. - * - * @param current the table that has changed, in its current form (after the change). - * @param previous the table that has changed, in its previous form (before the change). - */ - void onTableChanged(TableMetadata current, TableMetadata previous); - - /** - * Called when a user-defined type has been added. - * - * @param type the type that has been newly added. - */ - void onUserTypeAdded(UserType type); - - /** - * Called when a user-defined type has been removed. - * - * @param type the type that has been removed. - */ - void onUserTypeRemoved(UserType type); - - /** - * Called when a user-defined type has changed. - * - * @param current the type that has changed, in its current form (after the change). - * @param previous the type that has changed, in its previous form (before the change). - */ - void onUserTypeChanged(UserType current, UserType previous); - - /** - * Called when a user-defined function has been added. - * - * @param function the function that has been newly added. - */ - void onFunctionAdded(FunctionMetadata function); - - /** - * Called when a user-defined function has been removed. - * - * @param function the function that has been removed. - */ - void onFunctionRemoved(FunctionMetadata function); - - /** - * Called when a user-defined function has changed. - * - * @param current the function that has changed, in its current form (after the change). - * @param previous the function that has changed, in its previous form (before the change). - */ - void onFunctionChanged(FunctionMetadata current, FunctionMetadata previous); - - /** - * Called when a user-defined aggregate has been added. - * - * @param aggregate the aggregate that has been newly added. - */ - void onAggregateAdded(AggregateMetadata aggregate); - - /** - * Called when a user-defined aggregate has been removed. - * - * @param aggregate the aggregate that has been removed. - */ - void onAggregateRemoved(AggregateMetadata aggregate); - - /** - * Called when a user-defined aggregate has changed. - * - * @param current the aggregate that has changed, in its current form (after the change). 
- * @param previous the aggregate that has changed, in its previous form (before the change). - */ - void onAggregateChanged(AggregateMetadata current, AggregateMetadata previous); - - /** - * Called when a materialized view has been added. - * - * @param view the materialized view that has been newly added. - */ - void onMaterializedViewAdded(MaterializedViewMetadata view); - - /** - * Called when a materialized view has been removed. - * - * @param view the materialized view that has been removed. - */ - void onMaterializedViewRemoved(MaterializedViewMetadata view); - - /** - * Called when a materialized view has changed. - * - * @param current the materialized view that has changed, in its current form (after the change). - * @param previous the materialized view that has changed, in its previous form (before the change). - */ - void onMaterializedViewChanged(MaterializedViewMetadata current, MaterializedViewMetadata previous); - - /** - * Gets invoked when the listener is registered with a cluster. - * - * @param cluster the cluster that this tracker is registered with. - */ - void onRegister(Cluster cluster); - - /** - * Gets invoked when the listener is unregistered from a cluster, or at cluster shutdown if - * the tracker was not unregistered. - * - * @param cluster the cluster that this tracker was registered with. - */ - void onUnregister(Cluster cluster); + /** + * Called when a keyspace has been added. + * + * @param keyspace the keyspace that has been added. + */ + void onKeyspaceAdded(KeyspaceMetadata keyspace); + + /** + * Called when a keyspace has been removed. + * + * @param keyspace the keyspace that has been removed. + */ + void onKeyspaceRemoved(KeyspaceMetadata keyspace); + + /** + * Called when a keyspace has changed. + * + * @param current the keyspace that has changed, in its current form (after the change). + * @param previous the keyspace that has changed, in its previous form (before the change). + */ + void onKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous); + + /** + * Called when a table has been added. + * + * @param table the table that has been newly added. + */ + void onTableAdded(TableMetadata table); + + /** + * Called when a table has been removed. + * + * @param table the table that has been removed. + */ + void onTableRemoved(TableMetadata table); + + /** + * Called when a table has changed. + * + * @param current the table that has changed, in its current form (after the change). + * @param previous the table that has changed, in its previous form (before the change). + */ + void onTableChanged(TableMetadata current, TableMetadata previous); + + /** + * Called when a user-defined type has been added. + * + * @param type the type that has been newly added. + */ + void onUserTypeAdded(UserType type); + + /** + * Called when a user-defined type has been removed. + * + * @param type the type that has been removed. + */ + void onUserTypeRemoved(UserType type); + + /** + * Called when a user-defined type has changed. + * + * @param current the type that has changed, in its current form (after the change). + * @param previous the type that has changed, in its previous form (before the change). + */ + void onUserTypeChanged(UserType current, UserType previous); + + /** + * Called when a user-defined function has been added. + * + * @param function the function that has been newly added. + */ + void onFunctionAdded(FunctionMetadata function); + + /** + * Called when a user-defined function has been removed. 
+ * + * @param function the function that has been removed. + */ + void onFunctionRemoved(FunctionMetadata function); + + /** + * Called when a user-defined function has changed. + * + * @param current the function that has changed, in its current form (after the change). + * @param previous the function that has changed, in its previous form (before the change). + */ + void onFunctionChanged(FunctionMetadata current, FunctionMetadata previous); + + /** + * Called when a user-defined aggregate has been added. + * + * @param aggregate the aggregate that has been newly added. + */ + void onAggregateAdded(AggregateMetadata aggregate); + + /** + * Called when a user-defined aggregate has been removed. + * + * @param aggregate the aggregate that has been removed. + */ + void onAggregateRemoved(AggregateMetadata aggregate); + + /** + * Called when a user-defined aggregate has changed. + * + * @param current the aggregate that has changed, in its current form (after the change). + * @param previous the aggregate that has changed, in its previous form (before the change). + */ + void onAggregateChanged(AggregateMetadata current, AggregateMetadata previous); + + /** + * Called when a materialized view has been added. + * + * @param view the materialized view that has been newly added. + */ + void onMaterializedViewAdded(MaterializedViewMetadata view); + + /** + * Called when a materialized view has been removed. + * + * @param view the materialized view that has been removed. + */ + void onMaterializedViewRemoved(MaterializedViewMetadata view); + + /** + * Called when a materialized view has changed. + * + * @param current the materialized view that has changed, in its current form (after the change). + * @param previous the materialized view that has changed, in its previous form (before the + * change). + */ + void onMaterializedViewChanged( + MaterializedViewMetadata current, MaterializedViewMetadata previous); + + /** + * Gets invoked when the listener is registered with a cluster. + * + * @param cluster the cluster that this tracker is registered with. + */ + void onRegister(Cluster cluster); + + /** + * Gets invoked when the listener is unregistered from a cluster, or at cluster shutdown if the + * tracker was not unregistered. + * + * @param cluster the cluster that this tracker was registered with. + */ + void onUnregister(Cluster cluster); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListenerBase.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListenerBase.java index a48579b8f8e..f46231963ed 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListenerBase.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaChangeListenerBase.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,108 +17,67 @@ */ package com.datastax.driver.core; -/** - * Base implementation for {@link SchemaChangeListener}. - */ +/** Base implementation for {@link SchemaChangeListener}. */ public abstract class SchemaChangeListenerBase implements SchemaChangeListener { - @Override - public void onKeyspaceAdded(KeyspaceMetadata keyspace) { - - } - - @Override - public void onKeyspaceRemoved(KeyspaceMetadata keyspace) { - - } - - @Override - public void onKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous) { - - } - - @Override - public void onTableAdded(TableMetadata table) { - - } - - @Override - public void onTableRemoved(TableMetadata table) { - - } - - @Override - public void onTableChanged(TableMetadata current, TableMetadata previous) { - - } - - @Override - public void onUserTypeAdded(UserType type) { - - } - - @Override - public void onUserTypeRemoved(UserType type) { - - } - - @Override - public void onUserTypeChanged(UserType current, UserType previous) { - - } - - @Override - public void onFunctionAdded(FunctionMetadata function) { - - } - - @Override - public void onFunctionRemoved(FunctionMetadata function) { + @Override + public void onKeyspaceAdded(KeyspaceMetadata keyspace) {} - } + @Override + public void onKeyspaceRemoved(KeyspaceMetadata keyspace) {} - @Override - public void onFunctionChanged(FunctionMetadata current, FunctionMetadata previous) { + @Override + public void onKeyspaceChanged(KeyspaceMetadata current, KeyspaceMetadata previous) {} - } + @Override + public void onTableAdded(TableMetadata table) {} - @Override - public void onAggregateAdded(AggregateMetadata aggregate) { + @Override + public void onTableRemoved(TableMetadata table) {} - } + @Override + public void onTableChanged(TableMetadata current, TableMetadata previous) {} - @Override - public void onAggregateRemoved(AggregateMetadata aggregate) { + @Override + public void onUserTypeAdded(UserType type) {} - } + @Override + public void onUserTypeRemoved(UserType type) {} - @Override - public void onAggregateChanged(AggregateMetadata current, AggregateMetadata previous) { + @Override + public void onUserTypeChanged(UserType current, UserType previous) {} - } + @Override + public void onFunctionAdded(FunctionMetadata function) {} - @Override - public void onMaterializedViewAdded(MaterializedViewMetadata view) { + @Override + public void onFunctionRemoved(FunctionMetadata function) {} - } + @Override + public void onFunctionChanged(FunctionMetadata current, FunctionMetadata previous) {} - @Override - public void onMaterializedViewRemoved(MaterializedViewMetadata view) { + @Override + public void onAggregateAdded(AggregateMetadata aggregate) {} - } + @Override + public void onAggregateRemoved(AggregateMetadata aggregate) {} - @Override - public void onMaterializedViewChanged(MaterializedViewMetadata current, MaterializedViewMetadata previous) { + @Override + public void onAggregateChanged(AggregateMetadata current, AggregateMetadata previous) {} - } + @Override + public void onMaterializedViewAdded(MaterializedViewMetadata view) {} - @Override - public void onRegister(Cluster cluster) { + @Override + public void onMaterializedViewRemoved(MaterializedViewMetadata view) {} - } + @Override + public void 
onMaterializedViewChanged( + MaterializedViewMetadata current, MaterializedViewMetadata previous) {} - @Override - public void onUnregister(Cluster cluster) { + @Override + public void onRegister(Cluster cluster) {} - } + @Override + public void onUnregister(Cluster cluster) {} } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java index 5443e67caa4..1ef0944cb0e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaElement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,10 +18,14 @@ package com.datastax.driver.core; /** - * Values for a SCHEMA_CHANGE event. - * See protocol v4 section 4.2.6. - * Note that {@code VIEW} is not a valid string under protocol v4 or lower, but is included for internal use only. + * Values for a SCHEMA_CHANGE event. See protocol v4 section 4.2.6. Note that {@code VIEW} is not a + * valid string under protocol v4 or lower, but is included for internal use only. */ enum SchemaElement { - KEYSPACE, TABLE, TYPE, FUNCTION, AGGREGATE, VIEW + KEYSPACE, + TABLE, + TYPE, + FUNCTION, + AGGREGATE, + VIEW } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java index 09ea0e33037..ee2e272bf05 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,698 +17,1174 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.SchemaElement.AGGREGATE; +import static com.datastax.driver.core.SchemaElement.FUNCTION; +import static com.datastax.driver.core.SchemaElement.KEYSPACE; +import static com.datastax.driver.core.SchemaElement.TABLE; +import static com.datastax.driver.core.SchemaElement.TYPE; +import static com.datastax.driver.core.SchemaElement.VIEW; + import com.datastax.driver.core.exceptions.BusyConnectionException; import com.datastax.driver.core.exceptions.ConnectionException; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; -import java.util.concurrent.ExecutionException; - -import static com.datastax.driver.core.SchemaElement.*; - abstract class SchemaParser { - private static final Logger logger = LoggerFactory.getLogger(SchemaParser.class); - - private static final TypeCodec> LIST_OF_TEXT_CODEC = TypeCodec.list(TypeCodec.varchar()); - - private static final SchemaParser V2_PARSER = new V2SchemaParser(); - private static final SchemaParser V3_PARSER = new V3SchemaParser(); - - static SchemaParser forVersion(VersionNumber cassandraVersion) { - if (cassandraVersion.getMajor() >= 3) return V3_PARSER; - return V2_PARSER; + private static final Logger logger = LoggerFactory.getLogger(SchemaParser.class); + + private static final TypeCodec> LIST_OF_TEXT_CODEC = + TypeCodec.list(TypeCodec.varchar()); + + private static final SchemaParser V2_PARSER = new V2SchemaParser(); + private static final SchemaParser V3_PARSER = new V3SchemaParser(); + private static final SchemaParser V4_PARSER = new V4SchemaParser(); + + static SchemaParser forVersion(VersionNumber cassandraVersion) { + if (cassandraVersion.getMajor() >= 4) return V4_PARSER; + if (cassandraVersion.getMajor() >= 3) return V3_PARSER; + return V2_PARSER; + } + + static SchemaParser forDseVersion(VersionNumber dseVersion) { + if (dseVersion.getMajor() == 6 && dseVersion.getMinor() >= 8) return V4_PARSER; + if (dseVersion.getMajor() >= 5) return V3_PARSER; + return V2_PARSER; + } + + abstract SystemRows fetchSystemRows( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException; + + abstract String tableNameColumn(); + + void refresh( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + + SystemRows rows = + fetchSystemRows( + cluster, + targetType, + targetKeyspace, + targetName, + targetSignature, + connection, + cassandraVersion); + + Metadata metadata; + try { + metadata 
= cluster.getMetadata(); + } catch (IllegalStateException e) { + logger.warn("Unable to refresh metadata, cluster has been closed"); + return; + } + metadata.lock.lock(); + try { + if (targetType == null || targetType == KEYSPACE) { + // building the whole schema or a keyspace + assert rows.keyspaces != null; + Map keyspaces = buildKeyspaces(rows, cassandraVersion, cluster); + updateKeyspaces(metadata, metadata.keyspaces, keyspaces, targetKeyspace); + // If we rebuild all from scratch or have an updated keyspace, rebuild the token map + // since some replication on some keyspace may have changed + metadata.rebuildTokenMap(); + } else { + assert targetKeyspace != null; + KeyspaceMetadata keyspace = metadata.keyspaces.get(targetKeyspace); + // If we update a keyspace we don't know about, something went + // wrong. Log an error and schedule a full schema rebuild. + if (keyspace == null) { + logger.info( + String.format( + "Asked to rebuild %s %s.%s but I don't know keyspace %s", + targetType, targetKeyspace, targetName, targetKeyspace)); + metadata.cluster.submitSchemaRefresh(null, null, null, null); + } else { + switch (targetType) { + case TABLE: + if (rows.tables.containsKey(targetKeyspace)) { + Map tables = + buildTables( + keyspace, + rows.tables.get(targetKeyspace), + rows.columns.get(targetKeyspace), + rows.indexes.get(targetKeyspace), + cassandraVersion, + cluster); + updateTables(metadata, keyspace.tables, tables, targetName); + } + if (rows.views.containsKey(targetKeyspace)) { + Map tables = + buildViews( + keyspace, + rows.views.get(targetKeyspace), + rows.columns.get(targetKeyspace), + cassandraVersion, + cluster); + updateViews(metadata, keyspace.views, tables, targetName); + } + break; + case TYPE: + if (rows.udts.containsKey(targetKeyspace)) { + Map userTypes = + buildUserTypes( + keyspace, rows.udts.get(targetKeyspace), cassandraVersion, cluster); + updateUserTypes(metadata, keyspace.userTypes, userTypes, targetName); + } + break; + case FUNCTION: + if (rows.functions.containsKey(targetKeyspace)) { + Map functions = + buildFunctions( + keyspace, rows.functions.get(targetKeyspace), cassandraVersion, cluster); + updateFunctions(metadata, keyspace.functions, functions, targetName); + } + break; + case AGGREGATE: + if (rows.aggregates.containsKey(targetKeyspace)) { + Map aggregates = + buildAggregates( + keyspace, rows.aggregates.get(targetKeyspace), cassandraVersion, cluster); + updateAggregates(metadata, keyspace.aggregates, aggregates, targetName); + } + break; + } + } + } + } catch (RuntimeException e) { + // Failure to parse the schema is definitively wrong so log a full-on error, but this won't + // generally prevent queries to + // work and this can happen when new Cassandra versions modify stuff in the schema and the + // driver hasn't yet be modified. + // So log, but let things go otherwise. 
+ logger.error( + "Error parsing schema from Cassandra system tables: the schema in Cluster#getMetadata() will appear incomplete or stale", + e); + } finally { + metadata.lock.unlock(); + } + } + + private Map buildKeyspaces( + SystemRows rows, VersionNumber cassandraVersion, Cluster cluster) { + + Map keyspaces = new LinkedHashMap(); + for (Row keyspaceRow : rows.keyspaces) { + KeyspaceMetadata keyspace = KeyspaceMetadata.build(keyspaceRow, cassandraVersion); + Map userTypes = + buildUserTypes(keyspace, rows.udts.get(keyspace.getName()), cassandraVersion, cluster); + for (UserType userType : userTypes.values()) { + keyspace.add(userType); + } + Map tables = + buildTables( + keyspace, + rows.tables.get(keyspace.getName()), + rows.columns.get(keyspace.getName()), + rows.indexes.get(keyspace.getName()), + cassandraVersion, + cluster); + for (TableMetadata table : tables.values()) { + keyspace.add(table); + } + Map functions = + buildFunctions( + keyspace, rows.functions.get(keyspace.getName()), cassandraVersion, cluster); + for (FunctionMetadata function : functions.values()) { + keyspace.add(function); + } + Map aggregates = + buildAggregates( + keyspace, rows.aggregates.get(keyspace.getName()), cassandraVersion, cluster); + for (AggregateMetadata aggregate : aggregates.values()) { + keyspace.add(aggregate); + } + Map views = + buildViews( + keyspace, + rows.views.get(keyspace.getName()), + rows.columns.get(keyspace.getName()), + cassandraVersion, + cluster); + for (MaterializedViewMetadata view : views.values()) { + keyspace.add(view); + } + keyspaces.put(keyspace.getName(), keyspace); + } + if (rows.virtualKeyspaces != null) { + for (Row keyspaceRow : rows.virtualKeyspaces) { + KeyspaceMetadata keyspace = KeyspaceMetadata.buildVirtual(keyspaceRow, cassandraVersion); + Map tables = + buildTables( + keyspace, + rows.virtualTables.get(keyspace.getName()), + rows.virtualColumns.get(keyspace.getName()), + Collections.>emptyMap(), + cassandraVersion, + cluster); + for (TableMetadata table : tables.values()) { + keyspace.add(table); + } + keyspaces.put(keyspace.getName(), keyspace); + } } - abstract SystemRows fetchSystemRows(Cluster cluster, - SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature, - Connection connection, VersionNumber cassandraVersion) - throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException; - - abstract String tableNameColumn(); - - void refresh(Cluster cluster, - SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature, - Connection connection, VersionNumber cassandraVersion) - throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - - SystemRows rows = fetchSystemRows(cluster, targetType, targetKeyspace, targetName, targetSignature, connection, cassandraVersion); - - Metadata metadata = cluster.getMetadata(); - metadata.lock.lock(); + return keyspaces; + } + + private Map buildTables( + KeyspaceMetadata keyspace, + List tableRows, + Map> colsDefs, + Map> indexDefs, + VersionNumber cassandraVersion, + Cluster cluster) { + Map tables = new LinkedHashMap(); + if (tableRows != null) { + for (Row tableDef : tableRows) { + String cfName = tableDef.getString(tableNameColumn()); try { - if (targetType == null || targetType == KEYSPACE) { - // building the whole schema or a keyspace - assert rows.keyspaces != null; - Map keyspaces = buildKeyspaces(rows, cassandraVersion, cluster); - updateKeyspaces(metadata, metadata.keyspaces, 
keyspaces, targetKeyspace); - // If we rebuild all from scratch or have an updated keyspace, rebuild the token map - // since some replication on some keyspace may have changed - metadata.rebuildTokenMap(); + Map cols = colsDefs == null ? null : colsDefs.get(cfName); + if (cols == null || cols.isEmpty()) { + if (cassandraVersion.getMajor() >= 2) { + // In C* >= 2.0, we should never have no columns metadata because at the very least we + // should + // have the metadata corresponding to the default CQL metadata. So if we don't have + // any columns, + // that can only mean that the table got creating concurrently with our schema + // queries, and the + // query for columns metadata reached the node before the table was persisted while + // the table + // metadata one reached it afterwards. We could make the query to the column metadata + // sequential + // with the table metadata instead of in parallel, but it's probably not worth making + // it slower + // all the time to avoid this race since 1) it's very very uncommon and 2) we can just + // ignore the + // incomplete table here for now and it'll get updated next time with no particular + // consequence + // (if the table creation was concurrent with our querying, we'll get a notifciation + // later and + // will reupdate the schema for it anyway). See JAVA-320 for why we need this. + continue; } else { - assert targetKeyspace != null; - KeyspaceMetadata keyspace = metadata.keyspaces.get(targetKeyspace); - // If we update a keyspace we don't know about, something went - // wrong. Log an error and schedule a full schema rebuild. - if (keyspace == null) { - logger.info(String.format("Asked to rebuild %s %s.%s but I don't know keyspace %s", - targetType, targetKeyspace, targetName, targetKeyspace)); - metadata.cluster.submitSchemaRefresh(null, null, null, null); - } else { - switch (targetType) { - case TABLE: - if (rows.tables.containsKey(targetKeyspace)) { - Map tables = buildTables(keyspace, rows.tables.get(targetKeyspace), rows.columns.get(targetKeyspace), rows.indexes.get(targetKeyspace), cassandraVersion, cluster); - updateTables(metadata, keyspace.tables, tables, targetName); - } - if (rows.views.containsKey(targetKeyspace)) { - Map tables = buildViews(keyspace, rows.views.get(targetKeyspace), rows.columns.get(targetKeyspace), cassandraVersion, cluster); - updateViews(metadata, keyspace.views, tables, targetName); - } - break; - case TYPE: - if (rows.udts.containsKey(targetKeyspace)) { - Map userTypes = buildUserTypes(keyspace, rows.udts.get(targetKeyspace), cassandraVersion, cluster); - updateUserTypes(metadata, keyspace.userTypes, userTypes, targetName); - } - break; - case FUNCTION: - if (rows.functions.containsKey(targetKeyspace)) { - Map functions = buildFunctions(keyspace, rows.functions.get(targetKeyspace), cassandraVersion, cluster); - updateFunctions(metadata, keyspace.functions, functions, targetName); - } - break; - case AGGREGATE: - if (rows.aggregates.containsKey(targetKeyspace)) { - Map aggregates = buildAggregates(keyspace, rows.aggregates.get(targetKeyspace), cassandraVersion, cluster); - updateAggregates(metadata, keyspace.aggregates, aggregates, targetName); - } - break; - } - } + // C* 1.2 don't persists default CQL metadata, so it's possible not to have columns + // (for thirft + // tables). But in that case TableMetadata.build() knows how to handle it. + cols = Collections.emptyMap(); } + } + List cfIndexes = (indexDefs == null) ? 
null : indexDefs.get(cfName); + TableMetadata table = + TableMetadata.build( + keyspace, + tableDef, + cols, + cfIndexes, + tableNameColumn(), + cassandraVersion, + cluster); + tables.put(table.getName(), table); } catch (RuntimeException e) { - // Failure to parse the schema is definitively wrong so log a full-on error, but this won't generally prevent queries to - // work and this can happen when new Cassandra versions modify stuff in the schema and the driver hasn't yet be modified. - // So log, but let things go otherwise. - logger.error("Error parsing schema from Cassandra system tables: the schema in Cluster#getMetadata() will appear incomplete or stale", e); - } finally { - metadata.lock.unlock(); + // See #refresh for why we'd rather not propagate this further + logger.error( + String.format( + "Error parsing schema for table %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\") will be missing or incomplete", + keyspace.getName(), cfName, keyspace.getName(), cfName), + e); } + } } - - private Map buildKeyspaces(SystemRows rows, - VersionNumber cassandraVersion, Cluster cluster) { - - Map keyspaces = new LinkedHashMap(); - for (Row keyspaceRow : rows.keyspaces) { - KeyspaceMetadata keyspace = KeyspaceMetadata.build(keyspaceRow, cassandraVersion); - Map userTypes = buildUserTypes(keyspace, rows.udts.get(keyspace.getName()), cassandraVersion, cluster); - for (UserType userType : userTypes.values()) { - keyspace.add(userType); - } - Map tables = buildTables(keyspace, rows.tables.get(keyspace.getName()), rows.columns.get(keyspace.getName()), rows.indexes.get(keyspace.getName()), cassandraVersion, cluster); - for (TableMetadata table : tables.values()) { - keyspace.add(table); - } - Map functions = buildFunctions(keyspace, rows.functions.get(keyspace.getName()), cassandraVersion, cluster); - for (FunctionMetadata function : functions.values()) { - keyspace.add(function); - } - Map aggregates = buildAggregates(keyspace, rows.aggregates.get(keyspace.getName()), cassandraVersion, cluster); - for (AggregateMetadata aggregate : aggregates.values()) { - keyspace.add(aggregate); - } - Map views = buildViews(keyspace, rows.views.get(keyspace.getName()), rows.columns.get(keyspace.getName()), cassandraVersion, cluster); - for (MaterializedViewMetadata view : views.values()) { - keyspace.add(view); - } - keyspaces.put(keyspace.getName(), keyspace); - } - return keyspaces; - } - - private Map buildTables(KeyspaceMetadata keyspace, List tableRows, Map> colsDefs, Map> indexDefs, VersionNumber cassandraVersion, Cluster cluster) { - Map tables = new LinkedHashMap(); - if (tableRows != null) { - for (Row tableDef : tableRows) { - String cfName = tableDef.getString(tableNameColumn()); - try { - Map cols = colsDefs == null ? null : colsDefs.get(cfName); - if (cols == null || cols.isEmpty()) { - if (cassandraVersion.getMajor() >= 2) { - // In C* >= 2.0, we should never have no columns metadata because at the very least we should - // have the metadata corresponding to the default CQL metadata. So if we don't have any columns, - // that can only mean that the table got creating concurrently with our schema queries, and the - // query for columns metadata reached the node before the table was persisted while the table - // metadata one reached it afterwards. 
We could make the query to the column metadata sequential - // with the table metadata instead of in parallel, but it's probably not worth making it slower - // all the time to avoid this race since 1) it's very very uncommon and 2) we can just ignore the - // incomplete table here for now and it'll get updated next time with no particular consequence - // (if the table creation was concurrent with our querying, we'll get a notifciation later and - // will reupdate the schema for it anyway). See JAVA-320 for why we need this. - continue; - } else { - // C* 1.2 don't persists default CQL metadata, so it's possible not to have columns (for thirft - // tables). But in that case TableMetadata.build() knows how to handle it. - cols = Collections.emptyMap(); - } - } - List cfIndexes = (indexDefs == null) ? null : indexDefs.get(cfName); - TableMetadata table = TableMetadata.build(keyspace, tableDef, cols, cfIndexes, tableNameColumn(), cassandraVersion, cluster); - tables.put(table.getName(), table); - } catch (RuntimeException e) { - // See #refresh for why we'd rather not propagate this further - logger.error(String.format("Error parsing schema for table %s.%s: " - + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\") will be missing or incomplete", - keyspace.getName(), cfName, keyspace.getName(), cfName), e); - } - } - } - return tables; + return tables; + } + + private Map buildUserTypes( + KeyspaceMetadata keyspace, + List udtRows, + VersionNumber cassandraVersion, + Cluster cluster) { + Map userTypes = new LinkedHashMap(); + if (udtRows != null) { + for (Row udtRow : maybeSortUdts(udtRows, cluster, keyspace.getName())) { + UserType type = UserType.build(keyspace, udtRow, cassandraVersion, cluster, userTypes); + userTypes.put(type.getTypeName(), type); + } } - - private Map buildUserTypes(KeyspaceMetadata keyspace, List udtRows, VersionNumber cassandraVersion, Cluster cluster) { - Map userTypes = new LinkedHashMap(); - if (udtRows != null) { - for (Row udtRow : maybeSortUdts(udtRows, cluster, keyspace.getName())) { - UserType type = UserType.build(keyspace, udtRow, cassandraVersion, cluster, userTypes); - userTypes.put(type.getTypeName(), type); - } + return userTypes; + } + + // Some schema versions require parsing UDTs in a specific order + protected List maybeSortUdts(List udtRows, Cluster cluster, String keyspace) { + return udtRows; + } + + private Map buildFunctions( + KeyspaceMetadata keyspace, + List functionRows, + VersionNumber cassandraVersion, + Cluster cluster) { + Map functions = new LinkedHashMap(); + if (functionRows != null) { + for (Row functionRow : functionRows) { + FunctionMetadata function = + FunctionMetadata.build(keyspace, functionRow, cassandraVersion, cluster); + if (function != null) { + String name = + Metadata.fullFunctionName(function.getSimpleName(), function.getArguments().values()); + functions.put(name, function); } - return userTypes; + } } - - // Some schema versions require parsing UDTs in a specific order - protected List maybeSortUdts(List udtRows, Cluster cluster, String keyspace) { - return udtRows; - } - - private Map buildFunctions(KeyspaceMetadata keyspace, List functionRows, VersionNumber cassandraVersion, Cluster cluster) { - Map functions = new LinkedHashMap(); - if (functionRows != null) { - for (Row functionRow : functionRows) { - FunctionMetadata function = FunctionMetadata.build(keyspace, functionRow, cassandraVersion, cluster); - if (function != null) { - String name = Metadata.fullFunctionName(function.getSimpleName(), 
function.getArguments().values()); - functions.put(name, function); - } - } - } - return functions; - } - - private Map buildAggregates(KeyspaceMetadata keyspace, List aggregateRows, VersionNumber cassandraVersion, Cluster cluster) { - Map aggregates = new LinkedHashMap(); - if (aggregateRows != null) { - for (Row aggregateRow : aggregateRows) { - AggregateMetadata aggregate = AggregateMetadata.build(keyspace, aggregateRow, cassandraVersion, cluster); - if (aggregate != null) { - String name = Metadata.fullFunctionName(aggregate.getSimpleName(), aggregate.getArgumentTypes()); - aggregates.put(name, aggregate); - } - } - } - return aggregates; - } - - private Map buildViews(KeyspaceMetadata keyspace, List viewRows, Map> colsDefs, VersionNumber cassandraVersion, Cluster cluster) { - Map views = new LinkedHashMap(); - if (viewRows != null) { - for (Row viewRow : viewRows) { - String viewName = viewRow.getString("view_name"); - try { - Map cols = colsDefs.get(viewName); - if (cols == null || cols.isEmpty()) - continue; // we probably raced, we will update the metadata next time - - MaterializedViewMetadata view = MaterializedViewMetadata.build(keyspace, viewRow, cols, cassandraVersion, cluster); - if (view != null) - views.put(view.getName(), view); - } catch (RuntimeException e) { - // See #refresh for why we'd rather not propagate this further - logger.error(String.format("Error parsing schema for view %s.%s: " - + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\") will be missing or incomplete", - keyspace.getName(), viewName, keyspace.getName(), viewName), e); - } - } - } - return views; - } - - // Update oldKeyspaces with the changes contained in newKeyspaces. - // This method also takes care of triggering the relevant events - private void updateKeyspaces(Metadata metadata, Map oldKeyspaces, Map newKeyspaces, String keyspaceToRebuild) { - Iterator it = oldKeyspaces.values().iterator(); - while (it.hasNext()) { - KeyspaceMetadata oldKeyspace = it.next(); - String keyspaceName = oldKeyspace.getName(); - // If we're rebuilding only a single keyspace, we should only consider that one - // because newKeyspaces will only contain that keyspace. - if ((keyspaceToRebuild == null || keyspaceToRebuild.equals(keyspaceName)) && !newKeyspaces.containsKey(keyspaceName)) { - it.remove(); - metadata.triggerOnKeyspaceRemoved(oldKeyspace); - } - } - for (KeyspaceMetadata newKeyspace : newKeyspaces.values()) { - KeyspaceMetadata oldKeyspace = oldKeyspaces.put(newKeyspace.getName(), newKeyspace); - if (oldKeyspace == null) { - metadata.triggerOnKeyspaceAdded(newKeyspace); - } else if (!oldKeyspace.equals(newKeyspace)) { - metadata.triggerOnKeyspaceChanged(newKeyspace, oldKeyspace); - } - Map oldTables = oldKeyspace == null ? new HashMap() : oldKeyspace.tables; - updateTables(metadata, oldTables, newKeyspace.tables, null); - Map oldTypes = oldKeyspace == null ? new HashMap() : oldKeyspace.userTypes; - updateUserTypes(metadata, oldTypes, newKeyspace.userTypes, null); - Map oldFunctions = oldKeyspace == null ? new HashMap() : oldKeyspace.functions; - updateFunctions(metadata, oldFunctions, newKeyspace.functions, null); - Map oldAggregates = oldKeyspace == null ? new HashMap() : oldKeyspace.aggregates; - updateAggregates(metadata, oldAggregates, newKeyspace.aggregates, null); - Map oldViews = oldKeyspace == null ? 
new HashMap() : oldKeyspace.views; - updateViews(metadata, oldViews, newKeyspace.views, null); + return functions; + } + + private Map buildAggregates( + KeyspaceMetadata keyspace, + List aggregateRows, + VersionNumber cassandraVersion, + Cluster cluster) { + Map aggregates = new LinkedHashMap(); + if (aggregateRows != null) { + for (Row aggregateRow : aggregateRows) { + AggregateMetadata aggregate = + AggregateMetadata.build(keyspace, aggregateRow, cassandraVersion, cluster); + if (aggregate != null) { + String name = + Metadata.fullFunctionName(aggregate.getSimpleName(), aggregate.getArgumentTypes()); + aggregates.put(name, aggregate); } + } } + return aggregates; + } + + private Map buildViews( + KeyspaceMetadata keyspace, + List viewRows, + Map> colsDefs, + VersionNumber cassandraVersion, + Cluster cluster) { + Map views = + new LinkedHashMap(); + if (viewRows != null) { + for (Row viewRow : viewRows) { + String viewName = viewRow.getString("view_name"); + try { + Map cols = colsDefs.get(viewName); + if (cols == null || cols.isEmpty()) + continue; // we probably raced, we will update the metadata next time - private void updateTables(Metadata metadata, Map oldTables, Map newTables, String tableToRebuild) { - Iterator it = oldTables.values().iterator(); - while (it.hasNext()) { - TableMetadata oldTable = it.next(); - String tableName = oldTable.getName(); - // If we're rebuilding only a single table, we should only consider that one - // because newTables will only contain that table. - if ((tableToRebuild == null || tableToRebuild.equals(tableName)) && !newTables.containsKey(tableName)) { - it.remove(); - metadata.triggerOnTableRemoved(oldTable); - } - } - for (TableMetadata newTable : newTables.values()) { - TableMetadata oldTable = oldTables.put(newTable.getName(), newTable); - if (oldTable == null) { - metadata.triggerOnTableAdded(newTable); - } else if (!oldTable.equals(newTable)) { - metadata.triggerOnTableChanged(newTable, oldTable); - } + MaterializedViewMetadata view = + MaterializedViewMetadata.build(keyspace, viewRow, cols, cassandraVersion, cluster); + if (view != null) views.put(view.getName(), view); + } catch (RuntimeException e) { + // See #refresh for why we'd rather not propagate this further + logger.error( + String.format( + "Error parsing schema for view %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getView(\"%s\") will be missing or incomplete", + keyspace.getName(), viewName, keyspace.getName(), viewName), + e); } + } } - - private void updateUserTypes(Metadata metadata, Map oldTypes, Map newTypes, String typeToRebuild) { - Iterator it = oldTypes.values().iterator(); - while (it.hasNext()) { - UserType oldType = it.next(); - String typeName = oldType.getTypeName(); - if ((typeToRebuild == null || typeToRebuild.equals(typeName)) && !newTypes.containsKey(typeName)) { - it.remove(); - metadata.triggerOnUserTypeRemoved(oldType); - } - } - for (UserType newType : newTypes.values()) { - UserType oldType = oldTypes.put(newType.getTypeName(), newType); - if (oldType == null) { - metadata.triggerOnUserTypeAdded(newType); - } else if (!newType.equals(oldType)) { - metadata.triggerOnUserTypeChanged(newType, oldType); - } - } + return views; + } + + // Update oldKeyspaces with the changes contained in newKeyspaces. 
+ // This method also takes care of triggering the relevant events + private void updateKeyspaces( + Metadata metadata, + Map oldKeyspaces, + Map newKeyspaces, + String keyspaceToRebuild) { + Iterator it = oldKeyspaces.values().iterator(); + while (it.hasNext()) { + KeyspaceMetadata oldKeyspace = it.next(); + String keyspaceName = oldKeyspace.getName(); + // If we're rebuilding only a single keyspace, we should only consider that one + // because newKeyspaces will only contain that keyspace. + if ((keyspaceToRebuild == null || keyspaceToRebuild.equals(keyspaceName)) + && !newKeyspaces.containsKey(keyspaceName)) { + it.remove(); + metadata.triggerOnKeyspaceRemoved(oldKeyspace); + } } - - private void updateFunctions(Metadata metadata, Map oldFunctions, Map newFunctions, String functionToRebuild) { - Iterator it = oldFunctions.values().iterator(); - while (it.hasNext()) { - FunctionMetadata oldFunction = it.next(); - String oldFunctionName = Metadata.fullFunctionName(oldFunction.getSimpleName(), oldFunction.getArguments().values()); - if ((functionToRebuild == null || functionToRebuild.equals(oldFunctionName)) && !newFunctions.containsKey(oldFunctionName)) { - it.remove(); - metadata.triggerOnFunctionRemoved(oldFunction); - } - } - for (FunctionMetadata newFunction : newFunctions.values()) { - String newFunctionName = Metadata.fullFunctionName(newFunction.getSimpleName(), newFunction.getArguments().values()); - FunctionMetadata oldFunction = oldFunctions.put(newFunctionName, newFunction); - if (oldFunction == null) { - metadata.triggerOnFunctionAdded(newFunction); - } else if (!newFunction.equals(oldFunction)) { - metadata.triggerOnFunctionChanged(newFunction, oldFunction); - } - } + for (KeyspaceMetadata newKeyspace : newKeyspaces.values()) { + KeyspaceMetadata oldKeyspace = oldKeyspaces.put(newKeyspace.getName(), newKeyspace); + if (oldKeyspace == null) { + metadata.triggerOnKeyspaceAdded(newKeyspace); + } else if (!oldKeyspace.equals(newKeyspace)) { + metadata.triggerOnKeyspaceChanged(newKeyspace, oldKeyspace); + } + Map oldTables = + oldKeyspace == null + ? new HashMap() + : new HashMap(oldKeyspace.tables); + updateTables(metadata, oldTables, newKeyspace.tables, null); + Map oldTypes = + oldKeyspace == null + ? new HashMap() + : new HashMap(oldKeyspace.userTypes); + updateUserTypes(metadata, oldTypes, newKeyspace.userTypes, null); + Map oldFunctions = + oldKeyspace == null + ? new HashMap() + : new HashMap(oldKeyspace.functions); + updateFunctions(metadata, oldFunctions, newKeyspace.functions, null); + Map oldAggregates = + oldKeyspace == null + ? new HashMap() + : new HashMap(oldKeyspace.aggregates); + updateAggregates(metadata, oldAggregates, newKeyspace.aggregates, null); + Map oldViews = + oldKeyspace == null + ? 
new HashMap() + : new HashMap(oldKeyspace.views); + updateViews(metadata, oldViews, newKeyspace.views, null); } - - private void updateAggregates(Metadata metadata, Map oldAggregates, Map newAggregates, String aggregateToRebuild) { - Iterator it = oldAggregates.values().iterator(); - while (it.hasNext()) { - AggregateMetadata oldAggregate = it.next(); - String oldAggregateName = Metadata.fullFunctionName(oldAggregate.getSimpleName(), oldAggregate.getArgumentTypes()); - if ((aggregateToRebuild == null || aggregateToRebuild.equals(oldAggregateName)) && !newAggregates.containsKey(oldAggregateName)) { - it.remove(); - metadata.triggerOnAggregateRemoved(oldAggregate); - } - } - for (AggregateMetadata newAggregate : newAggregates.values()) { - String newAggregateName = Metadata.fullFunctionName(newAggregate.getSimpleName(), newAggregate.getArgumentTypes()); - AggregateMetadata oldAggregate = oldAggregates.put(newAggregateName, newAggregate); - if (oldAggregate == null) { - metadata.triggerOnAggregateAdded(newAggregate); - } else if (!newAggregate.equals(oldAggregate)) { - metadata.triggerOnAggregateChanged(newAggregate, oldAggregate); - } - } + } + + private void updateTables( + Metadata metadata, + Map oldTables, + Map newTables, + String tableToRebuild) { + Iterator it = oldTables.values().iterator(); + while (it.hasNext()) { + TableMetadata oldTable = it.next(); + String tableName = oldTable.getName(); + // If we're rebuilding only a single table, we should only consider that one + // because newTables will only contain that table. + if ((tableToRebuild == null || tableToRebuild.equals(tableName)) + && !newTables.containsKey(tableName)) { + it.remove(); + metadata.triggerOnTableRemoved(oldTable); + } } - - private void updateViews(Metadata metadata, Map oldViews, Map newViews, String viewToRebuild) { - Iterator it = oldViews.values().iterator(); - while (it.hasNext()) { - MaterializedViewMetadata oldView = it.next(); - String aggregateName = oldView.getName(); - if ((viewToRebuild == null || viewToRebuild.equals(aggregateName)) && !newViews.containsKey(aggregateName)) { - it.remove(); - metadata.triggerOnMaterializedViewRemoved(oldView); - } + for (TableMetadata newTable : newTables.values()) { + TableMetadata oldTable = oldTables.put(newTable.getName(), newTable); + if (oldTable == null) { + metadata.triggerOnTableAdded(newTable); + } else { + // if we're updating a table only, we need to copy views from old table to the new table. 
+ if (tableToRebuild != null) { + for (MaterializedViewMetadata view : oldTable.getViews()) { + view.setBaseTable(newTable); + } } - for (MaterializedViewMetadata newView : newViews.values()) { - MaterializedViewMetadata oldView = oldViews.put(newView.getName(), newView); - if (oldView == null) { - metadata.triggerOnMaterializedViewAdded(newView); - } else if (!newView.equals(oldView)) { - metadata.triggerOnMaterializedViewChanged(newView, oldView); - } + if (!oldTable.equals(newTable)) { + metadata.triggerOnTableChanged(newTable, oldTable); } + } } - - static Map> groupByKeyspace(ResultSet rs) { - if (rs == null) - return Collections.emptyMap(); - - Map> result = new HashMap>(); - for (Row row : rs) { - String ksName = row.getString(KeyspaceMetadata.KS_NAME); - List l = result.get(ksName); - if (l == null) { - l = new ArrayList(); - result.put(ksName, l); - } - l.add(row); - } - return result; + } + + private void updateUserTypes( + Metadata metadata, + Map oldTypes, + Map newTypes, + String typeToRebuild) { + Iterator it = oldTypes.values().iterator(); + while (it.hasNext()) { + UserType oldType = it.next(); + String typeName = oldType.getTypeName(); + if ((typeToRebuild == null || typeToRebuild.equals(typeName)) + && !newTypes.containsKey(typeName)) { + it.remove(); + metadata.triggerOnUserTypeRemoved(oldType); + } } - - static Map>> groupByKeyspaceAndCf(ResultSet rs, String tableName) { - if (rs == null) - return Collections.emptyMap(); - - Map>> result = Maps.newHashMap(); - for (Row row : rs) { - String ksName = row.getString(KeyspaceMetadata.KS_NAME); - String cfName = row.getString(tableName); - Map> rowsByCf = result.get(ksName); - if (rowsByCf == null) { - rowsByCf = Maps.newHashMap(); - result.put(ksName, rowsByCf); - } - List l = rowsByCf.get(cfName); - if (l == null) { - l = Lists.newArrayList(); - rowsByCf.put(cfName, l); - } - l.add(row); - } - return result; - } - - static Map>> groupByKeyspaceAndCf(ResultSet rs, VersionNumber cassandraVersion, String tableName) { - if (rs == null) - return Collections.emptyMap(); - - Map>> result = - new HashMap>>(); - for (Row row : rs) { - String ksName = row.getString(KeyspaceMetadata.KS_NAME); - String cfName = row.getString(tableName); - Map> colsByCf = result.get(ksName); - if (colsByCf == null) { - colsByCf = new HashMap>(); - result.put(ksName, colsByCf); - } - Map l = colsByCf.get(cfName); - if (l == null) { - l = new HashMap(); - colsByCf.put(cfName, l); - } - ColumnMetadata.Raw c = ColumnMetadata.Raw.fromRow(row, cassandraVersion); - l.put(c.name, c); - } - return result; - } - - private static ResultSetFuture queryAsync(String query, Connection connection, ProtocolVersion protocolVersion) throws ConnectionException, BusyConnectionException { - DefaultResultSetFuture future = new DefaultResultSetFuture(null, protocolVersion, new Requests.Query(query)); - connection.write(future); - return future; - } - - private static ResultSet get(ResultSetFuture future) throws InterruptedException, ExecutionException { - return (future == null) ? null : future.get(); - } - - /** - * The rows from the system tables that we want to parse to metadata classes. - * The format of these rows depends on the Cassandra version, but our parsing code knows how to handle the differences. 
- */ - private static class SystemRows { - final ResultSet keyspaces; - final Map> tables; - final Map>> columns; - final Map> udts; - final Map> functions; - final Map> aggregates; - final Map> views; - final Map>> indexes; - - public SystemRows(ResultSet keyspaces, Map> tables, Map>> columns, Map> udts, Map> functions, - Map> aggregates, Map> views, Map>> indexes) { - this.keyspaces = keyspaces; - this.tables = tables; - this.columns = columns; - this.udts = udts; - this.functions = functions; - this.aggregates = aggregates; - this.views = views; - this.indexes = indexes; - } + for (UserType newType : newTypes.values()) { + UserType oldType = oldTypes.put(newType.getTypeName(), newType); + if (oldType == null) { + metadata.triggerOnUserTypeAdded(newType); + } else if (!newType.equals(oldType)) { + metadata.triggerOnUserTypeChanged(newType, oldType); + } } - - private static class V2SchemaParser extends SchemaParser { - - private static final String SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"; - private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; - private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; - private static final String SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes"; - private static final String SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions"; - private static final String SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates"; - - private static final String CF_NAME = "columnfamily_name"; - - @Override - SystemRows fetchSystemRows(Cluster cluster, - SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature, - Connection connection, VersionNumber cassandraVersion) - throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - - boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); - - String whereClause = ""; - if (targetType != null) { - whereClause = " WHERE keyspace_name = '" + targetKeyspace + '\''; - if (targetType == TABLE) - whereClause += " AND columnfamily_name = '" + targetName + '\''; - else if (targetType == TYPE) - whereClause += " AND type_name = '" + targetName + '\''; - else if (targetType == FUNCTION) - whereClause += " AND function_name = '" + targetName + "' AND signature = " + LIST_OF_TEXT_CODEC.format(targetSignature); - else if (targetType == AGGREGATE) - whereClause += " AND aggregate_name = '" + targetName + "' AND signature = " + LIST_OF_TEXT_CODEC.format(targetSignature); - } - - ResultSetFuture ksFuture = null, - udtFuture = null, - cfFuture = null, - colsFuture = null, - functionsFuture = null, - aggregatesFuture = null; - - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - - if (isSchemaOrKeyspace) - ksFuture = queryAsync(SELECT_KEYSPACES + whereClause, connection, protocolVersion); - - if (isSchemaOrKeyspace && supportsUdts(cassandraVersion) || targetType == TYPE) - udtFuture = queryAsync(SELECT_USERTYPES + whereClause, connection, protocolVersion); - - if (isSchemaOrKeyspace || targetType == TABLE) { - cfFuture = queryAsync(SELECT_COLUMN_FAMILIES + whereClause, connection, protocolVersion); - colsFuture = queryAsync(SELECT_COLUMNS + whereClause, connection, protocolVersion); - } - - if ((isSchemaOrKeyspace && supportsUdfs(cassandraVersion) || targetType == FUNCTION)) - functionsFuture = queryAsync(SELECT_FUNCTIONS + whereClause, connection, protocolVersion); - - if 
(isSchemaOrKeyspace && supportsUdfs(cassandraVersion) || targetType == AGGREGATE) - aggregatesFuture = queryAsync(SELECT_AGGREGATES + whereClause, connection, protocolVersion); - - return new SystemRows(get(ksFuture), - groupByKeyspace(get(cfFuture)), - groupByKeyspaceAndCf(get(colsFuture), cassandraVersion, CF_NAME), - groupByKeyspace(get(udtFuture)), - groupByKeyspace(get(functionsFuture)), - groupByKeyspace(get(aggregatesFuture)), - // No views nor separate indexes table in Cassandra 2: - Collections.>emptyMap(), - Collections.>>emptyMap()); - } - - @Override - String tableNameColumn() { - return CF_NAME; - } - - private boolean supportsUdts(VersionNumber cassandraVersion) { - return cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1); - } - - private boolean supportsUdfs(VersionNumber cassandraVersion) { - return cassandraVersion.getMajor() > 2 || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 2); - } - + } + + private void updateFunctions( + Metadata metadata, + Map oldFunctions, + Map newFunctions, + String functionToRebuild) { + Iterator it = oldFunctions.values().iterator(); + while (it.hasNext()) { + FunctionMetadata oldFunction = it.next(); + String oldFunctionName = + Metadata.fullFunctionName( + oldFunction.getSimpleName(), oldFunction.getArguments().values()); + if ((functionToRebuild == null || functionToRebuild.equals(oldFunctionName)) + && !newFunctions.containsKey(oldFunctionName)) { + it.remove(); + metadata.triggerOnFunctionRemoved(oldFunction); + } + } + for (FunctionMetadata newFunction : newFunctions.values()) { + String newFunctionName = + Metadata.fullFunctionName( + newFunction.getSimpleName(), newFunction.getArguments().values()); + FunctionMetadata oldFunction = oldFunctions.put(newFunctionName, newFunction); + if (oldFunction == null) { + metadata.triggerOnFunctionAdded(newFunction); + } else if (!newFunction.equals(oldFunction)) { + metadata.triggerOnFunctionChanged(newFunction, oldFunction); + } + } + } + + private void updateAggregates( + Metadata metadata, + Map oldAggregates, + Map newAggregates, + String aggregateToRebuild) { + Iterator it = oldAggregates.values().iterator(); + while (it.hasNext()) { + AggregateMetadata oldAggregate = it.next(); + String oldAggregateName = + Metadata.fullFunctionName(oldAggregate.getSimpleName(), oldAggregate.getArgumentTypes()); + if ((aggregateToRebuild == null || aggregateToRebuild.equals(oldAggregateName)) + && !newAggregates.containsKey(oldAggregateName)) { + it.remove(); + metadata.triggerOnAggregateRemoved(oldAggregate); + } + } + for (AggregateMetadata newAggregate : newAggregates.values()) { + String newAggregateName = + Metadata.fullFunctionName(newAggregate.getSimpleName(), newAggregate.getArgumentTypes()); + AggregateMetadata oldAggregate = oldAggregates.put(newAggregateName, newAggregate); + if (oldAggregate == null) { + metadata.triggerOnAggregateAdded(newAggregate); + } else if (!newAggregate.equals(oldAggregate)) { + metadata.triggerOnAggregateChanged(newAggregate, oldAggregate); + } + } + } + + private void updateViews( + Metadata metadata, + Map oldViews, + Map newViews, + String viewToRebuild) { + Iterator it = oldViews.values().iterator(); + while (it.hasNext()) { + MaterializedViewMetadata oldView = it.next(); + String aggregateName = oldView.getName(); + if ((viewToRebuild == null || viewToRebuild.equals(aggregateName)) + && !newViews.containsKey(aggregateName)) { + it.remove(); + 
metadata.triggerOnMaterializedViewRemoved(oldView); + } + } + for (MaterializedViewMetadata newView : newViews.values()) { + MaterializedViewMetadata oldView = oldViews.put(newView.getName(), newView); + if (oldView == null) { + metadata.triggerOnMaterializedViewAdded(newView); + } else if (!newView.equals(oldView)) { + metadata.triggerOnMaterializedViewChanged(newView, oldView); + } + } + } + + static Map> groupByKeyspace(ResultSet rs) { + if (rs == null) return Collections.emptyMap(); + + Map> result = new HashMap>(); + for (Row row : rs) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + List l = result.get(ksName); + if (l == null) { + l = new ArrayList(); + result.put(ksName, l); + } + l.add(row); + } + return result; + } + + static Map>> groupByKeyspaceAndCf(ResultSet rs, String tableName) { + if (rs == null) return Collections.emptyMap(); + + Map>> result = Maps.newHashMap(); + for (Row row : rs) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + String cfName = row.getString(tableName); + Map> rowsByCf = result.get(ksName); + if (rowsByCf == null) { + rowsByCf = Maps.newHashMap(); + result.put(ksName, rowsByCf); + } + List l = rowsByCf.get(cfName); + if (l == null) { + l = Lists.newArrayList(); + rowsByCf.put(cfName, l); + } + l.add(row); + } + return result; + } + + static Map>> groupByKeyspaceAndCf( + ResultSet rs, VersionNumber cassandraVersion, String tableName) { + if (rs == null) return Collections.emptyMap(); + + Map>> result = + new HashMap>>(); + for (Row row : rs) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + String cfName = row.getString(tableName); + Map> colsByCf = result.get(ksName); + if (colsByCf == null) { + colsByCf = new HashMap>(); + result.put(ksName, colsByCf); + } + Map l = colsByCf.get(cfName); + if (l == null) { + l = new HashMap(); + colsByCf.put(cfName, l); + } + ColumnMetadata.Raw c = ColumnMetadata.Raw.fromRow(row, cassandraVersion); + l.put(c.name, c); + } + return result; + } + + private static ResultSetFuture queryAsync( + String query, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException { + DefaultResultSetFuture future = + new DefaultResultSetFuture(null, protocolVersion, new Requests.Query(query)); + connection.write(future); + return future; + } + + private static ResultSet get(ResultSetFuture future) + throws InterruptedException, ExecutionException { + return (future == null) ? null : future.get(); + } + + /** + * The rows from the system tables that we want to parse to metadata classes. The format of these + * rows depends on the Cassandra version, but our parsing code knows how to handle the + * differences. 
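+ * Result sets that do not apply to a given Cassandra version (for example, views before 3.0 or + * virtual tables before 4.0) are simply left null or empty.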
+ */ + private static class SystemRows { + final ResultSet keyspaces; + final Map> tables; + final Map>> columns; + final Map> udts; + final Map> functions; + final Map> aggregates; + final Map> views; + final Map>> indexes; + final ResultSet virtualKeyspaces; + final Map> virtualTables; + final Map>> virtualColumns; + + public SystemRows( + ResultSet keyspaces, + Map> tables, + Map>> columns, + Map> udts, + Map> functions, + Map> aggregates, + Map> views, + Map>> indexes, + ResultSet virtualKeyspaces, + Map> virtualTables, + Map>> virtualColumns) { + this.keyspaces = keyspaces; + this.tables = tables; + this.columns = columns; + this.udts = udts; + this.functions = functions; + this.aggregates = aggregates; + this.views = views; + this.indexes = indexes; + this.virtualKeyspaces = virtualKeyspaces; + this.virtualTables = virtualTables; + this.virtualColumns = virtualColumns; + } + } + + private static class V2SchemaParser extends SchemaParser { + + private static final String SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"; + private static final String SELECT_COLUMN_FAMILIES = + "SELECT * FROM system.schema_columnfamilies"; + private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; + private static final String SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes"; + private static final String SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions"; + private static final String SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates"; + + private static final String CF_NAME = "columnfamily_name"; + + @Override + SystemRows fetchSystemRows( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + + boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); + + String whereClause = ""; + if (targetType != null) { + whereClause = " WHERE keyspace_name = '" + targetKeyspace + '\''; + if (targetType == TABLE) whereClause += " AND columnfamily_name = '" + targetName + '\''; + else if (targetType == TYPE) whereClause += " AND type_name = '" + targetName + '\''; + else if (targetType == FUNCTION) + whereClause += + " AND function_name = '" + + targetName + + "' AND signature = " + + LIST_OF_TEXT_CODEC.format(targetSignature); + else if (targetType == AGGREGATE) + whereClause += + " AND aggregate_name = '" + + targetName + + "' AND signature = " + + LIST_OF_TEXT_CODEC.format(targetSignature); + } + + ResultSetFuture ksFuture = null, + udtFuture = null, + cfFuture = null, + colsFuture = null, + functionsFuture = null, + aggregatesFuture = null; + + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + + if (isSchemaOrKeyspace) + ksFuture = queryAsync(SELECT_KEYSPACES + whereClause, connection, protocolVersion); + + if (isSchemaOrKeyspace && supportsUdts(cassandraVersion) || targetType == TYPE) + udtFuture = queryAsync(SELECT_USERTYPES + whereClause, connection, protocolVersion); + + if (isSchemaOrKeyspace || targetType == TABLE) { + cfFuture = queryAsync(SELECT_COLUMN_FAMILIES + whereClause, connection, protocolVersion); + colsFuture = queryAsync(SELECT_COLUMNS + whereClause, connection, protocolVersion); + } + + if ((isSchemaOrKeyspace && supportsUdfs(cassandraVersion) || targetType == FUNCTION)) + functionsFuture = queryAsync(SELECT_FUNCTIONS + whereClause, 
connection, protocolVersion); + + if (isSchemaOrKeyspace && supportsUdfs(cassandraVersion) || targetType == AGGREGATE) + aggregatesFuture = queryAsync(SELECT_AGGREGATES + whereClause, connection, protocolVersion); + + return new SystemRows( + get(ksFuture), + groupByKeyspace(get(cfFuture)), + groupByKeyspaceAndCf(get(colsFuture), cassandraVersion, CF_NAME), + groupByKeyspace(get(udtFuture)), + groupByKeyspace(get(functionsFuture)), + groupByKeyspace(get(aggregatesFuture)), + // No views nor separate indexes table in Cassandra 2: + Collections.>emptyMap(), + Collections.>>emptyMap(), + null, + Collections.>emptyMap(), + Collections.>>emptyMap()); } - private static class V3SchemaParser extends SchemaParser { - - private static final String SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces"; - private static final String SELECT_TABLES = "SELECT * FROM system_schema.tables"; - private static final String SELECT_COLUMNS = "SELECT * FROM system_schema.columns"; - private static final String SELECT_USERTYPES = "SELECT * FROM system_schema.types"; - private static final String SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions"; - private static final String SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates"; - private static final String SELECT_INDEXES = "SELECT * FROM system_schema.indexes"; - private static final String SELECT_VIEWS = "SELECT * FROM system_schema.views"; - - private static final String TABLE_NAME = "table_name"; - - @Override - SystemRows fetchSystemRows(Cluster cluster, SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature, Connection connection, VersionNumber cassandraVersion) - throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { - - boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); - - ResultSetFuture ksFuture = null, - udtFuture = null, - cfFuture = null, - colsFuture = null, - functionsFuture = null, - aggregatesFuture = null, - indexesFuture = null, - viewsFuture = null; - - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - - if (isSchemaOrKeyspace) - ksFuture = queryAsync(SELECT_KEYSPACES + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - - if (isSchemaOrKeyspace || targetType == TYPE) - udtFuture = queryAsync(SELECT_USERTYPES + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - - if (isSchemaOrKeyspace || targetType == TABLE) { - cfFuture = queryAsync(SELECT_TABLES + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - colsFuture = queryAsync(SELECT_COLUMNS + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - indexesFuture = queryAsync(SELECT_INDEXES + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - viewsFuture = queryAsync(SELECT_VIEWS + whereClause(targetType == TABLE ? 
VIEW : targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); - } + @Override + String tableNameColumn() { + return CF_NAME; + } - if (isSchemaOrKeyspace || targetType == FUNCTION) - functionsFuture = queryAsync(SELECT_FUNCTIONS + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); + private boolean supportsUdts(VersionNumber cassandraVersion) { + return cassandraVersion.getMajor() > 2 + || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1); + } - if (isSchemaOrKeyspace || targetType == AGGREGATE) - aggregatesFuture = queryAsync(SELECT_AGGREGATES + whereClause(targetType, targetKeyspace, targetName, targetSignature), connection, protocolVersion); + private boolean supportsUdfs(VersionNumber cassandraVersion) { + return cassandraVersion.getMajor() > 2 + || (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 2); + } + } + + private static class V3SchemaParser extends SchemaParser { + + protected static final String SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces"; + protected static final String SELECT_TABLES = "SELECT * FROM system_schema.tables"; + protected static final String SELECT_COLUMNS = "SELECT * FROM system_schema.columns"; + protected static final String SELECT_USERTYPES = "SELECT * FROM system_schema.types"; + protected static final String SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions"; + protected static final String SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates"; + protected static final String SELECT_INDEXES = "SELECT * FROM system_schema.indexes"; + protected static final String SELECT_VIEWS = "SELECT * FROM system_schema.views"; + + private static final String TABLE_NAME = "table_name"; + + @Override + SystemRows fetchSystemRows( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + + boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); + + ResultSetFuture ksFuture = null, + udtFuture = null, + cfFuture = null, + colsFuture = null, + functionsFuture = null, + aggregatesFuture = null, + indexesFuture = null, + viewsFuture = null; + + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + + if (isSchemaOrKeyspace) + ksFuture = + queryAsync( + SELECT_KEYSPACES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + + if (isSchemaOrKeyspace || targetType == TYPE) + udtFuture = + queryAsync( + SELECT_USERTYPES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + + if (isSchemaOrKeyspace || targetType == TABLE) { + cfFuture = + queryAsync( + SELECT_TABLES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + colsFuture = + queryAsync( + SELECT_COLUMNS + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + indexesFuture = + queryAsync( + SELECT_INDEXES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + viewsFuture = + queryAsync( + SELECT_VIEWS + + whereClause( + targetType == TABLE ? 
VIEW : targetType, + targetKeyspace, + targetName, + targetSignature), + connection, + protocolVersion); + } + + if (isSchemaOrKeyspace || targetType == FUNCTION) + functionsFuture = + queryAsync( + SELECT_FUNCTIONS + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + + if (isSchemaOrKeyspace || targetType == AGGREGATE) + aggregatesFuture = + queryAsync( + SELECT_AGGREGATES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + + return new SystemRows( + get(ksFuture), + groupByKeyspace(get(cfFuture)), + groupByKeyspaceAndCf(get(colsFuture), cassandraVersion, TABLE_NAME), + groupByKeyspace(get(udtFuture)), + groupByKeyspace(get(functionsFuture)), + groupByKeyspace(get(aggregatesFuture)), + groupByKeyspace(get(viewsFuture)), + groupByKeyspaceAndCf(get(indexesFuture), TABLE_NAME), + null, + Collections.>emptyMap(), + Collections.>>emptyMap()); + } - return new SystemRows(get(ksFuture), - groupByKeyspace(get(cfFuture)), - groupByKeyspaceAndCf(get(colsFuture), cassandraVersion, TABLE_NAME), - groupByKeyspace(get(udtFuture)), - groupByKeyspace(get(functionsFuture)), - groupByKeyspace(get(aggregatesFuture)), - groupByKeyspace(get(viewsFuture)), - groupByKeyspaceAndCf(get(indexesFuture), TABLE_NAME)); - } + @Override + String tableNameColumn() { + return TABLE_NAME; + } - @Override - String tableNameColumn() { - return TABLE_NAME; - } + protected String whereClause( + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature) { + String whereClause = ""; + if (targetType != null) { + whereClause = " WHERE keyspace_name = '" + targetKeyspace + '\''; + if (targetType == TABLE) whereClause += " AND table_name = '" + targetName + '\''; + else if (targetType == VIEW) whereClause += " AND view_name = '" + targetName + '\''; + else if (targetType == TYPE) whereClause += " AND type_name = '" + targetName + '\''; + else if (targetType == FUNCTION) + whereClause += + " AND function_name = '" + + targetName + + "' AND argument_types = " + + LIST_OF_TEXT_CODEC.format(targetSignature); + else if (targetType == AGGREGATE) + whereClause += + " AND aggregate_name = '" + + targetName + + "' AND argument_types = " + + LIST_OF_TEXT_CODEC.format(targetSignature); + } + return whereClause; + } - private String whereClause(SchemaElement targetType, String targetKeyspace, String targetName, List targetSignature) { - String whereClause = ""; - if (targetType != null) { - whereClause = " WHERE keyspace_name = '" + targetKeyspace + '\''; - if (targetType == TABLE) - whereClause += " AND table_name = '" + targetName + '\''; - else if (targetType == VIEW) - whereClause += " AND view_name = '" + targetName + '\''; - else if (targetType == TYPE) - whereClause += " AND type_name = '" + targetName + '\''; - else if (targetType == FUNCTION) - whereClause += " AND function_name = '" + targetName + "' AND argument_types = " + LIST_OF_TEXT_CODEC.format(targetSignature); - else if (targetType == AGGREGATE) - whereClause += " AND aggregate_name = '" + targetName + "' AND argument_types = " + LIST_OF_TEXT_CODEC.format(targetSignature); + // Used by maybeSortUdts to sort at each dependency group alphabetically. 
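+ // (sorting each group by type name keeps the resulting topological order deterministic)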
+ private static final Comparator sortByTypeName = + new Comparator() { + @Override + public int compare(Row o1, Row o2) { + String type1 = o1.getString(UserType.TYPE_NAME); + String type2 = o2.getString(UserType.TYPE_NAME); + + if (type1 == null && type2 == null) { + return 0; + } else if (type2 == null) { + return 1; + } else if (type1 == null) { + return -1; + } else { + return type1.compareTo(type2); } - return whereClause; - } - + } + }; - @Override - protected List maybeSortUdts(List udtRows, Cluster cluster, String keyspace) { - if (udtRows.size() < 2) - return udtRows; - - // For C* 3+, user-defined type resolution must be done in proper order - // to guarantee that nested UDTs get resolved - DirectedGraph graph = new DirectedGraph(udtRows); - for (Row from : udtRows) { - for (Row to : udtRows) { - if (from != to && dependsOn(to, from, cluster, keyspace)) - graph.addEdge(from, to); - } - } - return graph.topologicalSort(); + @Override + protected List maybeSortUdts(List udtRows, Cluster cluster, String keyspace) { + if (udtRows.size() < 2) return udtRows; + + // For C* 3+, user-defined type resolution must be done in proper order + // to guarantee that nested UDTs get resolved + DirectedGraph graph = new DirectedGraph(sortByTypeName, udtRows); + for (Row from : udtRows) { + for (Row to : udtRows) { + if (from != to && dependsOn(to, from, cluster, keyspace)) graph.addEdge(from, to); } + } + return graph.topologicalSort(); + } - private boolean dependsOn(Row udt1, Row udt2, Cluster cluster, String keyspace) { - List fieldTypes = udt1.getList(UserType.COLS_TYPES, String.class); - String typeName = udt2.getString(UserType.TYPE_NAME); - for (String fieldTypeStr : fieldTypes) { - // use shallow user types since some definitions might not be known at this stage - DataType fieldType = DataTypeCqlNameParser.parse(fieldTypeStr, cluster, keyspace, null, null, false, true); - if (references(fieldType, typeName)) - return true; - } - return false; - } + private boolean dependsOn(Row udt1, Row udt2, Cluster cluster, String keyspace) { + List fieldTypes = udt1.getList(UserType.COLS_TYPES, String.class); + String typeName = udt2.getString(UserType.TYPE_NAME); + for (String fieldTypeStr : fieldTypes) { + // use shallow user types since some definitions might not be known at this stage + DataType fieldType = + DataTypeCqlNameParser.parse(fieldTypeStr, cluster, keyspace, null, null, false, true); + if (references(fieldType, typeName)) return true; + } + return false; + } - private boolean references(DataType dataType, String typeName) { - if (dataType instanceof UserType.Shallow && ((UserType.Shallow) dataType).typeName.equals(typeName)) - return true; - for (DataType arg : dataType.getTypeArguments()) { - if (references(arg, typeName)) - return true; - } - if (dataType instanceof TupleType) { - for (DataType arg : ((TupleType) dataType).getComponentTypes()) { - if (references(arg, typeName)) - return true; - } - } - return false; + private boolean references(DataType dataType, String typeName) { + if (dataType instanceof UserType.Shallow + && ((UserType.Shallow) dataType).typeName.equals(typeName)) return true; + for (DataType arg : dataType.getTypeArguments()) { + if (references(arg, typeName)) return true; + } + if (dataType instanceof TupleType) { + for (DataType arg : ((TupleType) dataType).getComponentTypes()) { + if (references(arg, typeName)) return true; } + } + return false; + } + } + + private static class V4SchemaParser extends V3SchemaParser { + + private static final String 
SELECT_VIRTUAL_KEYSPACES = + "SELECT * FROM system_virtual_schema.keyspaces"; + private static final String SELECT_VIRTUAL_TABLES = + "SELECT * FROM system_virtual_schema.tables"; + private static final String SELECT_VIRTUAL_COLUMNS = + "SELECT * FROM system_virtual_schema.columns"; + + private static final String TABLE_NAME = "table_name"; + + @Override + SystemRows fetchSystemRows( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { + + boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); + + ResultSetFuture ksFuture = null, + udtFuture = null, + cfFuture = null, + colsFuture = null, + functionsFuture = null, + aggregatesFuture = null, + indexesFuture = null, + viewsFuture = null, + virtualKeyspacesFuture = null, + virtualTableFuture = null, + virtualColumnsFuture = null; + + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + + if (isSchemaOrKeyspace) { + ksFuture = + queryAsync( + SELECT_KEYSPACES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + virtualKeyspacesFuture = + queryAsync( + SELECT_VIRTUAL_KEYSPACES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + virtualColumnsFuture = + queryAsync( + SELECT_VIRTUAL_COLUMNS + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + virtualTableFuture = + queryAsync( + SELECT_VIRTUAL_TABLES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + } + + if (isSchemaOrKeyspace || targetType == TYPE) { + udtFuture = + queryAsync( + SELECT_USERTYPES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + } + + if (isSchemaOrKeyspace || targetType == TABLE) { + cfFuture = + queryAsync( + SELECT_TABLES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + colsFuture = + queryAsync( + SELECT_COLUMNS + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + indexesFuture = + queryAsync( + SELECT_INDEXES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + viewsFuture = + queryAsync( + SELECT_VIEWS + + whereClause( + targetType == TABLE ? 
VIEW : targetType, + targetKeyspace, + targetName, + targetSignature), + connection, + protocolVersion); + } + + if (isSchemaOrKeyspace || targetType == FUNCTION) { + functionsFuture = + queryAsync( + SELECT_FUNCTIONS + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + } + + if (isSchemaOrKeyspace || targetType == AGGREGATE) { + aggregatesFuture = + queryAsync( + SELECT_AGGREGATES + + whereClause(targetType, targetKeyspace, targetName, targetSignature), + connection, + protocolVersion); + } + + return new SystemRows( + get(ksFuture), + groupByKeyspace(get(cfFuture)), + groupByKeyspaceAndCf(get(colsFuture), cassandraVersion, TABLE_NAME), + groupByKeyspace(get(udtFuture)), + groupByKeyspace(get(functionsFuture)), + groupByKeyspace(get(aggregatesFuture)), + groupByKeyspace(get(viewsFuture)), + groupByKeyspaceAndCf(get(indexesFuture), TABLE_NAME), + get(virtualKeyspacesFuture), + groupByKeyspace(get(virtualTableFuture)), + groupByKeyspaceAndCf(get(virtualColumnsFuture), cassandraVersion, TABLE_NAME)); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Segment.java b/driver-core/src/main/java/com/datastax/driver/core/Segment.java new file mode 100644 index 00000000000..be386826be9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Segment.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import io.netty.buffer.ByteBuf; + +/** + * A container of {@link Frame}s in protocol v5 and above. This is a new protocol construct that + * allows checksumming and compressing multiple messages together. + * + *

<p>{@link #getPayload()} contains either: + * + * <ul> + *   <li>a sequence of encoded {@link Frame}s, all concatenated together. In this case, {@link + *       #isSelfContained()} returns true. + *   <li>or a slice of an encoded large {@link Frame} (if that frame is longer than {@link + *       #MAX_PAYLOAD_LENGTH}). In this case, {@link #isSelfContained()} returns false. + * </ul> + * + * The payload is not compressed; compression is handled at a lower level when encoding or decoding + * this object. + * + * <p>
Naming is provisional: "segment" is not the official name, I picked it arbitrarily for the + * driver code to avoid a name clash. It's possible that this type will be renamed to "frame", and + * {@link Frame} to something else, at some point in the future (this is an ongoing discussion on + * the server ticket). + */ +class Segment { + + static int MAX_PAYLOAD_LENGTH = 128 * 1024 - 1; + + private final ByteBuf payload; + private final boolean isSelfContained; + + Segment(ByteBuf payload, boolean isSelfContained) { + this.payload = payload; + this.isSelfContained = isSelfContained; + } + + public ByteBuf getPayload() { + return payload; + } + + public boolean isSelfContained() { + return isSelfContained; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SegmentBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/SegmentBuilder.java new file mode 100644 index 00000000000..9e5e75eec76 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SegmentBuilder.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import java.util.ArrayList; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Abstracts the details of batching a sequence of {@link Message.Request}s into one or more {@link + * Segment}s before sending them out on the network. + * + *

This class is not thread-safe. + */ +class SegmentBuilder { + + private static final Logger logger = LoggerFactory.getLogger(SegmentBuilder.class); + + private final ChannelHandlerContext context; + private final ByteBufAllocator allocator; + private final int maxPayloadLength; + private final Message.ProtocolEncoder requestEncoder; + + private final List currentPayloadHeaders = new ArrayList(); + private final List currentPayloadBodies = new ArrayList(); + private final List currentPayloadPromises = new ArrayList(); + private int currentPayloadLength; + + SegmentBuilder( + ChannelHandlerContext context, + ByteBufAllocator allocator, + Message.ProtocolEncoder requestEncoder) { + this(context, allocator, requestEncoder, Segment.MAX_PAYLOAD_LENGTH); + } + + /** Exposes the max length for unit tests; in production, this is hard-coded. */ + @VisibleForTesting + SegmentBuilder( + ChannelHandlerContext context, + ByteBufAllocator allocator, + Message.ProtocolEncoder requestEncoder, + int maxPayloadLength) { + this.context = context; + this.allocator = allocator; + this.requestEncoder = requestEncoder; + this.maxPayloadLength = maxPayloadLength; + } + + /** + * Adds a new request. It will be encoded into one or more segments, that will be passed to {@link + * #processSegment(Segment, ChannelPromise)} at some point in the future. + * + *

The caller must invoke {@link #flush()} after the last request. + */ + public void addRequest(Message.Request request, ChannelPromise promise) { + + // Wrap the request into a legacy frame, append that frame to the payload. + int frameHeaderLength = Frame.Header.lengthFor(requestEncoder.protocolVersion); + int frameBodyLength = requestEncoder.encodedSize(request); + int frameLength = frameHeaderLength + frameBodyLength; + + Frame.Header header = + new Frame.Header( + requestEncoder.protocolVersion, + requestEncoder.computeFlags(request), + request.getStreamId(), + request.type.opcode, + frameBodyLength); + + if (frameLength > maxPayloadLength) { + // Large request: split into multiple dedicated segments and process them immediately: + ByteBuf frame = allocator.ioBuffer(frameLength); + header.encodeInto(frame); + requestEncoder.encode(request, frame); + + int sliceCount = + (frameLength / maxPayloadLength) + (frameLength % maxPayloadLength == 0 ? 0 : 1); + + logger.trace( + "Splitting large request ({} bytes) into {} segments: {}", + frameLength, + sliceCount, + request); + + List segmentPromises = split(promise, sliceCount); + int i = 0; + do { + ByteBuf part = frame.readSlice(Math.min(maxPayloadLength, frame.readableBytes())); + part.retain(); + process(part, false, segmentPromises.get(i++)); + } while (frame.isReadable()); + // We've retained each slice, and won't reference this buffer anymore + frame.release(); + } else { + // Small request: append to an existing segment, together with other messages. + if (currentPayloadLength + frameLength > maxPayloadLength) { + // Current segment is full, process and start a new one: + processCurrentPayload(); + resetCurrentPayload(); + } + // Append frame to current segment + logger.trace( + "Adding {}th request to self-contained segment: {}", + currentPayloadHeaders.size() + 1, + request); + currentPayloadHeaders.add(header); + currentPayloadBodies.add(request); + currentPayloadPromises.add(promise); + currentPayloadLength += frameLength; + } + } + + /** + * Signals that we're done adding requests. + * + *

This must be called after adding the last request, it will possibly trigger the generation + * of one last segment. + */ + public void flush() { + if (currentPayloadLength > 0) { + processCurrentPayload(); + resetCurrentPayload(); + } + } + + /** What to do whenever a full segment is ready. */ + protected void processSegment(Segment segment, ChannelPromise segmentPromise) { + context.write(segment, segmentPromise); + } + + private void process(ByteBuf payload, boolean isSelfContained, ChannelPromise segmentPromise) { + processSegment(new Segment(payload, isSelfContained), segmentPromise); + } + + private void processCurrentPayload() { + int requestCount = currentPayloadHeaders.size(); + assert currentPayloadBodies.size() == requestCount + && currentPayloadPromises.size() == requestCount; + logger.trace("Emitting new self-contained segment with {} frame(s)", requestCount); + ByteBuf payload = this.allocator.ioBuffer(currentPayloadLength); + for (int i = 0; i < requestCount; i++) { + Frame.Header header = currentPayloadHeaders.get(i); + Message.Request request = currentPayloadBodies.get(i); + header.encodeInto(payload); + requestEncoder.encode(request, payload); + } + process(payload, true, merge(currentPayloadPromises)); + } + + private void resetCurrentPayload() { + currentPayloadHeaders.clear(); + currentPayloadBodies.clear(); + currentPayloadPromises.clear(); + currentPayloadLength = 0; + } + + // Merges multiple promises into a single one, that will notify all of them when done. + // This is used when multiple requests are sent as a single segment. + private ChannelPromise merge(List framePromises) { + if (framePromises.size() == 1) { + return framePromises.get(0); + } + ChannelPromise segmentPromise = context.newPromise(); + final ImmutableList dependents = ImmutableList.copyOf(framePromises); + segmentPromise.addListener( + new GenericFutureListener>() { + @Override + public void operationComplete(Future future) throws Exception { + if (future.isSuccess()) { + for (ChannelPromise framePromise : dependents) { + framePromise.setSuccess(); + } + } else { + Throwable cause = future.cause(); + for (ChannelPromise framePromise : dependents) { + framePromise.setFailure(cause); + } + } + } + }); + return segmentPromise; + } + + // Splits a single promise into multiple ones. The original promise will complete when all the + // splits have. + // This is used when a single request is sliced into multiple segment. + private List split(ChannelPromise framePromise, int sliceCount) { + // We split one frame into multiple slices. When all slices are written, the frame is written. + List slicePromises = new ArrayList(sliceCount); + for (int i = 0; i < sliceCount; i++) { + slicePromises.add(context.newPromise()); + } + GenericFutureListener> sliceListener = + new SliceWriteListener(framePromise, slicePromises); + for (int i = 0; i < sliceCount; i++) { + slicePromises.get(i).addListener(sliceListener); + } + return slicePromises; + } + + static class SliceWriteListener implements GenericFutureListener> { + + private final ChannelPromise parentPromise; + private final List slicePromises; + + // All slices are written to the same channel, and the segment is built from the Flusher which + // also runs on the same event loop, so we don't need synchronization. 
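+ // Counts down as each slice's write promise completes; the parent frame promise succeeds once it + // reaches zero, and fails fast as soon as any slice fails.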
+ private int remainingSlices; + + SliceWriteListener(ChannelPromise parentPromise, List slicePromises) { + this.parentPromise = parentPromise; + this.slicePromises = slicePromises; + this.remainingSlices = slicePromises.size(); + } + + @Override + public void operationComplete(Future future) { + if (!parentPromise.isDone()) { + if (future.isSuccess()) { + remainingSlices -= 1; + if (remainingSlices == 0) { + parentPromise.setSuccess(); + } + } else { + // If any slice fails, we can immediately mark the whole frame as failed: + parentPromise.setFailure(future.cause()); + // Cancel any remaining slice, Netty will not send the bytes. + for (ChannelPromise slicePromise : slicePromises) { + slicePromise.cancel(/*Netty ignores this*/ false); + } + } + } + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SegmentCodec.java b/driver-core/src/main/java/com/datastax/driver/core/SegmentCodec.java new file mode 100644 index 00000000000..c098c842381 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SegmentCodec.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.exceptions.CrcMismatchException; +import com.google.common.annotations.VisibleForTesting; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import java.io.IOException; +import java.util.List; + +class SegmentCodec { + + private static final int COMPRESSED_HEADER_LENGTH = 5; + private static final int UNCOMPRESSED_HEADER_LENGTH = 3; + static final int CRC24_LENGTH = 3; + static final int CRC32_LENGTH = 4; + + private final ByteBufAllocator allocator; + private final boolean compress; + private final FrameCompressor compressor; + + SegmentCodec(ByteBufAllocator allocator, ProtocolOptions.Compression compression) { + this.allocator = allocator; + this.compress = compression != ProtocolOptions.Compression.NONE; + this.compressor = compression.compressor(); + } + + /** The length of the segment header, excluding the 3-byte trailing CRC. */ + int headerLength() { + return compress ? 
COMPRESSED_HEADER_LENGTH : UNCOMPRESSED_HEADER_LENGTH; + } + + void encode(Segment segment, List out) throws IOException { + ByteBuf uncompressedPayload = segment.getPayload(); + int uncompressedPayloadLength = uncompressedPayload.readableBytes(); + assert uncompressedPayloadLength <= Segment.MAX_PAYLOAD_LENGTH; + ByteBuf encodedPayload; + if (compress) { + uncompressedPayload.markReaderIndex(); + ByteBuf compressedPayload = compressor.compress(uncompressedPayload); + if (compressedPayload.readableBytes() >= uncompressedPayloadLength) { + // Skip compression if it's not worth it + uncompressedPayload.resetReaderIndex(); + encodedPayload = uncompressedPayload; + compressedPayload.release(); + // By convention, this is how we signal this to the server: + uncompressedPayloadLength = 0; + } else { + encodedPayload = compressedPayload; + uncompressedPayload.release(); + } + } else { + encodedPayload = uncompressedPayload; + } + int payloadLength = encodedPayload.readableBytes(); + + ByteBuf header = + encodeHeader(payloadLength, uncompressedPayloadLength, segment.isSelfContained()); + + int payloadCrc = Crc.computeCrc32(encodedPayload); + ByteBuf trailer = allocator.ioBuffer(CRC32_LENGTH); + for (int i = 0; i < CRC32_LENGTH; i++) { + trailer.writeByte(payloadCrc & 0xFF); + payloadCrc >>= 8; + } + + out.add(header); + out.add(encodedPayload); + out.add(trailer); + } + + @VisibleForTesting + ByteBuf encodeHeader(int payloadLength, int uncompressedLength, boolean isSelfContained) { + assert payloadLength <= Segment.MAX_PAYLOAD_LENGTH; + assert !compress || uncompressedLength <= Segment.MAX_PAYLOAD_LENGTH; + + int headerLength = headerLength(); + + long headerData = payloadLength; + int flagOffset = 17; + if (compress) { + headerData |= (long) uncompressedLength << 17; + flagOffset += 17; + } + if (isSelfContained) { + headerData |= 1L << flagOffset; + } + + int headerCrc = Crc.computeCrc24(headerData, headerLength); + + ByteBuf header = allocator.ioBuffer(headerLength + CRC24_LENGTH); + // Write both data and CRC in little-endian order + for (int i = 0; i < headerLength; i++) { + int shift = i * 8; + header.writeByte((int) (headerData >> shift & 0xFF)); + } + for (int i = 0; i < CRC24_LENGTH; i++) { + int shift = i * 8; + header.writeByte(headerCrc >> shift & 0xFF); + } + return header; + } + + /** + * Decodes a segment header and checks its CRC. It is assumed that the caller has already checked + * that there are enough bytes. + */ + Header decodeHeader(ByteBuf buffer) throws CrcMismatchException { + int headerLength = headerLength(); + assert buffer.readableBytes() >= headerLength + CRC24_LENGTH; + + // Read header data (little endian): + long headerData = 0; + for (int i = 0; i < headerLength; i++) { + headerData |= (buffer.readByte() & 0xFFL) << 8 * i; + } + + // Read CRC (little endian) and check it: + int expectedHeaderCrc = 0; + for (int i = 0; i < CRC24_LENGTH; i++) { + expectedHeaderCrc |= (buffer.readByte() & 0xFF) << 8 * i; + } + int actualHeaderCrc = Crc.computeCrc24(headerData, headerLength); + if (actualHeaderCrc != expectedHeaderCrc) { + throw new CrcMismatchException( + String.format( + "CRC mismatch on header %s. 
Received %s, computed %s.", + Long.toHexString(headerData), + Integer.toHexString(expectedHeaderCrc), + Integer.toHexString(actualHeaderCrc))); + } + + int payloadLength = (int) headerData & Segment.MAX_PAYLOAD_LENGTH; + headerData >>= 17; + int uncompressedPayloadLength; + if (compress) { + uncompressedPayloadLength = (int) headerData & Segment.MAX_PAYLOAD_LENGTH; + headerData >>= 17; + } else { + uncompressedPayloadLength = -1; + } + boolean isSelfContained = (headerData & 1) == 1; + return new Header(payloadLength, uncompressedPayloadLength, isSelfContained); + } + + /** + * Decodes the rest of a segment from a previously decoded header, and checks the payload's CRC. + * It is assumed that the caller has already checked that there are enough bytes. + */ + Segment decode(Header header, ByteBuf buffer) throws CrcMismatchException, IOException { + assert buffer.readableBytes() == header.payloadLength + CRC32_LENGTH; + + // Extract payload: + ByteBuf encodedPayload = buffer.readSlice(header.payloadLength); + encodedPayload.retain(); + + // Read and check CRC: + int expectedPayloadCrc = 0; + for (int i = 0; i < CRC32_LENGTH; i++) { + expectedPayloadCrc |= (buffer.readByte() & 0xFF) << 8 * i; + } + buffer.release(); // done with this (we retained the payload independently) + int actualPayloadCrc = Crc.computeCrc32(encodedPayload); + if (actualPayloadCrc != expectedPayloadCrc) { + encodedPayload.release(); + throw new CrcMismatchException( + String.format( + "CRC mismatch on payload. Received %s, computed %s.", + Integer.toHexString(expectedPayloadCrc), Integer.toHexString(actualPayloadCrc))); + } + + // Decompress payload if needed: + ByteBuf payload; + if (compress && header.uncompressedPayloadLength > 0) { + payload = compressor.decompress(encodedPayload, header.uncompressedPayloadLength); + encodedPayload.release(); + } else { + payload = encodedPayload; + } + + return new Segment(payload, header.isSelfContained); + } + + /** Temporary holder for header data. During decoding, it is convenient to store it separately. */ + static class Header { + final int payloadLength; + final int uncompressedPayloadLength; + final boolean isSelfContained; + + public Header(int payloadLength, int uncompressedPayloadLength, boolean isSelfContained) { + this.payloadLength = payloadLength; + this.uncompressedPayloadLength = uncompressedPayloadLength; + this.isSelfContained = isSelfContained; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SegmentToBytesEncoder.java b/driver-core/src/main/java/com/datastax/driver/core/SegmentToBytesEncoder.java new file mode 100644 index 00000000000..58c51bf2c83 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SegmentToBytesEncoder.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageEncoder; +import java.util.List; + +@ChannelHandler.Sharable +class SegmentToBytesEncoder extends MessageToMessageEncoder { + + private final SegmentCodec codec; + + SegmentToBytesEncoder(SegmentCodec codec) { + super(Segment.class); + this.codec = codec; + } + + @Override + protected void encode(ChannelHandlerContext ctx, Segment segment, List out) + throws Exception { + codec.encode(segment, out); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SegmentToFrameDecoder.java b/driver-core/src/main/java/com/datastax/driver/core/SegmentToFrameDecoder.java new file mode 100644 index 00000000000..7155895e8e8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SegmentToFrameDecoder.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.Frame.Header; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageDecoder; +import java.util.ArrayList; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Converts the segments decoded by {@link BytesToSegmentDecoder} into legacy frames understood by + * the rest of the driver. 
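+ * + * <p>A self-contained segment is expanded into one frame per message it carries; slices of a + * large message are accumulated until the whole body has been received, then reassembled into a + * single frame.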
+ */ +class SegmentToFrameDecoder extends MessageToMessageDecoder { + + private static final Logger logger = LoggerFactory.getLogger(SegmentToFrameDecoder.class); + + // Accumulated state when we are reading a sequence of slices + private Header pendingHeader; + private final List accumulatedSlices = new ArrayList(); + private int accumulatedLength; + + SegmentToFrameDecoder() { + super(Segment.class); + } + + @Override + protected void decode(ChannelHandlerContext ctx, Segment segment, List out) { + if (segment.isSelfContained()) { + decodeSelfContained(segment, out); + } else { + decodeSlice(segment, ctx.alloc(), out); + } + } + + private void decodeSelfContained(Segment segment, List out) { + ByteBuf payload = segment.getPayload(); + int frameCount = 0; + do { + Header header = Header.decode(payload); + ByteBuf body = payload.readSlice(header.bodyLength); + body.retain(); + out.add(new Frame(header, body)); + frameCount += 1; + } while (payload.isReadable()); + payload.release(); + logger.trace("Decoded self-contained segment into {} frame(s)", frameCount); + } + + private void decodeSlice(Segment segment, ByteBufAllocator allocator, List out) { + assert pendingHeader != null ^ (accumulatedSlices.isEmpty() && accumulatedLength == 0); + ByteBuf payload = segment.getPayload(); + if (pendingHeader == null) { // first slice + pendingHeader = Header.decode(payload); // note: this consumes the header data + } + accumulatedSlices.add(payload); + accumulatedLength += payload.readableBytes(); + logger.trace( + "StreamId {}: decoded slice {}, {}/{} bytes", + pendingHeader.streamId, + accumulatedSlices.size(), + accumulatedLength, + pendingHeader.bodyLength); + assert accumulatedLength <= pendingHeader.bodyLength; + if (accumulatedLength == pendingHeader.bodyLength) { + // We've received enough data to reassemble the whole message + CompositeByteBuf body = allocator.compositeBuffer(accumulatedSlices.size()); + body.addComponents(true, accumulatedSlices); + out.add(new Frame(pendingHeader, body)); + // Reset our state + pendingHeader = null; + accumulatedSlices.clear(); + accumulatedLength = 0; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java index 9ca292db118..4e917941614 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ServerSideTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,16 +22,13 @@ * assign server-side timestamps. 
*/ public class ServerSideTimestampGenerator implements TimestampGenerator { - /** - * The unique instance of this generator. - */ - public static final TimestampGenerator INSTANCE = new ServerSideTimestampGenerator(); + /** The unique instance of this generator. */ + public static final TimestampGenerator INSTANCE = new ServerSideTimestampGenerator(); - @Override - public long next() { - return Long.MIN_VALUE; - } + @Override + public long next() { + return Long.MIN_VALUE; + } - private ServerSideTimestampGenerator() { - } + private ServerSideTimestampGenerator() {} } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 4a591693e34..d4ab7bfbc75 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,453 +17,440 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.QueryExecutionException; +import com.datastax.driver.core.exceptions.QueryValidationException; +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import com.google.common.util.concurrent.ListenableFuture; - import java.io.Closeable; import java.util.Collection; import java.util.Map; /** * A session holds connections to a Cassandra cluster, allowing it to be queried. - *
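// Usage sketch (not part of this diff): opting into server-side timestamps by registering the
// generator above through Cluster.Builder#withTimestampGenerator. The contact point is a
// placeholder.
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withTimestampGenerator(ServerSideTimestampGenerator.INSTANCE)
        .build();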

- * Each session maintains multiple connections to the cluster nodes, - * provides policies to choose which node to use for each query (round-robin on - * all nodes of the cluster by default), and handles retries for failed queries (when - * it makes sense), etc... - *

- * Session instances are thread-safe and usually a single instance is enough - * per application. As a given session can only be "logged" into one keyspace at - * a time (where the "logged" keyspace is the one used by queries that don't - * explicitly use a fully qualified table name), it can make sense to create one - * session per keyspace used. This is however not necessary when querying multiple keyspaces - * since it is always possible to use a single session with fully qualified table names - * in queries. + * + *

Each session maintains multiple connections to the cluster nodes, provides policies to choose + * which node to use for each query (round-robin on all nodes of the cluster by default), and + * handles retries for failed queries (when it makes sense), etc... + * + *

Session instances are thread-safe and usually a single instance is enough per application. As + * a given session can only be "logged" into one keyspace at a time (where the "logged" keyspace is + * the one used by queries that don't explicitly use a fully qualified table name), it can make + * sense to create one session per keyspace used. This is however not necessary when querying + * multiple keyspaces since it is always possible to use a single session with fully qualified table + * names in queries. */ public interface Session extends Closeable { - /** - * The keyspace to which this Session is currently logged in, if any. - *
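// Usage sketch (not part of this diff) for the Session contract documented in this interface:
// typically one Cluster and one long-lived Session per application. The contact point,
// keyspace and query are placeholders; this snippet assumes it runs inside a main method.
Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
Session session = cluster.connect("my_keyspace"); // "my_keyspace" becomes the logged keyspace
ResultSet rs = session.execute("SELECT release_version FROM system.local");
Row row = rs.one();
System.out.println(row.getString("release_version"));
cluster.close(); // closing the Cluster also closes the sessions created from it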

- * This correspond to the name passed to {@link Cluster#connect(String)}, or to the - * last keyspace logged into through a "USE" CQL query if one was used. - * - * @return the name of the keyspace to which this Session is currently - * logged in, or {@code null} if the session is logged to no keyspace. - */ - String getLoggedKeyspace(); + /** + * The keyspace to which this Session is currently logged in, if any. + * + *

This correspond to the name passed to {@link Cluster#connect(String)}, or to the last + * keyspace logged into through a "USE" CQL query if one was used. + * + * @return the name of the keyspace to which this Session is currently logged in, or {@code null} + * if the session is logged to no keyspace. + */ + String getLoggedKeyspace(); - /** - * Force the initialization of this Session instance if it hasn't been - * initialized yet. - *

- * Please note first that most users won't need to call this method - * explicitly. If you use the {@link Cluster#connect} method - * to create your Session, the returned session will be already - * initialized. Even if you create a non-initialized session through - * {@link Cluster#newSession}, that session will get automatically - * initialized the first time it is used for querying. This method - * is thus only useful if you use {@link Cluster#newSession} and want to - * explicitly force initialization without querying. - *

- * Session initialization consists in connecting the Session to the known - * Cassandra hosts (at least those that should not be ignored due to - * the {@link com.datastax.driver.core.policies.LoadBalancingPolicy LoadBalancingPolicy} in place). - *

- * If the Cluster instance this Session depends on is not itself - * initialized, it will be initialized by this method. - *

- * If the session is already initialized, this method is a no-op. - * - * @return this {@code Session} object. - * @throws NoHostAvailableException if this initialization triggers the - * {@link Cluster} initialization and no host amongst the contact points can be - * reached. - * @throws AuthenticationException if this initialization triggers the - * {@link Cluster} initialization and an authentication error occurs while contacting - * the initial contact points. - */ - Session init(); + /** + * Force the initialization of this Session instance if it hasn't been initialized yet. + * + *

Please note first that most users won't need to call this method explicitly. If you use the + * {@link Cluster#connect} method to create your Session, the returned session will be already + * initialized. Even if you create a non-initialized session through {@link Cluster#newSession}, + * that session will get automatically initialized the first time it is used for querying. This + * method is thus only useful if you use {@link Cluster#newSession} and want to explicitly force + * initialization without querying. + * + *

Session initialization consists in connecting the Session to the known Cassandra hosts (at + * least those that should not be ignored due to the {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy LoadBalancingPolicy} in place). + * + *

If the Cluster instance this Session depends on is not itself initialized, it will be + * initialized by this method. + * + *

If the session is already initialized, this method is a no-op. + * + * @return this {@code Session} object. + * @throws NoHostAvailableException if this initialization triggers the {@link Cluster} + * initialization and no host amongst the contact points can be reached. + * @throws AuthenticationException if this initialization triggers the {@link Cluster} + * initialization and an authentication error occurs while contacting the initial contact + * points. + */ + Session init(); - /** - * Initialize this session asynchronously. - * - * @return a future that will complete when the session is fully initialized. - * @see #init() - */ - ListenableFuture initAsync(); + /** + * Initialize this session asynchronously. + * + * @return a future that will complete when the session is fully initialized. + * @see #init() + */ + ListenableFuture initAsync(); - /** - * Executes the provided query. - *
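// Example (not part of this diff) of forcing initialization without querying, as described
// above; assumes an existing Cluster instance named "cluster".
Session session = cluster.newSession(); // not initialized yet
session.init(); // connects to the known hosts now, instead of on the first query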

- * This is a convenience method for {@code execute(new SimpleStatement(query))}. - * - * @param query the CQL query to execute. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - ResultSet execute(String query); + /** + * Executes the provided query. + * + *

This is a convenience method for {@code execute(new SimpleStatement(query))}. + * + * @param query the CQL query to execute. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any + * other validation problem). + */ + ResultSet execute(String query); - /** - * Executes the provided query using the provided values. - *

- * This is a convenience method for {@code execute(new SimpleStatement(query, values))}. - * - * @param query the CQL query to execute. - * @param values values required for the execution of {@code query}. See - * {@link SimpleStatement#SimpleStatement(String, Object...)} for more details. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws UnsupportedFeatureException if version 1 of the protocol - * is in use (i.e. if you've forced version 1 through {@link Cluster.Builder#withProtocolVersion} - * or you use Cassandra 1.2). - */ - ResultSet execute(String query, Object... values); + /** + * Executes the provided query using the provided values. + * + *

This is a convenience method for {@code execute(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See {@link + * SimpleStatement#SimpleStatement(String, Object...)} for more details. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any + * other validation problem). + * @throws UnsupportedFeatureException if version 1 of the protocol is in use (i.e. if you've + * forced version 1 through {@link Cluster.Builder#withProtocolVersion} or you use Cassandra + * 1.2). + */ + ResultSet execute(String query, Object... values); - /** - * Executes the provided query using the provided named values. - *

- * This is a convenience method for {@code execute(new SimpleStatement(query, values))}. - * - * @param query the CQL query to execute. - * @param values values required for the execution of {@code query}. See - * {@link SimpleStatement#SimpleStatement(String, Map)} for more details. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws UnsupportedFeatureException if version 1 or 2 of the protocol - * is in use (i.e. if you've forced it through {@link Cluster.Builder#withProtocolVersion} - * or you use Cassandra 1.2 or 2.0). - */ - ResultSet execute(String query, Map values); + /** + * Executes the provided query using the provided named values. + * + *

This is a convenience method for {@code execute(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See {@link + * SimpleStatement#SimpleStatement(String, Map)} for more details. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any + * other validation problem). + * @throws UnsupportedFeatureException if version 1 or 2 of the protocol is in use (i.e. if you've + * forced it through {@link Cluster.Builder#withProtocolVersion} or you use Cassandra 1.2 or + * 2.0). + */ + ResultSet execute(String query, Map values); - /** - * Executes the provided query. - *
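// Examples (not part of this diff) of the value-bearing execute variants declared above;
// the table, columns and values are placeholders, and named values require protocol v3+.
// Assumes a connected Session named "session".
session.execute("INSERT INTO users (id, name) VALUES (?, ?)", 42, "alice");
session.execute(
    "SELECT * FROM users WHERE id = :id",
    java.util.Collections.<String, Object>singletonMap("id", 42));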

- * This method blocks until at least some result has been received from the - * database. However, for SELECT queries, it does not guarantee that the - * result has been received in full. But it does guarantee that some - * response has been received from the database, and in particular - * guarantees that if the request is invalid, an exception will be thrown - * by this method. - * - * @param statement the CQL query to execute (that can be any {@link Statement}). - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws UnsupportedFeatureException if the protocol version 1 is in use and - * a feature not supported has been used. Features that are not supported by - * the version protocol 1 include: BatchStatement, ResultSet paging and binary - * values in RegularStatement. - */ - ResultSet execute(Statement statement); + /** + * Executes the provided query. + * + *

This method blocks until at least some result has been received from the database. However, + * for SELECT queries, it does not guarantee that the result has been received in full. But it + * does guarantee that some response has been received from the database, and in particular + * guarantees that if the request is invalid, an exception will be thrown by this method. + * + * @param statement the CQL query to execute (that can be any {@link Statement}). + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any + * other validation problem). + * @throws UnsupportedFeatureException if the protocol version 1 is in use and a feature not + * supported has been used. Features that are not supported by the version protocol 1 include: + * BatchStatement, ResultSet paging and binary values in RegularStatement. + */ + ResultSet execute(Statement statement); - /** - * Executes the provided query asynchronously. - *

- * This is a convenience method for {@code executeAsync(new SimpleStatement(query))}. - * - * @param query the CQL query to execute. - * @return a future on the result of the query. - */ - ResultSetFuture executeAsync(String query); + /** + * Executes the provided query asynchronously. + * + *

This is a convenience method for {@code executeAsync(new SimpleStatement(query))}. + * + * @param query the CQL query to execute. + * @return a future on the result of the query. + */ + ResultSetFuture executeAsync(String query); - /** - * Executes the provided query asynchronously using the provided values. - *

- * This is a convenience method for {@code executeAsync(new SimpleStatement(query, values))}. - * - * @param query the CQL query to execute. - * @param values values required for the execution of {@code query}. See - * {@link SimpleStatement#SimpleStatement(String, Object...)} for more details. - * @return a future on the result of the query. - * @throws UnsupportedFeatureException if version 1 of the protocol - * is in use (i.e. if you've forced version 1 through {@link Cluster.Builder#withProtocolVersion} - * or you use Cassandra 1.2). - */ - ResultSetFuture executeAsync(String query, Object... values); + /** + * Executes the provided query asynchronously using the provided values. + * + *

This is a convenience method for {@code executeAsync(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See {@link + * SimpleStatement#SimpleStatement(String, Object...)} for more details. + * @return a future on the result of the query. + * @throws UnsupportedFeatureException if version 1 of the protocol is in use (i.e. if you've + * forced version 1 through {@link Cluster.Builder#withProtocolVersion} or you use Cassandra + * 1.2). + */ + ResultSetFuture executeAsync(String query, Object... values); - /** - * Executes the provided query asynchronously using the provided values. - *

- * This is a convenience method for {@code executeAsync(new SimpleStatement(query, values))}. - * - * @param query the CQL query to execute. - * @param values values required for the execution of {@code query}. See - * {@link SimpleStatement#SimpleStatement(String, Map)} for more details. - * @return a future on the result of the query. - * @throws UnsupportedFeatureException if version 1 or 2 of the protocol - * is in use (i.e. if you've forced it through {@link Cluster.Builder#withProtocolVersion} - * or you use Cassandra 1.2 or 2.0). - */ - ResultSetFuture executeAsync(String query, Map values); + /** + * Executes the provided query asynchronously using the provided values. + * + *

This is a convenience method for {@code executeAsync(new SimpleStatement(query, values))}. + * + * @param query the CQL query to execute. + * @param values values required for the execution of {@code query}. See {@link + * SimpleStatement#SimpleStatement(String, Map)} for more details. + * @return a future on the result of the query. + * @throws UnsupportedFeatureException if version 1 or 2 of the protocol is in use (i.e. if you've + * forced it through {@link Cluster.Builder#withProtocolVersion} or you use Cassandra 1.2 or + * 2.0). + */ + ResultSetFuture executeAsync(String query, Map values); - /** - * Executes the provided query asynchronously. - *

- * This method does not block. It returns as soon as the query has been - * passed to the underlying network stack. In particular, returning from - * this method does not guarantee that the query is valid or has even been - * submitted to a live node. Any exception pertaining to the failure of the - * query will be thrown when accessing the {@link ResultSetFuture}. - *

- * Note that for queries that don't return a result (INSERT, UPDATE and - * DELETE), you will need to access the ResultSetFuture (that is, call one of - * its {@code get} methods to make sure the query was successful. - * - * @param statement the CQL query to execute (that can be any {@code Statement}). - * @return a future on the result of the query. - * @throws UnsupportedFeatureException if the protocol version 1 is in use and - * a feature not supported has been used. Features that are not supported by - * the version protocol 1 include: BatchStatement, ResultSet paging and binary - * values in RegularStatement. - */ - ResultSetFuture executeAsync(Statement statement); + /** + * Executes the provided query asynchronously. + * + *

This method does not block. It returns as soon as the query has been passed to the + * underlying network stack. In particular, returning from this method does not guarantee that the + * query is valid or has even been submitted to a live node. Any exception pertaining to the + * failure of the query will be thrown when accessing the {@link ResultSetFuture}. + * + *

Note that for queries that don't return a result (INSERT, UPDATE and DELETE), you will need + * to access the ResultSetFuture (that is, call one of its {@code get} methods to make sure the + * query was successful. + * + * @param statement the CQL query to execute (that can be any {@code Statement}). + * @return a future on the result of the query. + * @throws UnsupportedFeatureException if the protocol version 1 is in use and a feature not + * supported has been used. Features that are not supported by the version protocol 1 include: + * BatchStatement, ResultSet paging and binary values in RegularStatement. + */ + ResultSetFuture executeAsync(Statement statement); - /** - * Prepares the provided query string. - * - * @param query the CQL query string to prepare - * @return the prepared statement corresponding to {@code query}. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to prepare this query. - */ - PreparedStatement prepare(String query); + /** + * Prepares the provided query string. + * + * @param query the CQL query string to prepare + * @return the prepared statement corresponding to {@code query}. + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * prepare this query. + */ + PreparedStatement prepare(String query); - /** - * Prepares the provided query. - *
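// Example (not part of this diff) of consuming the future returned by executeAsync, as the
// documentation above recommends even for statements that return no rows; "users" is a
// placeholder table and "session" a connected Session.
ResultSetFuture future = session.executeAsync("DELETE FROM users WHERE id = 42");
// ... do other work while the query is in flight ...
future.getUninterruptibly(); // surfaces any failure; skipping this would hide errors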

- * This method behaves like {@link #prepare(String)}, - * but note that the resulting {@code PreparedStatement} will inherit the query properties - * set on {@code statement}. Concretely, this means that in the following code: - *

-     * RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?").setConsistencyLevel(ConsistencyLevel.QUORUM);
-     * PreparedStatement prepared = session.prepare(toPrepare);
-     * session.execute(prepared.bind("someValue"));
-     * </pre>
- * the final execution will be performed with Quorum consistency. - *

- * Please note that if the same CQL statement is prepared more than once, all - * calls to this method will return the same {@code PreparedStatement} object - * but the method will still apply the properties of the prepared - * {@code Statement} to this object. - * - * @param statement the statement to prepare - * @return the prepared statement corresponding to {@code statement}. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to prepare this statement. - * @throws IllegalArgumentException if {@code statement.getValues() != null} - * (values for executing a prepared statement should be provided after preparation - * though the {@link PreparedStatement#bind} method or through a corresponding - * {@link BoundStatement}). - */ - PreparedStatement prepare(RegularStatement statement); + /** + * Prepares the provided query. + * + *

This method behaves like {@link #prepare(String)}, but the resulting {@code + * PreparedStatement} will inherit some of the properties set on {@code statement}: {@linkplain + * Statement#getRoutingKey(ProtocolVersion, CodecRegistry) routing key}, {@linkplain + * Statement#getConsistencyLevel() consistency level}, {@linkplain + * Statement#getSerialConsistencyLevel() serial consistency level}, {@linkplain + * Statement#isTracing() tracing flag}, {@linkplain Statement#getRetryPolicy() retry policy}, + * {@linkplain Statement#getOutgoingPayload() outgoing payload}, and {@linkplain + * Statement#isIdempotent() idempotence}. Concretely, this means that in the following code: + * + *

+   * RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?").setConsistencyLevel(ConsistencyLevel.QUORUM);
+   * PreparedStatement prepared = session.prepare(toPrepare);
+   * session.execute(prepared.bind("someValue"));
+   * </pre>
+ * + * the final execution will be performed with Quorum consistency. + * + *

Please note that if the same CQL statement is prepared more than once, all calls to this + * method will return the same {@code PreparedStatement} object but the method will still apply + * the properties of the prepared {@code Statement} to this object. + * + * @param statement the statement to prepare + * @return the prepared statement corresponding to {@code statement}. + * @throws NoHostAvailableException if no host in the cluster can be contacted successfully to + * prepare this statement. + * @throws IllegalArgumentException if {@code statement.getValues() != null} (values for executing + * a prepared statement should be provided after preparation though the {@link + * PreparedStatement#bind} method or through a corresponding {@link BoundStatement}). + */ + PreparedStatement prepare(RegularStatement statement); - /** - * Prepares the provided query string asynchronously. - *
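// Example (not part of this diff) of the prepare-then-bind workflow covered by the methods
// above; the table and values are placeholders, and "session" is a connected Session.
PreparedStatement prepared = session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");
session.execute(prepared.bind(42, "alice"));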

- * This method is equivalent to {@link #prepare(String)} except that it - * does not block but return a future instead. Any error during preparation will - * be thrown when accessing the future, not by this method itself. - * - * @param query the CQL query string to prepare - * @return a future on the prepared statement corresponding to {@code query}. - */ - ListenableFuture prepareAsync(String query); + /** + * Prepares the provided query string asynchronously. + * + *

This method is equivalent to {@link #prepare(String)} except that it does not block but + * return a future instead. Any error during preparation will be thrown when accessing the future, + * not by this method itself. + * + * @param query the CQL query string to prepare + * @return a future on the prepared statement corresponding to {@code query}. + */ + ListenableFuture prepareAsync(String query); - /** - * Prepares the provided query asynchronously. - * This method behaves like {@link #prepareAsync(String)}, - * but note that the resulting {@code PreparedStatement} will inherit the query properties - * set on {@code statement}. Concretely, this means that in the following code: - *

-     * RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?").setConsistencyLevel(ConsistencyLevel.QUORUM);
-     * PreparedStatement prepared = session.prepare(toPrepare);
-     * session.execute(prepared.bind("someValue"));
-     * </pre>
- * the final execution will be performed with Quorum consistency. - *

- * Please note that if the same CQL statement is prepared more than once, all - * calls to this method will return the same {@code PreparedStatement} object - * but the method will still apply the properties of the prepared - * {@code Statement} to this object. - * - * @param statement the statement to prepare - * @return a future on the prepared statement corresponding to {@code statement}. - * @throws IllegalArgumentException if {@code statement.getValues() != null} - * (values for executing a prepared statement should be provided after preparation - * though the {@link PreparedStatement#bind} method or through a corresponding - * {@link BoundStatement}). - * @see Session#prepare(RegularStatement) - */ - ListenableFuture prepareAsync(RegularStatement statement); + /** + * Prepares the provided query asynchronously. This method behaves like {@link + * #prepareAsync(String)}, but note that the resulting {@code PreparedStatement} will inherit the + * query properties set on {@code statement}. Concretely, this means that in the following code: + * + *

+   * RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?").setConsistencyLevel(ConsistencyLevel.QUORUM);
+   * PreparedStatement prepared = session.prepare(toPrepare);
+   * session.execute(prepared.bind("someValue"));
+   * </pre>
+ * + * the final execution will be performed with Quorum consistency. + * + *

Please note that if the same CQL statement is prepared more than once, all calls to this + * method will return the same {@code PreparedStatement} object but the method will still apply + * the properties of the prepared {@code Statement} to this object. + * + * @param statement the statement to prepare + * @return a future on the prepared statement corresponding to {@code statement}. + * @throws IllegalArgumentException if {@code statement.getValues() != null} (values for executing + * a prepared statement should be provided after preparation though the {@link + * PreparedStatement#bind} method or through a corresponding {@link BoundStatement}). + * @see Session#prepare(RegularStatement) + */ + ListenableFuture prepareAsync(RegularStatement statement); - /** - * Initiates a shutdown of this session instance. - *

- * This method is asynchronous and return a future on the completion - * of the shutdown process. As soon as the session is shutdown, no - * new request will be accepted, but already submitted queries are - * allowed to complete. This method closes all connections of this - * session and reclaims all resources used by it. - *

- * If for some reason you wish to expedite this process, the - * {@link CloseFuture#force} can be called on the result future. - *

- * This method has no particular effect if the session was already closed - * (in which case the returned future will return immediately). - *

- * Note that this method does not close the corresponding {@code Cluster} - * instance (which holds additional resources, in particular internal - * executors that must be shut down in order for the client program to - * terminate). - * If you want to do so, use {@link Cluster#close}, but note that it will - * close all sessions created from that cluster. - * - * @return a future on the completion of the shutdown process. - */ - CloseFuture closeAsync(); + /** + * Initiates a shutdown of this session instance. + * + *

This method is asynchronous and return a future on the completion of the shutdown process. + * As soon as the session is shutdown, no new request will be accepted, but already submitted + * queries are allowed to complete. This method closes all connections of this session and + * reclaims all resources used by it. + * + *

If for some reason you wish to expedite this process, the {@link CloseFuture#force} can be + * called on the result future. + * + *

This method has no particular effect if the session was already closed (in which case the + * returned future will return immediately). + * + *

Note that this method does not close the corresponding {@code Cluster} instance (which holds + * additional resources, in particular internal executors that must be shut down in order for the + * client program to terminate). If you want to do so, use {@link Cluster#close}, but note that it + * will close all sessions created from that cluster. + * + * @return a future on the completion of the shutdown process. + */ + CloseFuture closeAsync(); + + /** + * Initiates a shutdown of this session instance and blocks until that shutdown completes. + * + *
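// Example (not part of this diff) of the shutdown paths described above; assumes "session"
// and "cluster" from the earlier sketch.
session.close(); // shortcut for closeAsync().get(): blocks until the session is closed
// or, to expedite shutdown without waiting for in-flight queries:
session.closeAsync().force();
cluster.close(); // the Cluster holds its own resources and must be closed separately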

This method is a shortcut for {@code closeAsync().get()}. + * + *

Note that this method does not close the corresponding {@code Cluster} instance (which holds + * additional resources, in particular internal executors that must be shut down in order for the + * client program to terminate). If you want to do so, use {@link Cluster#close}, but note that it + * will close all sessions created from that cluster. + */ + @Override + void close(); + + /** + * Whether this Session instance has been closed. + * + *

Note that this method returns true as soon as the closing of this Session has started but it + * does not guarantee that the closing is done. If you want to guarantee that the closing is done, + * you can call {@code close()} and wait until it returns (or call the get method on {@code + * closeAsync()} with a very short timeout and check this doesn't timeout). + * + * @return {@code true} if this Session instance has been closed, {@code false} otherwise. + */ + boolean isClosed(); + /** + * Returns the {@code Cluster} object this session is part of. + * + * @return the {@code Cluster} object this session is part of. + */ + Cluster getCluster(); + + /** + * Return a snapshot of the state of this Session. + * + *

The returned object provides information on which hosts the session is connected to, how + * many connections are opened to each host, etc... The returned object is immutable, it is a + * snapshot of the Session State taken when this method is called. + * + * @return a snapshot of the state of this Session. + */ + State getState(); + + /** + * The state of a Session. + * + *

This mostly exposes information on the connections maintained by a Session: which host it is + * connected to, how many connections it has for each host, etc... + */ + interface State { /** - * Initiates a shutdown of this session instance and blocks until - * that shutdown completes. - *

- * This method is a shortcut for {@code closeAsync().get()}. - *

- * Note that this method does not close the corresponding {@code Cluster} - * instance (which holds additional resources, in particular internal - * executors that must be shut down in order for the client program to - * terminate). - * If you want to do so, use {@link Cluster#close}, but note that it will - * close all sessions created from that cluster. + * The Session to which this State corresponds to. + * + * @return the Session to which this State corresponds to. */ - @Override - void close(); + Session getSession(); /** - * Whether this Session instance has been closed. - *

- * Note that this method returns true as soon as the closing of this Session - * has started but it does not guarantee that the closing is done. If you - * want to guarantee that the closing is done, you can call {@code close()} - * and wait until it returns (or call the get method on {@code closeAsync()} - * with a very short timeout and check this doesn't timeout). + * The hosts to which the session is currently connected (more precisely, at the time this State + * has been grabbed). + * + *

Please note that this method really returns the hosts for which the session currently + * holds a connection pool. As such, it's unlikely but not impossible for a host to be listed in + * the output of this method but to have {@code getOpenConnections} return 0, if the pool itself + * is created but no connections have been successfully opened yet. * - * @return {@code true} if this Session instance has been closed, {@code false} - * otherwise. + * @return an immutable collection of the hosts to which the session is connected. */ - boolean isClosed(); + Collection getConnectedHosts(); /** - * Returns the {@code Cluster} object this session is part of. + * The number of open connections to a given host. * - * @return the {@code Cluster} object this session is part of. + *

Note that this refers to active connections. The actual number of connections + * also includes {@link #getTrashedConnections(Host)}. + * + * @param host the host to get open connections for. + * @return The number of open connections to {@code host}. If the session is not connected to + * that host, 0 is returned. */ - Cluster getCluster(); + int getOpenConnections(Host host); /** - * Return a snapshot of the state of this Session. - *

- * The returned object provides information on which hosts the session is - * connected to, how many connections are opened to each host, etc... - * The returned object is immutable, it is a snapshot of the Session State - * taken when this method is called. + * The number of "trashed" connections to a given host. + * + *

When the load to a host decreases, the driver will reclaim some connections in order to + * save resources. No requests are sent to these connections anymore, but they are kept open for + * an additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the + * load goes up again. This method counts connections in that state. * - * @return a snapshot of the state of this Session. + * @param host the host to get trashed connections for. + * @return The number of trashed connections to {@code host}. If the session is not connected to + * that host, 0 is returned. */ - State getState(); + int getTrashedConnections(Host host); /** - * The state of a Session. - *

- * This mostly exposes information on the connections maintained by a Session: - * which host it is connected to, how many connections it has for each host, etc... + * The number of queries that are currently being executed through a given host. + * + *

This corresponds to the number of queries that have been sent (by the session this is a + * State of) to the Cassandra Host on one of its connections but haven't yet returned. In that + * sense this provides a sort of measure of how busy the connections to that node are (at the + * time the {@code State} was grabbed at least). + * + * @param host the host to get in-flight queries for. + * @return the number of currently (as in 'at the time the state was grabbed') executing queries + * to {@code host}. */ - interface State { - /** - * The Session to which this State corresponds to. - * - * @return the Session to which this State corresponds to. - */ - Session getSession(); - - /** - * The hosts to which the session is currently connected (more precisely, at the time - * this State has been grabbed). - *

- * Please note that this method really returns the hosts for which the session currently - * holds a connection pool. As such, it's unlikely but not impossible for a host to be listed - * in the output of this method but to have {@code getOpenConnections} return 0, if the - * pool itself is created but no connections have been successfully opened yet. - * - * @return an immutable collection of the hosts to which the session is connected. - */ - Collection getConnectedHosts(); - - /** - * The number of open connections to a given host. - *

- * Note that this refers to active connections. The actual number of connections also - * includes {@link #getTrashedConnections(Host)}. - * - * @param host the host to get open connections for. - * @return The number of open connections to {@code host}. If the session - * is not connected to that host, 0 is returned. - */ - int getOpenConnections(Host host); - - /** - * The number of "trashed" connections to a given host. - *

- * When the load to a host decreases, the driver will reclaim some connections in order to save - * resources. No requests are sent to these connections anymore, but they are kept open for an - * additional amount of time ({@link PoolingOptions#getIdleTimeoutSeconds()}), in case the load - * goes up again. This method counts connections in that state. - * - * @param host the host to get trashed connections for. - * @return The number of trashed connections to {@code host}. If the session - * is not connected to that host, 0 is returned. - */ - int getTrashedConnections(Host host); - - /** - * The number of queries that are currently being executed through a given host. - *

- * This corresponds to the number of queries that have been sent (by the session this - * is a State of) to the Cassandra Host on one of its connections but haven't yet returned. - * In that sense this provides a sort of measure of how busy the connections to that node - * are (at the time the {@code State} was grabbed at least). - * - * @param host the host to get in-flight queries for. - * @return the number of currently (as in 'at the time the state was grabbed') executing - * queries to {@code host}. - */ - int getInFlightQueries(Host host); - } + int getInFlightQueries(Host host); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java b/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java index 6b56248f69a..476ddc55269 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SessionManager.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,731 +29,833 @@ import com.google.common.base.Functions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetSocketAddress; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Driver implementation of the Session interface. - */ +/** Driver implementation of the Session interface. 
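// Example (not part of this diff) of inspecting a Session.State snapshot, using only the
// methods declared above; assumes a connected Session named "session".
Session.State state = session.getState();
for (Host host : state.getConnectedHosts()) {
  System.out.printf(
      "%s: open=%d, trashed=%d, inFlight=%d%n",
      host,
      state.getOpenConnections(host),
      state.getTrashedConnections(host),
      state.getInFlightQueries(host));
}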
*/ class SessionManager extends AbstractSession { - private static final Logger logger = LoggerFactory.getLogger(Session.class); - - final Cluster cluster; - final ConcurrentMap pools; - final HostConnectionPool.PoolState poolsState; - private final AtomicReference> initFuture = new AtomicReference>(); - final AtomicReference closeFuture = new AtomicReference(); - - private volatile boolean isInit; - private volatile boolean isClosing; - - // Package protected, only Cluster should construct that. - SessionManager(Cluster cluster) { - this.cluster = cluster; - this.pools = new ConcurrentHashMap(); - this.poolsState = new HostConnectionPool.PoolState(); - } - - @Override - public Session init() { - try { - return Uninterruptibles.getUninterruptibly(initAsync()); - } catch (ExecutionException e) { - throw DriverThrowables.propagateCause(e); - } - } - - @Override - public ListenableFuture initAsync() { - // If we haven't initialized the cluster, do it now - cluster.init(); - - ListenableFuture existing = initFuture.get(); - if (existing != null) - return existing; - - final SettableFuture myInitFuture = SettableFuture.create(); - if (!initFuture.compareAndSet(null, myInitFuture)) - return initFuture.get(); - - Collection hosts = cluster.getMetadata().allHosts(); - ListenableFuture allPoolsCreatedFuture = createPools(hosts); - ListenableFuture allPoolsUpdatedFuture = GuavaCompatibility.INSTANCE.transformAsync(allPoolsCreatedFuture, - new AsyncFunction() { - @Override - @SuppressWarnings("unchecked") - public ListenableFuture apply(Object input) throws Exception { - isInit = true; - return (ListenableFuture) updateCreatedPools(); - } - }); - - Futures.addCallback(allPoolsUpdatedFuture, new FutureCallback() { - @Override - public void onSuccess(Object result) { - myInitFuture.set(SessionManager.this); - } - - @Override - public void onFailure(Throwable t) { - SessionManager.this.closeAsync(); // don't leak the session - myInitFuture.setException(t); - } + private static final Logger logger = LoggerFactory.getLogger(Session.class); + + final Cluster cluster; + final ConcurrentMap pools; + final HostConnectionPool.PoolState poolsState; + private final AtomicReference> initFuture = + new AtomicReference>(); + final AtomicReference closeFuture = new AtomicReference(); + + private volatile boolean isInit; + private volatile boolean isClosing; + + // Package protected, only Cluster should construct that. 
+ SessionManager(Cluster cluster) { + this.cluster = cluster; + this.pools = new ConcurrentHashMap(); + this.poolsState = new HostConnectionPool.PoolState(); + } + + @Override + public Session init() { + try { + return Uninterruptibles.getUninterruptibly(initAsync()); + } catch (ExecutionException e) { + throw DriverThrowables.propagateCause(e); + } + } + + @Override + public ListenableFuture initAsync() { + // If we haven't initialized the cluster, do it now + cluster.init(); + + ListenableFuture existing = initFuture.get(); + if (existing != null) return existing; + + final SettableFuture myInitFuture = SettableFuture.create(); + if (!initFuture.compareAndSet(null, myInitFuture)) return initFuture.get(); + + Collection hosts = cluster.getMetadata().allHosts(); + ListenableFuture allPoolsCreatedFuture = createPools(hosts); + ListenableFuture allPoolsUpdatedFuture = + GuavaCompatibility.INSTANCE.transformAsync( + allPoolsCreatedFuture, + new AsyncFunction() { + @Override + @SuppressWarnings("unchecked") + public ListenableFuture apply(Object input) throws Exception { + isInit = true; + return (ListenableFuture) updateCreatedPools(); + } + }); + + GuavaCompatibility.INSTANCE.addCallback( + allPoolsUpdatedFuture, + new FutureCallback() { + @Override + public void onSuccess(Object result) { + myInitFuture.set(SessionManager.this); + } + + @Override + public void onFailure(Throwable t) { + SessionManager.this.closeAsync(); // don't leak the session + myInitFuture.setException(t); + } }); - return myInitFuture; - } - - private ListenableFuture createPools(Collection hosts) { - List> futures = Lists.newArrayListWithCapacity(hosts.size()); - for (Host host : hosts) - if (host.state != Host.State.DOWN) - futures.add(maybeAddPool(host, null)); - return Futures.allAsList(futures); - } - - @Override - public String getLoggedKeyspace() { - return poolsState.keyspace; - } - - @Override - public ResultSetFuture executeAsync(final Statement statement) { - if (isInit) { - DefaultResultSetFuture future = new DefaultResultSetFuture(this, cluster.manager.protocolVersion(), makeRequestMessage(statement, null)); - new RequestHandler(this, future, statement).sendRequest(); - return future; - } else { - // If the session is not initialized, we can't call makeRequestMessage() synchronously, because it - // requires internal Cluster state that might not be initialized yet (like the protocol version). - // Because of the way the future is built, we need another 'proxy' future that we can return now. - final ChainedResultSetFuture chainedFuture = new ChainedResultSetFuture(); - this.initAsync().addListener(new Runnable() { + return myInitFuture; + } + + private ListenableFuture createPools(Collection hosts) { + List> futures = Lists.newArrayListWithCapacity(hosts.size()); + for (Host host : hosts) + if (host.state != Host.State.DOWN) futures.add(maybeAddPool(host, null)); + return Futures.allAsList(futures); + } + + @Override + public String getLoggedKeyspace() { + return poolsState.keyspace; + } + + @Override + public ResultSetFuture executeAsync(final Statement statement) { + if (isInit) { + DefaultResultSetFuture future = + new DefaultResultSetFuture( + this, cluster.manager.protocolVersion(), makeRequestMessage(statement, null)); + execute(future, statement); + return future; + } else { + // If the session is not initialized, we can't call makeRequestMessage() synchronously, + // because it + // requires internal Cluster state that might not be initialized yet (like the protocol + // version). 
+ // Because of the way the future is built, we need another 'proxy' future that we can return + // now. + final ChainedResultSetFuture chainedFuture = new ChainedResultSetFuture(); + this.initAsync() + .addListener( + new Runnable() { @Override public void run() { - DefaultResultSetFuture actualFuture = new DefaultResultSetFuture(SessionManager.this, cluster.manager.protocolVersion(), makeRequestMessage(statement, null)); - execute(actualFuture, statement); - chainedFuture.setSource(actualFuture); + DefaultResultSetFuture actualFuture = + new DefaultResultSetFuture( + SessionManager.this, + cluster.manager.protocolVersion(), + makeRequestMessage(statement, null)); + execute(actualFuture, statement); + chainedFuture.setSource(actualFuture); } - }, executor()); - return chainedFuture; - } - } - - @Override - protected ListenableFuture prepareAsync(String query, Map customPayload) { - Requests.Prepare request = new Requests.Prepare(query); - request.setCustomPayload(customPayload); - Connection.Future future = new Connection.Future(request); - execute(future, Statement.DEFAULT); - return toPreparedStatement(query, future); - } - - @Override - public CloseFuture closeAsync() { - CloseFuture future = closeFuture.get(); - if (future != null) - return future; - - isClosing = true; - cluster.manager.removeSession(this); - - List futures = new ArrayList(pools.size()); - for (HostConnectionPool pool : pools.values()) - futures.add(pool.closeAsync()); - - future = new CloseFuture.Forwarding(futures); - - return closeFuture.compareAndSet(null, future) - ? future - : closeFuture.get(); // We raced, it's ok, return the future that was actually set - } - - @Override - public boolean isClosed() { - return closeFuture.get() != null; - } - - @Override - public Cluster getCluster() { - return cluster; - } - - @Override - public Session.State getState() { - return new State(this); - } - - private ListenableFuture toPreparedStatement(final String query, final Connection.Future future) { - return GuavaCompatibility.INSTANCE.transformAsync(future, new AsyncFunction() { - @Override - public ListenableFuture apply(Response response) { - switch (response.type) { - case RESULT: - Responses.Result rm = (Responses.Result) response; - switch (rm.kind) { - case PREPARED: - Responses.Result.Prepared pmsg = (Responses.Result.Prepared) rm; - PreparedStatement stmt = DefaultPreparedStatement.fromMessage(pmsg, cluster, query, poolsState.keyspace); - stmt = cluster.manager.addPrepared(stmt); - if (cluster.getConfiguration().getQueryOptions().isPrepareOnAllHosts()) { - // All Sessions are connected to the same nodes so it's enough to prepare only the nodes of this session. - // If that changes, we'll have to make sure this propagate to other sessions too. 
- return prepare(stmt, future.getAddress()); - } else { - return Futures.immediateFuture(stmt); - } - default: - return Futures.immediateFailedFuture( - new DriverInternalError(String.format("%s response received when prepared statement was expected", rm.kind))); - } - case ERROR: - return Futures.immediateFailedFuture( - ((Responses.Error) response).asException(future.getAddress())); - default: - return Futures.immediateFailedFuture( - new DriverInternalError(String.format("%s response received when prepared statement was expected", response.type))); + }, + executor()); + return chainedFuture; + } + } + + @Override + protected ListenableFuture prepareAsync( + String query, Map customPayload) { + Requests.Prepare request = new Requests.Prepare(query); + request.setCustomPayload(customPayload); + Connection.Future future = new Connection.Future(request); + execute(future, Statement.DEFAULT); + return toPreparedStatement(query, future); + } + + @Override + public CloseFuture closeAsync() { + CloseFuture future = closeFuture.get(); + if (future != null) return future; + + isClosing = true; + cluster.manager.removeSession(this); + + List futures = new ArrayList(pools.size()); + for (HostConnectionPool pool : pools.values()) futures.add(pool.closeAsync()); + + future = new CloseFuture.Forwarding(futures); + + return closeFuture.compareAndSet(null, future) + ? future + : closeFuture.get(); // We raced, it's ok, return the future that was actually set + } + + @Override + public boolean isClosed() { + return closeFuture.get() != null; + } + + @Override + public Cluster getCluster() { + return cluster; + } + + @Override + public Session.State getState() { + return new State(this); + } + + private ListenableFuture toPreparedStatement( + final String query, final Connection.Future future) { + return GuavaCompatibility.INSTANCE.transformAsync( + future, + new AsyncFunction() { + @Override + public ListenableFuture apply(Response response) { + switch (response.type) { + case RESULT: + Responses.Result rm = (Responses.Result) response; + switch (rm.kind) { + case PREPARED: + Responses.Result.Prepared pmsg = (Responses.Result.Prepared) rm; + PreparedStatement stmt = + DefaultPreparedStatement.fromMessage( + pmsg, cluster, query, poolsState.keyspace); + stmt = cluster.manager.addPrepared(stmt); + if (cluster.getConfiguration().getQueryOptions().isPrepareOnAllHosts()) { + // All Sessions are connected to the same nodes so it's enough to prepare only + // the nodes of this session. + // If that changes, we'll have to make sure this propagate to other sessions + // too. 
+ return prepare(stmt, future.getEndPoint()); + } else { + return Futures.immediateFuture(stmt); + } + default: + return Futures.immediateFailedFuture( + new DriverInternalError( + String.format( + "%s response received when prepared statement was expected", + rm.kind))); } + case ERROR: + return Futures.immediateFailedFuture( + ((Responses.Error) response).asException(future.getEndPoint())); + default: + return Futures.immediateFailedFuture( + new DriverInternalError( + String.format( + "%s response received when prepared statement was expected", + response.type))); } - }, executor()); - } - - Connection.Factory connectionFactory() { - return cluster.manager.connectionFactory; - } - - Configuration configuration() { - return cluster.manager.configuration; - } - - LoadBalancingPolicy loadBalancingPolicy() { - return cluster.manager.loadBalancingPolicy(); - } - - SpeculativeExecutionPolicy speculativeExecutionPolicy() { - return cluster.manager.speculativeExecutionPolicy(); - } - - ReconnectionPolicy reconnectionPolicy() { - return cluster.manager.reconnectionPolicy(); - } - - ListeningExecutorService executor() { - return cluster.manager.executor; - } - - ListeningExecutorService blockingExecutor() { - return cluster.manager.blockingExecutor; - } - - // Returns whether there was problem creating the pool - ListenableFuture forceRenewPool(final Host host, Connection reusedConnection) { - final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); - if (distance == HostDistance.IGNORED) - return Futures.immediateFuture(true); - - if (isClosing) - return Futures.immediateFuture(false); - - final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); - ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); - - final SettableFuture future = SettableFuture.create(); - - Futures.addCallback(poolInitFuture, new FutureCallback() { - @Override - public void onSuccess(Void result) { - HostConnectionPool previous = pools.put(host, newPool); - if (previous == null) { - logger.debug("Added connection pool for {}", host); - } else { - logger.debug("Renewed connection pool for {}", host); - previous.closeAsync(); - } - - // If we raced with a session shutdown, ensure that the pool will be closed. 
- if (isClosing) { - newPool.closeAsync(); - pools.remove(host); - future.set(false); - } else { - future.set(true); - } + } + }, + executor()); + } + + Connection.Factory connectionFactory() { + return cluster.manager.connectionFactory; + } + + Configuration configuration() { + return cluster.manager.configuration; + } + + LoadBalancingPolicy loadBalancingPolicy() { + return cluster.manager.loadBalancingPolicy(); + } + + SpeculativeExecutionPolicy speculativeExecutionPolicy() { + return cluster.manager.speculativeExecutionPolicy(); + } + + ReconnectionPolicy reconnectionPolicy() { + return cluster.manager.reconnectionPolicy(); + } + + ListeningExecutorService executor() { + return cluster.manager.executor; + } + + ListeningExecutorService blockingExecutor() { + return cluster.manager.blockingExecutor; + } + + // Returns whether there was problem creating the pool + ListenableFuture forceRenewPool(final Host host, Connection reusedConnection) { + final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); + if (distance == HostDistance.IGNORED) return Futures.immediateFuture(true); + + if (isClosing) return Futures.immediateFuture(false); + + final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); + ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); + + final SettableFuture future = SettableFuture.create(); + + GuavaCompatibility.INSTANCE.addCallback( + poolInitFuture, + new FutureCallback() { + @Override + public void onSuccess(Void result) { + HostConnectionPool previous = pools.put(host, newPool); + if (previous == null) { + logger.debug("Added connection pool for {}", host); + } else { + logger.debug("Renewed connection pool for {}", host); + previous.closeAsync(); } - @Override - public void onFailure(Throwable t) { - logger.warn("Error creating pool to " + host, t); - future.set(false); + // If we raced with a session shutdown, ensure that the pool will be closed. + if (isClosing) { + newPool.closeAsync(); + pools.remove(host); + future.set(false); + } else { + future.set(true); } - }); - - return future; - } - - // Replace pool for a given host only if it's the given previous value (which can be null) - // This returns a future if the replacement was successful, or null if we raced. - private ListenableFuture replacePool(final Host host, HostDistance distance, HostConnectionPool previous, Connection reusedConnection) { - if (isClosing) - return MoreFutures.VOID_SUCCESS; + } - final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); - if (previous == null) { - if (pools.putIfAbsent(host, newPool) != null) { - return null; - } - } else { - if (!pools.replace(host, previous, newPool)) { - return null; - } - if (!previous.isClosed()) { - logger.warn("Replacing a pool that wasn't closed. Closing it now, but this was not expected."); - previous.closeAsync(); - } - } - - ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); - - Futures.addCallback(poolInitFuture, new FutureCallback() { - @Override - public void onSuccess(Void result) { - // If we raced with a session shutdown, ensure that the pool will be closed. 
- if (isClosing) { - newPool.closeAsync(); - pools.remove(host); - } - } - - @Override - public void onFailure(Throwable t) { - pools.remove(host); - } + @Override + public void onFailure(Throwable t) { + logger.warn("Error creating pool to " + host, t); + future.set(false); + } }); - return poolInitFuture; - } - - // Returns whether there was problem creating the pool - ListenableFuture maybeAddPool(final Host host, Connection reusedConnection) { - final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); - if (distance == HostDistance.IGNORED) - return Futures.immediateFuture(true); - - HostConnectionPool previous = pools.get(host); - if (previous != null && !previous.isClosed()) - return Futures.immediateFuture(true); - - while (true) { - previous = pools.get(host); - if (previous != null && !previous.isClosed()) - return Futures.immediateFuture(true); - - final SettableFuture future = SettableFuture.create(); - ListenableFuture newPoolInit = replacePool(host, distance, previous, reusedConnection); - if (newPoolInit != null) { - Futures.addCallback(newPoolInit, new FutureCallback() { - @Override - public void onSuccess(Void result) { - logger.debug("Added connection pool for {}", host); - future.set(true); - } - - @Override - public void onFailure(Throwable t) { - if (t instanceof UnsupportedProtocolVersionException) { - cluster.manager.logUnsupportedVersionProtocol(host, ((UnsupportedProtocolVersionException) t).getUnsupportedVersion()); - cluster.manager.triggerOnDown(host, false); - } else if (t instanceof ClusterNameMismatchException) { - ClusterNameMismatchException e = (ClusterNameMismatchException) t; - cluster.manager.logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); - cluster.manager.triggerOnDown(host, false); - } else { - logger.warn("Error creating pool to " + host, t); - // do not mark the host down, as there could be other connections to it - // (e.g. the control connection, or another session pool). - // The conviction policy will mark it down if it has no more active connections. - } - // propagate errors; for all other exceptions, consider the pool init failed - // but allow the session init process to continue normally - if (t instanceof Error) - future.setException(t); - else - future.set(false); - } - }); - return future; - } - } - } - - CloseFuture removePool(Host host) { - final HostConnectionPool pool = pools.remove(host); - return pool == null - ? CloseFuture.immediateFuture() - : pool.closeAsync(); - } - /* - * When the set of live nodes change, the loadbalancer will change his - * mind on host distances. It might change it on the node that came/left - * but also on other nodes (for instance, if a node dies, another - * previously ignored node may be now considered). - * - * This method ensures that all hosts for which a pool should exist - * have one, and hosts that shouldn't don't. - */ - ListenableFuture updateCreatedPools() { - // This method does nothing during initialization. Some hosts may be non-responsive but not yet marked DOWN; if - // we execute the code below we would try to create their pool over and over again. - // It's called explicitly at the end of init(), once isInit has been set to true. - if (!isInit) - return MoreFutures.VOID_SUCCESS; - - // We do 2 iterations, so that we add missing pools first, and them remove all unecessary pool second. 
- // That way, we'll avoid situation where we'll temporarily lose connectivity - final List toRemove = new ArrayList(); - List> poolCreatedFutures = Lists.newArrayList(); - - for (Host h : cluster.getMetadata().allHosts()) { - HostDistance dist = loadBalancingPolicy().distance(h); - HostConnectionPool pool = pools.get(h); - - if (pool == null) { - if (dist != HostDistance.IGNORED && h.state == Host.State.UP) - poolCreatedFutures.add(maybeAddPool(h, null)); - } else if (dist != pool.hostDistance) { - if (dist == HostDistance.IGNORED) { - toRemove.add(h); - } else { - pool.hostDistance = dist; - pool.ensureCoreConnections(); - } + return future; + } + + // Replace pool for a given host only if it's the given previous value (which can be null) + // This returns a future if the replacement was successful, or null if we raced. + private ListenableFuture replacePool( + final Host host, + HostDistance distance, + HostConnectionPool previous, + Connection reusedConnection) { + if (isClosing) return MoreFutures.VOID_SUCCESS; + + final HostConnectionPool newPool = new HostConnectionPool(host, distance, this); + if (previous == null) { + if (pools.putIfAbsent(host, newPool) != null) { + return null; + } + } else { + if (!pools.replace(host, previous, newPool)) { + return null; + } + if (!previous.isClosed()) { + logger.warn( + "Replacing a pool that wasn't closed. Closing it now, but this was not expected."); + previous.closeAsync(); + } + } + + ListenableFuture poolInitFuture = newPool.initAsync(reusedConnection); + + GuavaCompatibility.INSTANCE.addCallback( + poolInitFuture, + new FutureCallback() { + @Override + public void onSuccess(Void result) { + // If we raced with a session shutdown, ensure that the pool will be closed. + if (isClosing) { + newPool.closeAsync(); + pools.remove(host); } - } - - // Wait pool creation before removing, so we don't lose connectivity - ListenableFuture allPoolsCreatedFuture = Futures.allAsList(poolCreatedFutures); - - return GuavaCompatibility.INSTANCE.transformAsync(allPoolsCreatedFuture, new AsyncFunction>() { - @Override - public ListenableFuture> apply(Object input) throws Exception { - List> poolRemovedFuture = Lists.newArrayListWithCapacity(toRemove.size()); - for (Host h : toRemove) - poolRemovedFuture.add(removePool(h)); + } - return Futures.successfulAsList(poolRemovedFuture); - } + @Override + public void onFailure(Throwable t) { + pools.remove(host); + } }); - } - - void updateCreatedPools(Host h) { - HostDistance dist = loadBalancingPolicy().distance(h); - HostConnectionPool pool = pools.get(h); - - try { - if (pool == null) { - if (dist != HostDistance.IGNORED && h.state == Host.State.UP) - maybeAddPool(h, null).get(); - } else if (dist != pool.hostDistance) { - if (dist == HostDistance.IGNORED) { - removePool(h).get(); + return poolInitFuture; + } + + // Returns whether there was problem creating the pool + ListenableFuture maybeAddPool(final Host host, Connection reusedConnection) { + final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host); + if (distance == HostDistance.IGNORED) return Futures.immediateFuture(true); + + HostConnectionPool previous = pools.get(host); + if (previous != null && !previous.isClosed()) return Futures.immediateFuture(true); + + while (true) { + previous = pools.get(host); + if (previous != null && !previous.isClosed()) return Futures.immediateFuture(true); + + final SettableFuture future = SettableFuture.create(); + ListenableFuture newPoolInit = replacePool(host, distance, previous, 
reusedConnection); + if (newPoolInit != null) { + GuavaCompatibility.INSTANCE.addCallback( + newPoolInit, + new FutureCallback() { + @Override + public void onSuccess(Void result) { + logger.debug("Added connection pool for {}", host); + future.set(true); + } + + @Override + public void onFailure(Throwable t) { + if (t instanceof UnsupportedProtocolVersionException) { + cluster.manager.logUnsupportedVersionProtocol( + host, ((UnsupportedProtocolVersionException) t).getUnsupportedVersion()); + cluster.manager.triggerOnDown(host, false); + } else if (t instanceof ClusterNameMismatchException) { + ClusterNameMismatchException e = (ClusterNameMismatchException) t; + cluster.manager.logClusterNameMismatch( + host, e.expectedClusterName, e.actualClusterName); + cluster.manager.triggerOnDown(host, false); } else { - pool.hostDistance = dist; - pool.ensureCoreConnections(); + logger.warn("Error creating pool to " + host, t); + // do not mark the host down, as there could be other connections to it + // (e.g. the control connection, or another session pool). + // The conviction policy will mark it down if it has no more active connections. } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - logger.error("Unexpected error while refreshing connection pools", cause); - if (cause instanceof Error) - throw ((Error) cause); + // propagate errors; for all other exceptions, consider the pool init failed + // but allow the session init process to continue normally + if (t instanceof Error) future.setException(t); + else future.set(false); + } + }); + return future; + } + } + } + + CloseFuture removePool(Host host) { + final HostConnectionPool pool = pools.remove(host); + return pool == null ? CloseFuture.immediateFuture() : pool.closeAsync(); + } + + /* + * When the set of live nodes change, the loadbalancer will change his + * mind on host distances. It might change it on the node that came/left + * but also on other nodes (for instance, if a node dies, another + * previously ignored node may be now considered). + * + * This method ensures that all hosts for which a pool should exist + * have one, and hosts that shouldn't don't. + */ + ListenableFuture updateCreatedPools() { + // This method does nothing during initialization. Some hosts may be non-responsive but not yet + // marked DOWN; if + // we execute the code below we would try to create their pool over and over again. + // It's called explicitly at the end of init(), once isInit has been set to true. + if (!isInit) return MoreFutures.VOID_SUCCESS; + + // We do 2 iterations, so that we add missing pools first, and them remove all unecessary pool + // second. 
+ // That way, we'll avoid situation where we'll temporarily lose connectivity + final List toRemove = new ArrayList(); + List> poolCreatedFutures = Lists.newArrayList(); + + for (Host h : cluster.getMetadata().allHosts()) { + HostDistance dist = loadBalancingPolicy().distance(h); + HostConnectionPool pool = pools.get(h); + + if (pool == null) { + if (dist != HostDistance.IGNORED && h.state == Host.State.UP) + poolCreatedFutures.add(maybeAddPool(h, null)); + } else if (dist != pool.hostDistance) { + if (dist == HostDistance.IGNORED) { + toRemove.add(h); + } else { + pool.hostDistance = dist; + pool.ensureCoreConnections(); } + } } - void onDown(Host host) throws InterruptedException, ExecutionException { - // Note that with well behaved balancing policy (that ignore dead nodes), the removePool call is not necessary - // since updateCreatedPools should take care of it. But better protect against non well behaving policies. - removePool(host).force().get(); - updateCreatedPools().get(); - } - - void onRemove(Host host) throws InterruptedException, ExecutionException { - onDown(host); - } - - Message.Request makeRequestMessage(Statement statement, ByteBuffer pagingState) { - // We need the protocol version, which is only available once the cluster has initialized. Initialize the session to ensure this is the case. - // init() locks, so avoid if we know we don't need it. - if (!isInit) - init(); - ProtocolVersion protocolVersion = cluster.manager.protocolVersion(); - CodecRegistry codecRegistry = cluster.manager.configuration.getCodecRegistry(); - - ConsistencyLevel consistency = statement.getConsistencyLevel(); - if (consistency == null) - consistency = configuration().getQueryOptions().getConsistencyLevel(); - - ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); - if (protocolVersion.compareTo(ProtocolVersion.V3) < 0 && statement instanceof BatchStatement) { - if (serialConsistency != null) - throw new UnsupportedFeatureException(protocolVersion, "Serial consistency on batch statements is not supported"); - } else if (serialConsistency == null) - serialConsistency = configuration().getQueryOptions().getSerialConsistencyLevel(); - - if (statement.getOutgoingPayload() != null && protocolVersion.compareTo(ProtocolVersion.V4) < 0) - throw new UnsupportedFeatureException(protocolVersion, "Custom payloads are only supported since native protocol V4"); - - long defaultTimestamp = Long.MIN_VALUE; - if (protocolVersion.compareTo(ProtocolVersion.V3) >= 0) { - defaultTimestamp = statement.getDefaultTimestamp(); - if (defaultTimestamp == Long.MIN_VALUE) - defaultTimestamp = cluster.getConfiguration().getPolicies().getTimestampGenerator().next(); - } - - int fetchSize = statement.getFetchSize(); - ByteBuffer usedPagingState = pagingState; - - if (protocolVersion == ProtocolVersion.V1) { - assert pagingState == null; - // We don't let the user change the fetchSize globally if the proto v1 is used, so we just need to - // check for the case of a per-statement override - if (fetchSize <= 0) - fetchSize = -1; - else if (fetchSize != Integer.MAX_VALUE) - throw new UnsupportedFeatureException(protocolVersion, "Paging is not supported"); - } else if (fetchSize <= 0) { - fetchSize = configuration().getQueryOptions().getFetchSize(); - } - - if (fetchSize == Integer.MAX_VALUE) - fetchSize = -1; - - if (pagingState == null) { - usedPagingState = statement.getPagingState(); - } - - if (statement instanceof StatementWrapper) - statement = ((StatementWrapper) statement).getWrappedStatement(); 
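
The pool management above keys entirely off the load balancing policy's distance: IGNORED hosts get no pool, and a distance change resizes or removes an existing pool. A rough sketch of how an application influences this from the outside, with a hypothetical local data center name:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;

public class PoolingExample {
  public static void main(String[] args) {
    // Hosts reported as LOCAL/REMOTE by the load balancing policy get pools
    // sized from these options; IGNORED hosts get no pool at all.
    PoolingOptions pooling =
        new PoolingOptions()
            .setConnectionsPerHost(HostDistance.LOCAL, 2, 8)
            .setConnectionsPerHost(HostDistance.REMOTE, 1, 2);

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withLoadBalancingPolicy(
                DCAwareRoundRobinPolicy.builder().withLocalDc("dc1").build()) // hypothetical DC
            .withPoolingOptions(pooling)
            .build();
    cluster.connect();
    cluster.close();
  }
}
```
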
- - Message.Request request; - - if (statement instanceof RegularStatement) { - RegularStatement rs = (RegularStatement) statement; - - // It saddens me that we special case for the query builder here, but for now this is simpler. - // We could provide a general API in RegularStatement instead at some point but it's unclear what's - // the cleanest way to do that is right now (and it's probably not really that useful anyway). - if (protocolVersion == ProtocolVersion.V1 && rs instanceof com.datastax.driver.core.querybuilder.BuiltStatement) - ((com.datastax.driver.core.querybuilder.BuiltStatement) rs).setForceNoValues(true); - - ByteBuffer[] rawPositionalValues = rs.getValues(protocolVersion, codecRegistry); - Map rawNamedValues = rs.getNamedValues(protocolVersion, codecRegistry); + // Wait pool creation before removing, so we don't lose connectivity + ListenableFuture allPoolsCreatedFuture = Futures.allAsList(poolCreatedFutures); - if (protocolVersion == ProtocolVersion.V1 && (rawPositionalValues != null || rawNamedValues != null)) - throw new UnsupportedFeatureException(protocolVersion, "Binary values are not supported"); + return GuavaCompatibility.INSTANCE.transformAsync( + allPoolsCreatedFuture, + new AsyncFunction>() { + @Override + public ListenableFuture> apply(Object input) throws Exception { + List> poolRemovedFuture = + Lists.newArrayListWithCapacity(toRemove.size()); + for (Host h : toRemove) poolRemovedFuture.add(removePool(h)); - if (protocolVersion == ProtocolVersion.V2 && rawNamedValues != null) - throw new UnsupportedFeatureException(protocolVersion, "Named values are not supported"); - - List positionalValues = rawPositionalValues == null ? Collections.emptyList() : Arrays.asList(rawPositionalValues); - Map namedValues = rawNamedValues == null ? Collections.emptyMap() : rawNamedValues; - - String qString = rs.getQueryString(codecRegistry); - - Requests.QueryProtocolOptions options = new Requests.QueryProtocolOptions(Message.Request.Type.QUERY, consistency, positionalValues, namedValues, - false, fetchSize, usedPagingState, serialConsistency, defaultTimestamp); - request = new Requests.Query(qString, options, statement.isTracing()); - } else if (statement instanceof BoundStatement) { - BoundStatement bs = (BoundStatement) statement; - if (!cluster.manager.preparedQueries.containsKey(bs.statement.getPreparedId().id)) { - throw new InvalidQueryException(String.format("Tried to execute unknown prepared query : %s. 
" - + "You may have used a PreparedStatement that was created with another Cluster instance.", bs.statement.getPreparedId().id)); - } - if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) - bs.ensureAllSet(); - boolean skipMetadata = protocolVersion != ProtocolVersion.V1 && bs.statement.getPreparedId().resultSetMetadata != null; - Requests.QueryProtocolOptions options = new Requests.QueryProtocolOptions(Message.Request.Type.EXECUTE, consistency, Arrays.asList(bs.wrapper.values), Collections.emptyMap(), - skipMetadata, fetchSize, usedPagingState, serialConsistency, defaultTimestamp); - request = new Requests.Execute(bs.statement.getPreparedId().id, options, statement.isTracing()); + return Futures.successfulAsList(poolRemovedFuture); + } + }); + } + + void updateCreatedPools(Host h) { + HostDistance dist = loadBalancingPolicy().distance(h); + HostConnectionPool pool = pools.get(h); + + try { + if (pool == null) { + if (dist != HostDistance.IGNORED && h.state == Host.State.UP) maybeAddPool(h, null).get(); + } else if (dist != pool.hostDistance) { + if (dist == HostDistance.IGNORED) { + removePool(h).get(); } else { - assert statement instanceof BatchStatement : statement; - assert pagingState == null; - - if (protocolVersion == ProtocolVersion.V1) - throw new UnsupportedFeatureException(protocolVersion, "Protocol level batching is not supported"); - - BatchStatement bs = (BatchStatement) statement; - if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) - bs.ensureAllSet(); - BatchStatement.IdAndValues idAndVals = bs.getIdAndValues(protocolVersion, codecRegistry); - Requests.BatchProtocolOptions options = new Requests.BatchProtocolOptions(consistency, serialConsistency, defaultTimestamp); - request = new Requests.Batch(bs.batchType, idAndVals.ids, idAndVals.values, options, statement.isTracing()); + pool.hostDistance = dist; + pool.ensureCoreConnections(); } - - request.setCustomPayload(statement.getOutgoingPayload()); - return request; - } - - /** - * Execute the provided request. - *

- * This method will find a suitable node to connect to using the - * {@link LoadBalancingPolicy} and handle host failover. - */ - void execute(final RequestHandler.Callback callback, final Statement statement) { - if (isInit) - new RequestHandler(this, callback, statement).sendRequest(); - else - this.initAsync().addListener(new Runnable() { + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + logger.error("Unexpected error while refreshing connection pools", cause); + if (cause instanceof Error) throw ((Error) cause); + } + } + + void onDown(Host host) throws InterruptedException, ExecutionException { + // Note that with well behaved balancing policy (that ignore dead nodes), the removePool call is + // not necessary + // since updateCreatedPools should take care of it. But better protect against non well behaving + // policies. + removePool(host).force().get(); + updateCreatedPools().get(); + } + + void onRemove(Host host) throws InterruptedException, ExecutionException { + onDown(host); + } + + Message.Request makeRequestMessage(Statement statement, ByteBuffer pagingState) { + // We need the protocol version, which is only available once the cluster has initialized. + // Initialize the session to ensure this is the case. + // init() locks, so avoid if we know we don't need it. + if (!isInit) init(); + ProtocolVersion protocolVersion = cluster.manager.protocolVersion(); + CodecRegistry codecRegistry = cluster.manager.configuration.getCodecRegistry(); + + ConsistencyLevel consistency = statement.getConsistencyLevel(); + if (consistency == null) consistency = configuration().getQueryOptions().getConsistencyLevel(); + + ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); + if (protocolVersion.compareTo(ProtocolVersion.V3) < 0 && statement instanceof BatchStatement) { + if (serialConsistency != null) + throw new UnsupportedFeatureException( + protocolVersion, "Serial consistency on batch statements is not supported"); + } else if (serialConsistency == null) + serialConsistency = configuration().getQueryOptions().getSerialConsistencyLevel(); + + if (statement.getOutgoingPayload() != null && protocolVersion.compareTo(ProtocolVersion.V4) < 0) + throw new UnsupportedFeatureException( + protocolVersion, "Custom payloads are only supported since native protocol V4"); + + long defaultTimestamp = Long.MIN_VALUE; + if (protocolVersion.compareTo(ProtocolVersion.V3) >= 0) { + defaultTimestamp = statement.getDefaultTimestamp(); + if (defaultTimestamp == Long.MIN_VALUE) + defaultTimestamp = cluster.getConfiguration().getPolicies().getTimestampGenerator().next(); + } + + int fetchSize = statement.getFetchSize(); + ByteBuffer usedPagingState = pagingState; + + if (protocolVersion == ProtocolVersion.V1) { + assert pagingState == null; + // We don't let the user change the fetchSize globally if the proto v1 is used, so we just + // need to + // check for the case of a per-statement override + if (fetchSize <= 0) fetchSize = -1; + else if (fetchSize != Integer.MAX_VALUE) + throw new UnsupportedFeatureException(protocolVersion, "Paging is not supported"); + } else if (fetchSize <= 0) { + fetchSize = configuration().getQueryOptions().getFetchSize(); + } + + if (fetchSize == Integer.MAX_VALUE) fetchSize = -1; + + if (pagingState == null) { + usedPagingState = statement.getPagingState(); + } + + int nowInSeconds = statement.getNowInSeconds(); + if (nowInSeconds != Integer.MIN_VALUE && 
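
As a usage illustration of the defaulting logic above (statement-level consistency settings win, otherwise the cluster-wide QueryOptions apply), here is a small sketch; the keyspace and table are hypothetical:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class ConsistencyExample {
  public static void main(String[] args) {
    // Cluster-wide defaults, used when a statement does not set its own levels.
    QueryOptions queryOptions =
        new QueryOptions()
            .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)
            .setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL);

    Cluster cluster =
        Cluster.builder().addContactPoint("127.0.0.1").withQueryOptions(queryOptions).build();
    Session session = cluster.connect("ks"); // hypothetical keyspace

    // Per-statement overrides take precedence over the QueryOptions defaults.
    Statement statement =
        new SimpleStatement("UPDATE users SET name = ? WHERE id = ? IF EXISTS", "alice", 42)
            .setConsistencyLevel(ConsistencyLevel.QUORUM)
            .setSerialConsistencyLevel(ConsistencyLevel.SERIAL);
    session.execute(statement);
    cluster.close();
  }
}
```
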
protocolVersion.compareTo(ProtocolVersion.V5) < 0) { + throw new UnsupportedFeatureException( + protocolVersion, "Now in seconds is only supported since native protocol V5"); + } + + if (statement instanceof StatementWrapper) + statement = ((StatementWrapper) statement).getWrappedStatement(); + + Message.Request request; + + if (statement instanceof RegularStatement) { + RegularStatement rs = (RegularStatement) statement; + + // It saddens me that we special case for the query builder here, but for now this is simpler. + // We could provide a general API in RegularStatement instead at some point but it's unclear + // what's + // the cleanest way to do that is right now (and it's probably not really that useful anyway). + if (protocolVersion == ProtocolVersion.V1 + && rs instanceof com.datastax.driver.core.querybuilder.BuiltStatement) + ((com.datastax.driver.core.querybuilder.BuiltStatement) rs).setForceNoValues(true); + + ByteBuffer[] rawPositionalValues = rs.getValues(protocolVersion, codecRegistry); + Map rawNamedValues = rs.getNamedValues(protocolVersion, codecRegistry); + + if (protocolVersion == ProtocolVersion.V1 + && (rawPositionalValues != null || rawNamedValues != null)) + throw new UnsupportedFeatureException(protocolVersion, "Binary values are not supported"); + + if (protocolVersion == ProtocolVersion.V2 && rawNamedValues != null) + throw new UnsupportedFeatureException(protocolVersion, "Named values are not supported"); + + ByteBuffer[] positionalValues = + rawPositionalValues == null ? Requests.EMPTY_BB_ARRAY : rawPositionalValues; + Map namedValues = + rawNamedValues == null ? Collections.emptyMap() : rawNamedValues; + + String qString = rs.getQueryString(codecRegistry); + + Requests.QueryProtocolOptions options = + new Requests.QueryProtocolOptions( + Message.Request.Type.QUERY, + consistency, + positionalValues, + namedValues, + false, + fetchSize, + usedPagingState, + serialConsistency, + defaultTimestamp, + nowInSeconds); + request = new Requests.Query(qString, options, statement.isTracing()); + } else if (statement instanceof BoundStatement) { + BoundStatement bs = (BoundStatement) statement; + if (!cluster.manager.preparedQueries.containsKey( + bs.statement.getPreparedId().boundValuesMetadata.id)) { + throw new InvalidQueryException( + String.format( + "Tried to execute unknown prepared query : %s. " + + "You may have used a PreparedStatement that was created with another Cluster instance.", + bs.statement.getPreparedId().boundValuesMetadata.id)); + } + if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) bs.ensureAllSet(); + + // skip resultset metadata if version > 1 (otherwise this feature is not supported) + // and if we already have metadata for the prepared statement being executed. 
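
The fetch size and paging-state handling above corresponds to the following user-side pattern; the schema is again hypothetical:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PagingState;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class PagingExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect("ks"); // hypothetical keyspace

    Statement statement = new SimpleStatement("SELECT * FROM users").setFetchSize(100);
    ResultSet rs = session.execute(statement);
    // Only the first page (at most 100 rows) has been fetched at this point.

    PagingState pagingState = rs.getExecutionInfo().getPagingState();
    if (pagingState != null) {
      // Reattach the state to fetch the next page in a separate request.
      ResultSet nextPage = session.execute(statement.setPagingState(pagingState));
      System.out.println(nextPage.getAvailableWithoutFetching());
    }
    cluster.close();
  }
}
```
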
+ boolean skipMetadata = + protocolVersion != ProtocolVersion.V1 + && bs.statement.getPreparedId().resultSetMetadata.variables != null; + Requests.QueryProtocolOptions options = + new Requests.QueryProtocolOptions( + Message.Request.Type.EXECUTE, + consistency, + bs.wrapper.values, + Collections.emptyMap(), + skipMetadata, + fetchSize, + usedPagingState, + serialConsistency, + defaultTimestamp, + nowInSeconds); + request = + new Requests.Execute( + bs.statement.getPreparedId().boundValuesMetadata.id, + bs.statement.getPreparedId().resultSetMetadata.id, + options, + statement.isTracing()); + } else { + assert statement instanceof BatchStatement : statement; + assert pagingState == null; + + if (protocolVersion == ProtocolVersion.V1) + throw new UnsupportedFeatureException( + protocolVersion, "Protocol level batching is not supported"); + + BatchStatement bs = (BatchStatement) statement; + if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) bs.ensureAllSet(); + BatchStatement.IdAndValues idAndVals = bs.getIdAndValues(protocolVersion, codecRegistry); + Requests.BatchProtocolOptions options = + new Requests.BatchProtocolOptions( + consistency, serialConsistency, defaultTimestamp, nowInSeconds); + request = + new Requests.Batch( + bs.batchType, idAndVals.ids, idAndVals.values, options, statement.isTracing()); + } + + request.setCustomPayload(statement.getOutgoingPayload()); + return request; + } + + /** + * Execute the provided request. + * + *
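
The batch branch above only applies to native protocol V2 and later; on V1 the driver throws an UnsupportedFeatureException. A minimal client-side sketch (hypothetical table):

```
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

public class BatchExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect("ks"); // hypothetical keyspace

    PreparedStatement insert = session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");

    // A protocol-level batch: the prepared statement ids and values are sent
    // in a single BATCH request rather than as separate EXECUTE requests.
    BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
    batch.add(insert.bind(1, "alice"));
    batch.add(insert.bind(2, "bob"));
    session.execute(batch);
    cluster.close();
  }
}
```
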

This method will find a suitable node to connect to using the {@link LoadBalancingPolicy} + * and handle host failover. + */ + void execute(final RequestHandler.Callback callback, final Statement statement) { + if (this.isClosed()) { + callback.onException( + null, new IllegalStateException("Could not send request, session is closed"), 0, 0); + return; + } + if (isInit) new RequestHandler(this, callback, statement).sendRequest(); + else + this.initAsync() + .addListener( + new Runnable() { @Override public void run() { - new RequestHandler(SessionManager.this, callback, statement).sendRequest(); + new RequestHandler(SessionManager.this, callback, statement).sendRequest(); } - }, executor()); - } - - private ListenableFuture prepare(final PreparedStatement statement, InetSocketAddress toExclude) { - final String query = statement.getQueryString(); - List> futures = Lists.newArrayListWithExpectedSize(pools.size()); - for (final Map.Entry entry : pools.entrySet()) { - if (entry.getKey().getSocketAddress().equals(toExclude)) - continue; - - try { - // Preparing is not critical: if it fails, it will fix itself later when the user tries to execute - // the prepared query. So don't wait if no connection is available, simply abort. - ListenableFuture connectionFuture = entry.getValue().borrowConnection( - 0, TimeUnit.MILLISECONDS, 0); - ListenableFuture prepareFuture = GuavaCompatibility.INSTANCE.transformAsync(connectionFuture, - new AsyncFunction() { - @Override - public ListenableFuture apply(final Connection c) throws Exception { - Connection.Future responseFuture = c.write(new Requests.Prepare(query)); - Futures.addCallback(responseFuture, new FutureCallback() { - @Override - public void onSuccess(Response result) { - c.release(); - } - - @Override - public void onFailure(Throwable t) { - logger.debug(String.format("Unexpected error while preparing query (%s) on %s", - query, entry.getKey()), t); - c.release(); - } - }); - return responseFuture; - } + }, + executor()); + } + + private ListenableFuture prepare( + final PreparedStatement statement, EndPoint toExclude) { + final String query = statement.getQueryString(); + List> futures = Lists.newArrayListWithExpectedSize(pools.size()); + for (final Map.Entry entry : pools.entrySet()) { + if (entry.getKey().getEndPoint().equals(toExclude)) continue; + + try { + // Preparing is not critical: if it fails, it will fix itself later when the user tries to + // execute + // the prepared query. So don't wait if no connection is available, simply abort. 
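
The new isClosed() guard above makes a request on a closed session fail fast instead of racing with pool shutdown. A small sketch of the close lifecycle it protects:

```
import com.datastax.driver.core.CloseFuture;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class CloseExample {
  public static void main(String[] args) throws Exception {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect();

    CloseFuture closing = session.closeAsync(); // non-blocking; pools close in the background
    closing.get();                              // wait for the close to complete

    System.out.println(session.isClosed());     // true
    // Any further execute()/executeAsync() call now fails fast with an
    // IllegalStateException rather than hanging while pools shut down.
    cluster.close();
  }
}
```
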
+ ListenableFuture connectionFuture = + entry.getValue().borrowConnection(0, TimeUnit.MILLISECONDS, 0); + ListenableFuture prepareFuture = + GuavaCompatibility.INSTANCE.transformAsync( + connectionFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(final Connection c) throws Exception { + Connection.Future responseFuture = c.write(new Requests.Prepare(query)); + GuavaCompatibility.INSTANCE.addCallback( + responseFuture, + new FutureCallback() { + @Override + public void onSuccess(Response result) { + c.release(); + } + + @Override + public void onFailure(Throwable t) { + logger.debug( + String.format( + "Unexpected error while preparing query (%s) on %s", + query, entry.getKey()), + t); + c.release(); + } }); - futures.add(prepareFuture); - } catch (Exception e) { - // Again, not being able to prepare the query right now is no big deal, so just ignore - } + return responseFuture; + } + }); + futures.add(prepareFuture); + } catch (Exception e) { + // Again, not being able to prepare the query right now is no big deal, so just ignore + } + } + // Return the statement when all futures are done + return GuavaCompatibility.INSTANCE.transform( + Futures.successfulAsList(futures), Functions.constant(statement)); + } + + ResultSetFuture executeQuery(Message.Request msg, Statement statement) { + DefaultResultSetFuture future = + new DefaultResultSetFuture( + this, configuration().getProtocolOptions().getProtocolVersion(), msg); + execute(future, statement); + return future; + } + + void cleanupIdleConnections(long now) { + for (HostConnectionPool pool : pools.values()) { + pool.cleanupIdleConnections(now); + } + } + + private static class State implements Session.State { + + private final SessionManager session; + private final List connectedHosts; + private final int[] openConnections; + private final int[] trashedConnections; + private final int[] inFlightQueries; + + private State(SessionManager session) { + this.session = session; + this.connectedHosts = ImmutableList.copyOf(session.pools.keySet()); + + this.openConnections = new int[connectedHosts.size()]; + this.trashedConnections = new int[connectedHosts.size()]; + this.inFlightQueries = new int[connectedHosts.size()]; + + int i = 0; + for (Host h : connectedHosts) { + HostConnectionPool p = session.pools.get(h); + // It's possible we race and the host has been removed since the beginning of this + // functions. In that case, the fact it's part of getConnectedHosts() but has no opened + // connections will be slightly weird, but it's unlikely enough that we don't bother + // avoiding. 
+ if (p == null) { + openConnections[i] = 0; + trashedConnections[i] = 0; + inFlightQueries[i] = 0; + continue; } - // Return the statement when all futures are done - return Futures.transform( - Futures.successfulAsList(futures), - Functions.constant(statement)); - } - ResultSetFuture executeQuery(Message.Request msg, Statement statement) { - DefaultResultSetFuture future = new DefaultResultSetFuture(this, configuration().getProtocolOptions().getProtocolVersion(), msg); - execute(future, statement); - return future; + openConnections[i] = p.opened(); + inFlightQueries[i] = p.totalInFlight.get(); + trashedConnections[i] = p.trashed(); + i++; + } } - void cleanupIdleConnections(long now) { - for (HostConnectionPool pool : pools.values()) { - pool.cleanupIdleConnections(now); - } + private int getIdx(Host h) { + // We guarantee that we only ever create one Host object per-address, which means that '==' + // comparison is a proper way to test Host equality. Given that, the number of hosts + // per-session will always be small enough (even 1000 is kind of small and even with a 1000+ + // node cluster, you probably don't want a Session to connect to all of them) that iterating + // over connectedHosts will never be much more inefficient than keeping a + // Map. And it's less garbage/memory consumption so... + for (int i = 0; i < connectedHosts.size(); i++) if (h == connectedHosts.get(i)) return i; + return -1; } - private static class State implements Session.State { - - private final SessionManager session; - private final List connectedHosts; - private final int[] openConnections; - private final int[] trashedConnections; - private final int[] inFlightQueries; - - private State(SessionManager session) { - this.session = session; - this.connectedHosts = ImmutableList.copyOf(session.pools.keySet()); - - this.openConnections = new int[connectedHosts.size()]; - this.trashedConnections = new int[connectedHosts.size()]; - this.inFlightQueries = new int[connectedHosts.size()]; - - int i = 0; - for (Host h : connectedHosts) { - HostConnectionPool p = session.pools.get(h); - // It's possible we race and the host has been removed since the beginning of this - // functions. In that case, the fact it's part of getConnectedHosts() but has no opened - // connections will be slightly weird, but it's unlikely enough that we don't bother avoiding. - if (p == null) { - openConnections[i] = 0; - trashedConnections[i] = 0; - inFlightQueries[i] = 0; - continue; - } - - openConnections[i] = p.opened(); - inFlightQueries[i] = p.totalInFlight.get(); - trashedConnections[i] = p.trashed(); - i++; - } - } - - private int getIdx(Host h) { - // We guarantee that we only ever create one Host object per-address, which means that '==' - // comparison is a proper way to test Host equality. Given that, the number of hosts - // per-session will always be small enough (even 1000 is kind of small and even with a 1000+ - // node cluster, you probably don't want a Session to connect to all of them) that iterating - // over connectedHosts will never be much more inefficient than keeping a - // Map. And it's less garbage/memory consumption so... 
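
A quick illustration of how this snapshot is typically consumed; the connection counts come straight from the arrays populated above:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Session;

public class SessionStateExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect();

    // getState() returns an immutable snapshot of the per-host pool metrics.
    Session.State state = session.getState();
    for (Host host : state.getConnectedHosts()) {
      System.out.printf(
          "%s: open=%d, trashed=%d, inFlight=%d%n",
          host,
          state.getOpenConnections(host),
          state.getTrashedConnections(host),
          state.getInFlightQueries(host));
    }
    cluster.close();
  }
}
```
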
- for (int i = 0; i < connectedHosts.size(); i++) - if (h == connectedHosts.get(i)) - return i; - return -1; - } - - @Override - public Session getSession() { - return session; - } + @Override + public Session getSession() { + return session; + } - @Override - public Collection getConnectedHosts() { - return connectedHosts; - } + @Override + public Collection getConnectedHosts() { + return connectedHosts; + } - @Override - public int getOpenConnections(Host host) { - int i = getIdx(host); - return i < 0 ? 0 : openConnections[i]; - } + @Override + public int getOpenConnections(Host host) { + int i = getIdx(host); + return i < 0 ? 0 : openConnections[i]; + } - @Override - public int getTrashedConnections(Host host) { - int i = getIdx(host); - return i < 0 ? 0 : trashedConnections[i]; - } + @Override + public int getTrashedConnections(Host host) { + int i = getIdx(host); + return i < 0 ? 0 : trashedConnections[i]; + } - @Override - public int getInFlightQueries(Host host) { - int i = getIdx(host); - return i < 0 ? 0 : inFlightQueries[i]; - } + @Override + public int getInFlightQueries(Host host) { + int i = getIdx(host); + return i < 0 ? 0 : inFlightQueries[i]; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java index 220256ab54e..6c37476036e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableByIndexData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,555 +20,563 @@ import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; -/** - * Collection of (typed) CQL values that can be set by index (starting at zero). - */ +/** Collection of (typed) CQL values that can be set by index (starting at zero). */ public interface SettableByIndexData> { - /** - * Sets the {@code i}th value to the provided boolean. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code boolean}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Boolean.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setBool(int i, boolean v); - - /** - * Set the {@code i}th value to the provided byte. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code tinyint}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Byte.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setByte(int i, byte v); - - /** - * Set the {@code i}th value to the provided short. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code smallint}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Short.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setShort(int i, short v); - - /** - * Set the {@code i}th value to the provided integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code int}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Integer.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setInt(int i, int v); - - /** - * Sets the {@code i}th value to the provided long. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code bigint}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Long.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setLong(int i, long v); - - /** - * Set the {@code i}th value to the provided date. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code timestamp}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTimestamp(int i, Date v); - - /** - * Set the {@code i}th value to the provided date (without time). - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code date}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDate(int i, LocalDate v); - - /** - * Set the {@code i}th value to the provided time as a long in nanoseconds since midnight. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code time}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTime(int i, long v); - - /** - * Sets the {@code i}th value to the provided float. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code float}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Float.class)} - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setFloat(int i, float v); - - /** - * Sets the {@code i}th value to the provided double. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code double}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or - * {@code set(i, v, Double.class)}. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDouble(int i, double v); - - /** - * Sets the {@code i}th value to the provided string. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL types {@code text}, {@code varchar} and {@code ascii}, - * this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setString(int i, String v); - - /** - * Sets the {@code i}th value to the provided byte buffer. - *
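
Since BoundStatement (like UDTValue and TupleValue) implements this interface, the scalar setters documented here are usually used as a fluent chain; a sketch with a hypothetical table:

```
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.util.Date;

public class SetByIndexExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect("ks"); // hypothetical keyspace

    PreparedStatement insert =
        session.prepare("INSERT INTO users (id, name, score, created_at) VALUES (?, ?, ?, ?)");

    // Each setter returns the statement itself, so calls can be chained.
    BoundStatement bound =
        insert
            .bind()
            .setInt(0, 42)
            .setString(1, "alice")
            .setDouble(2, 3.5)
            .setTimestamp(3, new Date());
    session.execute(bound);
    cluster.close();
  }
}
```
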

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code blob}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setBytes(int i, ByteBuffer v); - - /** - * Sets the {@code i}th value to the provided byte buffer. - *

- * This method does not use any codec; it sets the value in its binary form directly. If you insert - * data that is not compatible with the underlying CQL type, you will get an {@code InvalidQueryException} at - * execute time. - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - public T setBytesUnsafe(int i, ByteBuffer v); - - /** - * Sets the {@code i}th value to the provided big integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code varint}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setVarint(int i, BigInteger v); - - /** - * Sets the {@code i}th value to the provided big decimal. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code decimal}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDecimal(int i, BigDecimal v); - - /** - * Sets the {@code i}th value to the provided UUID. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL types {@code uuid} and {@code timeuuid}, this will - * be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setUUID(int i, UUID v); - - /** - * Sets the {@code i}th value to the provided inet address. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code inet}, this will be the built-in codec). - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setInet(int i, InetAddress v); - - /** - * Sets the {@code i}th value to the provided list. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java list is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setList(int, List, Class)} or {@link #setList(int, List, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(int i, List v); - - /** - * Sets the {@code i}th value to the provided list, which elements are of the provided - * Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java type to the underlying CQL type. - *

- * If the type of the elements is generic, use {@link #setList(int, List, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsClass the class for the elements of the list. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(int i, List v, Class elementsClass); - - /** - * Sets the {@code i}th value to the provided list, which elements are of the provided - * Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java type to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsType the type for the elements of the list. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(int i, List v, TypeToken elementsType); - - /** - * Sets the {@code i}th value to the provided map. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java map is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setMap(int, Map, Class, Class)} or {@link #setMap(int, Map, TypeToken, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(int i, Map v); - - /** - * Sets the {@code i}th value to the provided map, which keys and values are of the provided - * Java classes. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java types to the underlying CQL type. - *

- * If the type of the keys or values is generic, use {@link #setMap(int, Map, TypeToken, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param keysClass the class for the keys of the map. - * @param valuesClass the class for the values of the map. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(int i, Map v, Class keysClass, Class valuesClass); - - /** - * Sets the {@code i}th value to the provided map, which keys and values are of the provided - * Java types. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java types to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param keysType the type for the keys of the map. - * @param valuesType the type for the values of the map. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType); - - /** - * Sets the {@code i}th value to the provided set. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java set is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setSet(int, Set, Class)} or {@link #setSet(int, Set, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(int i, Set v); - - /** - * Sets the {@code i}th value to the provided set, which elements are of the provided - * Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets of the given - * Java type to the underlying CQL type. - *

- * If the type of the elements is generic, use {@link #setSet(int, Set, TypeToken)}. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsClass the class for the elements of the set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(int i, Set v, Class elementsClass); - - /** - * Sets the {@code i}th value to the provided set, which elements are of the provided - * Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets of the given - * Java type to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsType the type for the elements of the set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(int i, Set v, TypeToken elementsType); - - /** - * Sets the {@code i}th value to the provided UDT value. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of {@code UDTValue} - * to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setUDTValue(int i, UDTValue v); - - /** - * Sets the {@code i}th value to the provided tuple value. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of {@code TupleValue} - * to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTupleValue(int i, TupleValue v); - - /** - * Sets the {@code i}th value to {@code null}. - *

- * This is mainly intended for CQL types which map to native Java types. - * - * @param i the index of the value to set. - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - public T setToNull(int i); - - /** - * Sets the {@code i}th value to the provided value of the provided Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the provided Java class - * to the underlying CQL type. - *

- * If the Java type is generic, use {@link #set(int, Object, TypeToken)} instead. - * - * @param i the index of the value to set. - * @param v the value to set; may be {@code null}. - * @param targetClass The Java class to convert to; must not be {@code null}; - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - T set(int i, V v, Class targetClass); - - /** - * Sets the {@code i}th value to the provided value of the provided Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the provided Java type - * to the underlying CQL type. - * - * @param i the index of the value to set. - * @param v the value to set; may be {@code null}. - * @param targetType The Java type to convert to; must not be {@code null}; - * @return this object. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - T set(int i, V v, TypeToken targetType); - - /** - * Sets the {@code i}th value to the provided value, converted using the given {@link TypeCodec}. - *

- * This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the given codec instead. - * This can be useful if the codec would collide with a previously registered one, or if you want to use the - * codec just once without registering it. - *

- * It is the caller's responsibility to ensure that the given codec {@link TypeCodec#accepts(DataType) accepts} - * the underlying CQL type; failing to do so may result in {@link InvalidTypeException}s being thrown. - * - * @param i the index of the value to set. - * @param v the value to set; may be {@code null}. - * @param codec The {@link TypeCodec} to use to serialize the value; may not be {@code null}. - * @return this object. - * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) accept} the underlying CQL type. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - T set(int i, V v, TypeCodec codec); - + /** + * Sets the {@code i}th value to the provided boolean. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code boolean}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Boolean.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setBool(int i, boolean v); + + /** + * Sets the {@code i}th value to the provided byte. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code tinyint}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Byte.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setByte(int i, byte v); + + /** + * Sets the {@code i}th value to the provided short. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code smallint}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Short.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setShort(int i, short v); + + /** + * Sets the {@code i}th value to the provided integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code int}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Integer.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setInt(int i, int v); + + /** + * Sets the {@code i}th value to the provided long. + * + *
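As a usage sketch for the by-index primitive setters above (`setBool`, `setByte`, `setShort`, `setInt`): `BoundStatement` implements this interface, so values can be bound positionally as shown below. The keyspace, table and column names are hypothetical, and a connected `Session` is assumed.

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

// Hypothetical schema: CREATE TABLE ks.users (id int PRIMARY KEY, active boolean, age smallint)
void bindPrimitives(Session session) {
  PreparedStatement ps =
      session.prepare("INSERT INTO ks.users (id, active, age) VALUES (?, ?, ?)");
  BoundStatement bs =
      ps.bind()
          .setInt(0, 42)             // CQL int
          .setBool(1, true)          // CQL boolean
          .setShort(2, (short) 30);  // CQL smallint
  // The primitive overloads cannot express NULL; use setToNull or the boxed generic setter:
  // bs.setToNull(2);
  // bs.set(2, (Short) null, Short.class);
  session.execute(bs);
}
```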

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code bigint}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Long.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setLong(int i, long v); + + /** + * Sets the {@code i}th value to the provided date. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code timestamp}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTimestamp(int i, Date v); + + /** + * Sets the {@code i}th value to the provided date (without time). + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code date}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDate(int i, LocalDate v); + + /** + * Sets the {@code i}th value to the provided time as a long in nanoseconds since midnight. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code time}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTime(int i, long v); + + /** + * Sets the {@code i}th value to the provided float. + * + *
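The three temporal setters above (`setTimestamp`, `setDate`, `setTime`) target distinct CQL types and expect different Java representations. A minimal sketch, assuming a hypothetical `ks.events` table and a connected `Session`:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.util.Date;
import java.util.concurrent.TimeUnit;

// Hypothetical schema: CREATE TABLE ks.events (id int PRIMARY KEY, at timestamp, day date, tod time)
void bindTemporals(Session session) {
  PreparedStatement ps =
      session.prepare("INSERT INTO ks.events (id, at, day, tod) VALUES (?, ?, ?, ?)");
  BoundStatement bs =
      ps.bind()
          .setInt(0, 1)
          .setTimestamp(1, new Date())                          // CQL timestamp <-> java.util.Date
          .setDate(2, LocalDate.fromYearMonthDay(2017, 1, 31))  // CQL date <-> driver LocalDate
          .setTime(3, TimeUnit.HOURS.toNanos(9));               // CQL time = nanoseconds since midnight
  session.execute(bs);
}
```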

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code float}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Float.class)} + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setFloat(int i, float v); + + /** + * Sets the {@code i}th value to the provided double. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code double}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(int)} or {@code + * set(i, v, Double.class)}. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDouble(int i, double v); + + /** + * Sets the {@code i}th value to the provided string. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will + * be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setString(int i, String v); + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code blob}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setBytes(int i, ByteBuffer v); + + /** + * Sets the {@code i}th value to the provided byte buffer. + * + *

This method does not use any codec; it sets the value in its binary form directly. If you + * insert data that is not compatible with the underlying CQL type, you will get an {@code + * InvalidQueryException} at execute time. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public T setBytesUnsafe(int i, ByteBuffer v); + + /** + * Sets the {@code i}th value to the provided big integer. + * + *
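To illustrate the difference between `setBytes` and the `setBytesUnsafe` method documented above, here is a hedged sketch (hypothetical `ks.blobs` table, connected `Session` assumed):

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.nio.ByteBuffer;

// Hypothetical schema: CREATE TABLE ks.blobs (id int PRIMARY KEY, payload blob)
void bindBlob(Session session, byte[] payload) {
  PreparedStatement ps = session.prepare("INSERT INTO ks.blobs (id, payload) VALUES (?, ?)");
  BoundStatement bs = ps.bind().setInt(0, 1);

  // setBytes goes through the registered blob codec:
  bs.setBytes(1, ByteBuffer.wrap(payload));

  // setBytesUnsafe writes the buffer as-is, with no codec lookup and no type check;
  // if the bytes are not valid for the column's CQL type, the error only surfaces
  // server-side as an InvalidQueryException at execute time.
  // bs.setBytesUnsafe(1, ByteBuffer.wrap(payload));

  session.execute(bs);
}
```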

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code varint}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setVarint(int i, BigInteger v); + + /** + * Sets the {@code i}th value to the provided big decimal. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code decimal}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDecimal(int i, BigDecimal v); + + /** + * Sets the {@code i}th value to the provided UUID. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL types {@code uuid} and {@code timeuuid}, this will be the built-in + * codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setUUID(int i, UUID v); + + /** + * Sets the {@code i}th value to the provided inet address. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code inet}, this will be the built-in codec). + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setInet(int i, InetAddress v); + + /** + * Sets the {@code i}th value to the provided list. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java list is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link #setList(int, + * List, Class)} or {@link #setList(int, List, TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(int i, List v); + + /** + * Sets the {@code i}th value to the provided list, whose elements are of the provided Java class. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists + * of the given Java type to the underlying CQL type. + * + *

If the type of the elements is generic, use {@link #setList(int, List, TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsClass the class for the elements of the list. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(int i, List v, Class elementsClass); + + /** + * Sets the {@code i}th value to the provided list, whose elements are of the provided Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists + * of the given Java type to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsType the type for the elements of the list. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(int i, List v, TypeToken elementsType); + + /** + * Sets the {@code i}th value to the provided map. + * + *
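The `Class` and `TypeToken` overloads of `setList` above make the target element type explicit. A minimal sketch, assuming a hypothetical `ks.playlists` table and a connected `Session`; the `TypeToken` form is only needed when the element type is itself generic:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;

// Hypothetical schema:
// CREATE TABLE ks.playlists (id int PRIMARY KEY, tags list<text>, scores list<frozen<map<text, int>>>)
void bindLists(Session session) {
  PreparedStatement ps =
      session.prepare("INSERT INTO ks.playlists (id, tags, scores) VALUES (?, ?, ?)");

  List<String> tags = ImmutableList.of("rock", "indie");
  List<Map<String, Integer>> scores = ImmutableList.of(ImmutableMap.of("alice", 5));

  BoundStatement bs =
      ps.bind()
          .setInt(0, 1)
          // Non-generic element type: the Class overload is enough.
          .setList(1, tags, String.class)
          // Generic element type: capture it with a Guava TypeToken.
          .setList(2, scores, new TypeToken<Map<String, Integer>>() {});
  session.execute(bs);
}
```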

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java map is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link #setMap(int, + * Map, Class, Class)} or {@link #setMap(int, Map, TypeToken, TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(int i, Map v); + + /** + * Sets the {@code i}th value to the provided map, whose keys and values are of the provided Java + * classes. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of maps + of the given Java types to the underlying CQL type. + + *

If the type of the keys or values is generic, use {@link #setMap(int, Map, TypeToken, + * TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param keysClass the class for the keys of the map. + * @param valuesClass the class for the values of the map. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(int i, Map v, Class keysClass, Class valuesClass); + + /** + * Sets the {@code i}th value to the provided map, whose keys and values are of the provided Java + * types. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of maps + of the given Java types to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param keysType the type for the keys of the map. + * @param valuesType the type for the values of the map. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(int i, Map v, TypeToken keysType, TypeToken valuesType); + + /** + * Sets the {@code i}th value to the provided set. + * + *
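A usage sketch for the `setMap` overloads above, passing the key and value classes explicitly to avoid codec ambiguity (hypothetical `ks.profiles` table, connected `Session` assumed):

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.google.common.collect.ImmutableMap;
import java.util.Map;

// Hypothetical schema: CREATE TABLE ks.profiles (id int PRIMARY KEY, attributes map<text, text>)
void bindMap(Session session) {
  PreparedStatement ps =
      session.prepare("INSERT INTO ks.profiles (id, attributes) VALUES (?, ?)");
  Map<String, String> attributes = ImmutableMap.of("theme", "dark", "lang", "en");
  BoundStatement bs =
      ps.bind()
          .setInt(0, 1)
          // Passing the key and value classes removes any ambiguity if several codecs
          // target the same CQL map type.
          .setMap(1, attributes, String.class, String.class);
  session.execute(bs);
}
```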

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java set is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link #setSet(int, + * Set, Class)} or {@link #setSet(int, Set, TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(int i, Set v); + + /** + * Sets the {@code i}th value to the provided set, whose elements are of the provided Java class. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets + * of the given Java type to the underlying CQL type. + * + *

If the type of the elements is generic, use {@link #setSet(int, Set, TypeToken)}. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsClass the class for the elements of the set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(int i, Set v, Class elementsClass); + + /** + * Sets the {@code i}th value to the provided set, whose elements are of the provided Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets + * of the given Java type to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsType the type for the elements of the set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(int i, Set v, TypeToken elementsType); + + /** + * Sets the {@code i}th value to the provided UDT value. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of + * {@code UDTValue} to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setUDTValue(int i, UDTValue v); + + /** + * Sets the {@code i}th value to the provided tuple value. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of + * {@code TupleValue} to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTupleValue(int i, TupleValue v); + + /** + * Sets the {@code i}th value to {@code null}. + * + *
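For the `setUDTValue` and `setTupleValue` methods above, the values are built from the cluster metadata before being bound. A hedged sketch, assuming a hypothetical user-defined type and table and a connected `Cluster`/`Session`:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TupleType;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;

// Hypothetical schema:
// CREATE TYPE ks.address (street text, zip int);
// CREATE TABLE ks.users (id int PRIMARY KEY, address frozen<address>, location frozen<tuple<double, double>>)
void bindUdtAndTuple(Cluster cluster, Session session) {
  UserType addressType = cluster.getMetadata().getKeyspace("ks").getUserType("address");
  UDTValue address = addressType.newValue().setString("street", "1 Main St").setInt("zip", 94105);

  TupleType coordinates = cluster.getMetadata().newTupleType(DataType.cdouble(), DataType.cdouble());
  TupleValue location = coordinates.newValue(37.77, -122.42);

  PreparedStatement ps =
      session.prepare("INSERT INTO ks.users (id, address, location) VALUES (?, ?, ?)");
  session.execute(ps.bind().setInt(0, 1).setUDTValue(1, address).setTupleValue(2, location));
}
```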

This is mainly intended for CQL types which map to native Java types. + * + * @param i the index of the value to set. + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public T setToNull(int i); + + /** + * Sets the {@code i}th value to the provided value of the provided Java class. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the + * provided Java class to the underlying CQL type. + * + *

If the Java type is generic, use {@link #set(int, Object, TypeToken)} instead. + * + * @param i the index of the value to set. + * @param v the value to set; may be {@code null}. + * @param targetClass The Java class to convert to; must not be {@code null}; + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + T set(int i, V v, Class targetClass); + + /** + * Sets the {@code i}th value to the provided value of the provided Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the + * provided Java type to the underlying CQL type. + * + * @param i the index of the value to set. + * @param v the value to set; may be {@code null}. + * @param targetType The Java type to convert to; must not be {@code null}; + * @return this object. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + T set(int i, V v, TypeToken targetType); + + /** + * Sets the {@code i}th value to the provided value, converted using the given {@link TypeCodec}. + * + *
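The generic `set(int, Object, Class)` and `set(int, Object, TypeToken)` methods above complement the typed setters: they accept boxed nulls and arbitrary parameterized types. A minimal sketch (hypothetical `ks.sensors` table, connected `Session` assumed):

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.google.common.reflect.TypeToken;
import java.util.Arrays;
import java.util.List;

// Hypothetical schema: CREATE TABLE ks.sensors (id int PRIMARY KEY, reading double, samples list<double>)
void bindWithGenericSetters(Session session) {
  PreparedStatement ps =
      session.prepare("INSERT INTO ks.sensors (id, reading, samples) VALUES (?, ?, ?)");
  BoundStatement bs = ps.bind().setInt(0, 1);

  // Unlike setDouble(int, double), the generic setter takes a boxed value and accepts null:
  Double reading = null;
  bs.set(1, reading, Double.class);

  // For parameterized Java types, a TypeToken captures the full generic signature:
  List<Double> samples = Arrays.asList(1.5, 2.5);
  bs.set(2, samples, new TypeToken<List<Double>>() {});

  session.execute(bs);
}
```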

This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the + * given codec instead. This can be useful if the codec would collide with a previously registered + * one, or if you want to use the codec just once without registering it. + * + *

It is the caller's responsibility to ensure that the given codec {@link + * TypeCodec#accepts(DataType) accepts} the underlying CQL type; failing to do so may result in + * {@link InvalidTypeException}s being thrown. + * + * @param i the index of the value to set. + * @param v the value to set; may be {@code null}. + * @param codec The {@link TypeCodec} to use to serialize the value; may not be {@code null}. + * @return this object. + * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) + * accept} the underlying CQL type. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + T set(int i, V v, TypeCodec codec); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java index 1aaa2292b02..f9ae68525d1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableByNameData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,606 +20,600 @@ import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; -/** - * Collection of (typed) CQL values that can set by name. - */ +/** Collection of (typed) CQL values that can set by name. */ public interface SettableByNameData> { - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided boolean. - *
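Stepping back to the `set(int, Object, TypeCodec)` overload that closes the by-index interface above: it bypasses the registry entirely and uses the supplied codec as-is. A hedged sketch (hypothetical `ks.docs` table, connected `Session` assumed):

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;

// Hypothetical schema: CREATE TABLE ks.docs (id int PRIMARY KEY, body text)
void bindWithExplicitCodec(Session session) {
  PreparedStatement ps = session.prepare("INSERT INTO ks.docs (id, body) VALUES (?, ?)");
  BoundStatement bs = ps.bind().setInt(0, 1);

  // No CodecRegistry lookup: the value is serialized with this exact codec.
  // The caller must ensure the codec accepts the column's CQL type (text here),
  // otherwise an InvalidTypeException is thrown.
  bs.set(1, "hello", TypeCodec.varchar());

  session.execute(bs);
}
```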

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code boolean}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Boolean.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setBool(String name, boolean v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided byte. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code tinyint}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Byte.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setByte(String name, byte v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided short. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code smallint}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Short.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setShort(String name, short v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code int}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Integer.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setInt(String name, int v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided long. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code bigint}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Long.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setLong(String name, long v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided date. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code timestamp}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTimestamp(String name, Date v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided date (without time). - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code date}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDate(String name, LocalDate v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided time as a long in nanoseconds since midnight. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code time}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTime(String name, long v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided float. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code float}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Float.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setFloat(String name, float v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided double. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code double}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or - * {@code set(name, v, Double.class)}. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDouble(String name, double v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided string. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL types {@code text}, {@code varchar} and {@code ascii}, - * this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setString(String name, String v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided byte buffer. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code blob}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setBytes(String name, ByteBuffer v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided byte buffer. - *

- * This method does not use any codec; it sets the value in its binary form directly. If you insert - * data that is not compatible with the underlying CQL type, you will get an {@code InvalidQueryException} at - * execute time. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - */ - public T setBytesUnsafe(String name, ByteBuffer v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided big integer. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code varint}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setVarint(String name, BigInteger v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided big decimal. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code decimal}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setDecimal(String name, BigDecimal v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided UUID. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL types {@code uuid} and {@code timeuuid}, this will - * be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setUUID(String name, UUID v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided inet address. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (for CQL type {@code inet}, this will be the built-in codec). - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setInet(String name, InetAddress v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided list. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java list is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setList(String, List, Class)} or {@link #setList(String, List, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(String name, List v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided list, - * which elements are of the provided Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java type to the underlying CQL type. - *

- * If the type of the elements is generic, use {@link #setList(String, List, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsClass the class for the elements of the list. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(String name, List v, Class elementsClass); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided list, - * which elements are of the provided Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java type to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsType the type for the elements of the list. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setList(String name, List v, TypeToken elementsType); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided map. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java map is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setMap(String, Map, Class, Class)} or {@link #setMap(String, Map, TypeToken, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(String name, Map v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided map, - * which keys and values are of the provided Java classes. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java types to the underlying CQL type. - *

- * If the type of the keys or values is generic, use {@link #setMap(String, Map, TypeToken, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param keysClass the class for the keys of the map. - * @param valuesClass the class for the values of the map. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(String name, Map v, Class keysClass, Class valuesClass); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided map, - * which keys and values are of the provided Java types. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists of the given - * Java types to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param keysType the type for the keys of the map. - * @param valuesType the type for the values of the map. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setMap(String name, Map v, TypeToken keysType, TypeToken valuesType); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided set. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion - * to the underlying CQL type (the type of the elements in the Java set is not considered). - * If two or more codecs target that CQL type, the one that was first registered will be used. - * For this reason, it is generally preferable to use the more deterministic methods - * {@link #setSet(String, Set, Class)} or {@link #setSet(String, Set, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(String name, Set v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided set, - * which elements are of the provided Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets of the given - * Java type to the underlying CQL type. - *

- * If the type of the elements is generic, use {@link #setSet(String, Set, TypeToken)}. - * - * @param name the name of the value to set; if {@code name} is present multiple - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsClass the class for the elements of the set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(String name, Set v, Class elementsClass); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided set, - * which elements are of the provided Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets of the given - * Java type to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * @param v the value to set. Note that {@code null} values inside collections are not supported by CQL. - * @param elementsType the type for the elements of the set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in collections - * by CQL. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setSet(String name, Set v, TypeToken elementsType); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided UDT value. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of {@code UDTValue} - * to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setUDTValue(String name, UDTValue v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the - * provided tuple value. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of {@code TupleValue} - * to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - public T setTupleValue(String name, TupleValue v); - - /** - * Sets the value for (all occurrences of) variable {@code name} to {@code null}. - *

- * This is mainly intended for CQL types which map to native Java types. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - */ - public T setToNull(String name); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided value of the provided Java class. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the provided Java class - * to the underlying CQL type. - *

- * If the Java type is generic, use {@link #set(String, Object, TypeToken)} instead. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set; may be {@code null}. - * @param targetClass The Java class to convert to; must not be {@code null}; - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - T set(String name, V v, Class targetClass); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided value of the provided Java type. - *

- * This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the provided Java type - * to the underlying CQL type. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set; may be {@code null}. - * @param targetType The Java type to convert to; must not be {@code null}; - * @return this object. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - * @throws CodecNotFoundException if there is no registered codec to convert the value to the - * underlying CQL type. - */ - T set(String name, V v, TypeToken targetType); - - /** - * Sets the value for (all occurrences of) variable {@code name} to the provided value, - * converted using the given {@link TypeCodec}. - *

- * This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the given codec instead. - * This can be useful if the codec would collide with a previously registered one, or if you want to use the - * codec just once without registering it. - *

- * It is the caller's responsibility to ensure that the given codec {@link TypeCodec#accepts(DataType) accepts} - * the underlying CQL type; failing to do so may result in {@link InvalidTypeException}s being thrown. - * - * @param name the name of the value to set; if {@code name} is present multiple - * times, all its values are set. - * @param v the value to set; may be {@code null}. - * @param codec The {@link TypeCodec} to use to serialize the value; may not be {@code null}. - * @return this object. - * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) accept} the underlying CQL type. - * @throws IllegalArgumentException if {@code name} is not a valid name for this object. - */ - T set(String name, V v, TypeCodec codec); - + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided boolean. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code boolean}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Boolean.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setBool(String name, boolean v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided byte. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code tinyint}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Byte.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setByte(String name, byte v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided short. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code smallint}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Short.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setShort(String name, short v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided integer. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code int}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Integer.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setInt(String name, int v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided long. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code bigint}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Long.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setLong(String name, long v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided date. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code timestamp}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTimestamp(String name, Date v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided date (without + * time). + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code date}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDate(String name, LocalDate v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided time as a long in + * nanoseconds since midnight. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code time}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTime(String name, long v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided float. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code float}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Float.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setFloat(String name, float v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided double. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code double}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. To set the value to NULL, use {@link #setToNull(String)} or {@code + * set(name, v, Double.class)}. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDouble(String name, double v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided string. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL types {@code text}, {@code varchar} and {@code ascii}, this will + * be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setString(String name, String v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided byte buffer. + * + *
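For orientation (not part of this patch), a minimal sketch of how these by-name scalar setters are typically chained on a bound statement; the keyspace, table and column names are made up for illustration:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import java.util.Date;

class ScalarSettersSketch {
  // Hypothetical schema: CREATE TABLE users (id int PRIMARY KEY, name text, active boolean, created timestamp)
  static void insertUser(Session session) {
    PreparedStatement ps =
        session.prepare(
            "INSERT INTO users (id, name, active, created) VALUES (:id, :name, :active, :created)");
    BoundStatement bound =
        ps.bind()
            .setInt("id", 42)                     // CQL int, built-in codec
            .setString("name", "alice")           // CQL text
            .setBool("active", true)              // CQL boolean
            .setTimestamp("created", new Date()); // CQL timestamp
    session.execute(bound);
  }
}
```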

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code blob}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setBytes(String name, ByteBuffer v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided byte buffer. + * + *

This method does not use any codec; it sets the value in its binary form directly. If you + * insert data that is not compatible with the underlying CQL type, you will get an {@code + * InvalidQueryException} at execute time. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public T setBytesUnsafe(String name, ByteBuffer v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided big integer. + * + *
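A small illustrative contrast between the codec-backed and the unsafe variant (sketch only; the `payload` blob column is hypothetical):

```java
import com.datastax.driver.core.BoundStatement;
import java.nio.ByteBuffer;

class BytesSketch {
  static void setPayload(BoundStatement bound) {
    ByteBuffer payload = ByteBuffer.wrap(new byte[] {0x01, 0x02, 0x03});
    // Goes through the codec registered for the column's CQL type (blob here):
    bound.setBytes("payload", payload);
    // Bypasses codecs entirely: the buffer is written as-is, so it must already be a
    // valid serialized value for the column's CQL type, or the server rejects it at execute time.
    bound.setBytesUnsafe("payload", payload);
  }
}
```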

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code varint}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setVarint(String name, BigInteger v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided big decimal. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code decimal}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setDecimal(String name, BigDecimal v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided UUID. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL types {@code uuid} and {@code timeuuid}, this will be the built-in + * codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setUUID(String name, UUID v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided inet address. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (for CQL type {@code inet}, this will be the built-in codec). + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setInet(String name, InetAddress v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided list. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java list is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link + * #setList(String, List, Class)} or {@link #setList(String, List, TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(String name, List v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided list, which + * elements are of the provided Java class. + * + *
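To illustrate why the typed overloads are generally preferable, a sketch with a hypothetical `tags` column of type `list<text>`:

```java
import com.datastax.driver.core.BoundStatement;
import java.util.Arrays;
import java.util.List;

class ListSettersSketch {
  static void setTags(BoundStatement bound) {
    List<String> tags = Arrays.asList("red", "green");
    // Codec chosen from the CQL type alone; ambiguous if several codecs target list<text>:
    bound.setList("tags", tags);
    // Deterministic: the element class participates in codec selection:
    bound.setList("tags", tags, String.class);
  }
}
```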

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists + * of the given Java type to the underlying CQL type. + * + *

If the type of the elements is generic, use {@link #setList(String, List, TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsClass the class for the elements of the list. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(String name, List v, Class elementsClass); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided list, whose + * elements are of the provided Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of lists + * of the given Java type to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsType the type for the elements of the list. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setList(String name, List v, TypeToken elementsType); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided map. + * + *
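When the element type is itself generic, a Guava TypeToken carries the full type information, roughly like this (the `groups` column, e.g. of type `list<frozen<list<text>>>`, is hypothetical):

```java
import com.datastax.driver.core.BoundStatement;
import com.google.common.reflect.TypeToken;
import java.util.Arrays;
import java.util.List;

class GenericListSketch {
  static void setGroups(BoundStatement bound) {
    List<List<String>> groups = Arrays.asList(Arrays.asList("a", "b"), Arrays.asList("c"));
    // The anonymous TypeToken subclass captures List<String> as the element type:
    bound.setList("groups", groups, new TypeToken<List<String>>() {});
  }
}
```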

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java map is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link #setMap(String, + * Map, Class, Class)} or {@link #setMap(String, Map, TypeToken, TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(String name, Map v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided map, which keys + * and values are of the provided Java classes. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of maps + * of the given Java types to the underlying CQL type. + * + *

If the type of the keys or values is generic, use {@link #setMap(String, Map, TypeToken, + * TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param keysClass the class for the keys of the map. + * @param valuesClass the class for the values of the map. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(String name, Map v, Class keysClass, Class valuesClass); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided map, which keys + * and values are of the provided Java types. + * + *
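A corresponding sketch for maps with non-generic key and value classes (the `scores` column of type `map<text, int>` is hypothetical):

```java
import com.datastax.driver.core.BoundStatement;
import java.util.HashMap;
import java.util.Map;

class MapSettersSketch {
  static void setScores(BoundStatement bound) {
    Map<String, Integer> scores = new HashMap<String, Integer>();
    scores.put("level1", 10);
    // The key and value classes make the codec lookup deterministic:
    bound.setMap("scores", scores, String.class, Integer.class);
  }
}
```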

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of maps + * of the given Java types to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param keysType the type for the keys of the map. + * @param valuesType the type for the values of the map. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setMap(String name, Map v, TypeToken keysType, TypeToken valuesType); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided set. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion to the + * underlying CQL type (the type of the elements in the Java set is not considered). If two or + * more codecs target that CQL type, the one that was first registered will be used. For this + * reason, it is generally preferable to use the more deterministic methods {@link #setSet(String, + * Set, Class)} or {@link #setSet(String, Set, TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(String name, Set v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided set, which + * elements are of the provided Java class. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets + * of the given Java type to the underlying CQL type. + * + *

If the type of the elements is generic, use {@link #setSet(String, Set, TypeToken)}. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsClass the class for the elements of the set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(String name, Set v, Class elementsClass); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided set, whose + * elements are of the provided Java type. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of sets + * of the given Java type to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. Note that {@code null} values inside collections are not supported + * by CQL. + * @param elementsType the type for the elements of the set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws NullPointerException if {@code v} contains null values. Nulls are not supported in + * collections by CQL. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setSet(String name, Set v, TypeToken elementsType); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided UDT value. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of + * {@code UDTValue} to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setUDTValue(String name, UDTValue v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided tuple value. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of + * {@code TupleValue} to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + public T setTupleValue(String name, TupleValue v); + + /** + * Sets the value for (all occurrences of) variable {@code name} to {@code null}. + * + *

This is mainly intended for CQL types which map to native Java types. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + public T setToNull(String name); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided value of the + * provided Java class. + * + *

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the + * provided Java class to the underlying CQL type. + * + *

If the Java type is generic, use {@link #set(String, Object, TypeToken)} instead. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set; may be {@code null}. + * @param targetClass The Java class to convert to; must not be {@code null}; + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + T set(String name, V v, Class targetClass); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided value of the + * provided Java type. + * + *
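A sketch of the generic set variants (column names are hypothetical; the TypeToken form is only needed when the target Java type has type parameters):

```java
import com.datastax.driver.core.BoundStatement;
import com.google.common.reflect.TypeToken;
import java.util.Arrays;
import java.util.List;

class GenericSetSketch {
  static void setValues(BoundStatement bound) {
    // Non-generic target type:
    bound.set("name", "alice", String.class);
    // Generic target type, e.g. when a custom codec maps the column to List<String>:
    bound.set("aliases", Arrays.asList("al", "ali"), new TypeToken<List<String>>() {});
  }
}
```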

This method uses the {@link CodecRegistry} to find a codec to handle the conversion of the + * provided Java type to the underlying CQL type. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set; may be {@code null}. + * @param targetType The Java type to convert to; must not be {@code null}; + * @return this object. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + * @throws CodecNotFoundException if there is no registered codec to convert the value to the + * underlying CQL type. + */ + T set(String name, V v, TypeToken targetType); + + /** + * Sets the value for (all occurrences of) variable {@code name} to the provided value, converted + * using the given {@link TypeCodec}. + * + *

This method entirely bypasses the {@link CodecRegistry} and forces the driver to use the + * given codec instead. This can be useful if the codec would collide with a previously registered + * one, or if you want to use the codec just once without registering it. + * + *
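A one-off codec can also be passed directly, bypassing the registry; this sketch assumes the built-in varchar codec fits the target column:

```java
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.TypeCodec;

class ExplicitCodecSketch {
  static void setName(BoundStatement bound) {
    // The given codec is used for this call only and is never registered:
    bound.set("name", "alice", TypeCodec.varchar());
  }
}
```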

It is the caller's responsibility to ensure that the given codec {@link + * TypeCodec#accepts(DataType) accepts} the underlying CQL type; failing to do so may result in + * {@link InvalidTypeException}s being thrown. + * + * @param name the name of the value to set; if {@code name} is present multiple times, all its + * values are set. + * @param v the value to set; may be {@code null}. + * @param codec The {@link TypeCodec} to use to serialize the value; may not be {@code null}. + * @return this object. + * @throws InvalidTypeException if the given codec does not {@link TypeCodec#accepts(DataType) + * accept} the underlying CQL type. + * @throws IllegalArgumentException if {@code name} is not a valid name for this object. + */ + T set(String name, V v, TypeCodec codec); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SettableData.java b/driver-core/src/main/java/com/datastax/driver/core/SettableData.java index 878c025faa0..f9ac384820e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SettableData.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SettableData.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +20,5 @@ /** * Collection of (typed) CQL values that can be set either by index (starting at zero) or by name. */ -public interface SettableData> extends SettableByIndexData, SettableByNameData { -} +public interface SettableData> + extends SettableByIndexData, SettableByNameData {} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java index a59bfffda7c..d8d41e1d9c7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleJSONParser.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,159 +17,145 @@ */ package com.datastax.driver.core; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** - * A very simple json parser. - * The only reason we need to read json in the driver is because for - * historical reason Cassandra encodes a few properties using json in - * the schema and we need to decode them. - *

- * We however don't need a full-blown JSON library because: - * 1) we know we only need to decode string lists and string maps - * 2) we can basically assume the input is valid, we don't particularly - * have to bother about decoding exactly JSON as long as we at least - * decode what we need. - * 3) we don't really care much about performance, none of this is done - * in performance sensitive parts. - *

- * So instead of pulling a new dependency, we roll out our own very dumb - * parser. We should obviously not expose this publicly. + * A very simple json parser. The only reason we need to read json in the driver is because for + * historical reasons Cassandra encodes a few properties using json in the schema and we need to + * decode them. + * + *

We however don't need a full-blown JSON library because: 1) we know we only need to decode + * string lists and string maps 2) we can basically assume the input is valid, we don't particularly + * have to bother about decoding exactly JSON as long as we at least decode what we need. 3) we + * don't really care much about performance, none of this is done in performance sensitive parts. + * + *

So instead of pulling a new dependency, we roll out our own very dumb parser. We should + * obviously not expose this publicly. */ class SimpleJSONParser { - private final String input; - private int idx; + private final String input; + private int idx; - private SimpleJSONParser(String input) { - this.input = input; - } + private SimpleJSONParser(String input) { + this.input = input; + } - public static List parseStringList(String input) { - if (input == null || input.isEmpty()) - return Collections.emptyList(); + public static List parseStringList(String input) { + if (input == null || input.isEmpty()) return Collections.emptyList(); - List output = new ArrayList(); - SimpleJSONParser parser = new SimpleJSONParser(input); - if (parser.nextCharSkipSpaces() != '[') - throw new IllegalArgumentException("Not a JSON list: " + input); + List output = new ArrayList(); + SimpleJSONParser parser = new SimpleJSONParser(input); + if (parser.nextCharSkipSpaces() != '[') + throw new IllegalArgumentException("Not a JSON list: " + input); - char c = parser.nextCharSkipSpaces(); - if (c == ']') - return output; + char c = parser.nextCharSkipSpaces(); + if (c == ']') return output; - while (true) { - assert c == '"'; - output.add(parser.nextString()); - c = parser.nextCharSkipSpaces(); - if (c == ']') - return output; - assert c == ','; - c = parser.nextCharSkipSpaces(); - } + while (true) { + assert c == '"'; + output.add(parser.nextString()); + c = parser.nextCharSkipSpaces(); + if (c == ']') return output; + assert c == ','; + c = parser.nextCharSkipSpaces(); } + } - public static Map parseStringMap(String input) { - if (input == null || input.isEmpty()) - return Collections.emptyMap(); + public static Map parseStringMap(String input) { + if (input == null || input.isEmpty()) return Collections.emptyMap(); - Map output = new HashMap(); - SimpleJSONParser parser = new SimpleJSONParser(input); - if (parser.nextCharSkipSpaces() != '{') - throw new IllegalArgumentException("Not a JSON map: " + input); + Map output = new HashMap(); + SimpleJSONParser parser = new SimpleJSONParser(input); + if (parser.nextCharSkipSpaces() != '{') + throw new IllegalArgumentException("Not a JSON map: " + input); - char c = parser.nextCharSkipSpaces(); - if (c == '}') - return output; + char c = parser.nextCharSkipSpaces(); + if (c == '}') return output; - while (true) { - assert c == '"'; - String key = parser.nextString(); - c = parser.nextCharSkipSpaces(); - assert c == ':'; - c = parser.nextCharSkipSpaces(); - assert c == '"'; - String value = parser.nextString(); - output.put(key, value); - c = parser.nextCharSkipSpaces(); - if (c == '}') - return output; - assert c == ','; - c = parser.nextCharSkipSpaces(); - } + while (true) { + assert c == '"'; + String key = parser.nextString(); + c = parser.nextCharSkipSpaces(); + assert c == ':'; + c = parser.nextCharSkipSpaces(); + assert c == '"'; + String value = parser.nextString(); + output.put(key, value); + c = parser.nextCharSkipSpaces(); + if (c == '}') return output; + assert c == ','; + c = parser.nextCharSkipSpaces(); } + } - /** - * Read the next char, the one at position idx, and advance ix. - */ - private char nextChar() { - if (idx >= input.length()) - throw new IllegalArgumentException("Invalid json input: " + input); - return input.charAt(idx++); - } + /** Read the next char, the one at position idx, and advance ix. 
*/ + private char nextChar() { + if (idx >= input.length()) throw new IllegalArgumentException("Invalid json input: " + input); + return input.charAt(idx++); + } - /** - * Same as nextChar, except that it skips space characters (' ', '\t' and '\n'). - */ - private char nextCharSkipSpaces() { - char c = nextChar(); - while (c == ' ' || c == '\t' || c == '\n') - c = nextChar(); - return c; - } + /** Same as nextChar, except that it skips space characters (' ', '\t' and '\n'). */ + private char nextCharSkipSpaces() { + char c = nextChar(); + while (c == ' ' || c == '\t' || c == '\n') c = nextChar(); + return c; + } - /** - * Reads a String, assuming idx is on the first character of the string (i.e. the - * one after the opening double-quote character). - * After the string has been read, idx will be on the first character after - * the closing double-quote. - */ - private String nextString() { - assert input.charAt(idx - 1) == '"' : "Char is '" + input.charAt(idx - 1) + '\''; - StringBuilder sb = new StringBuilder(); - while (true) { - char c = nextChar(); - switch (c) { - case '\n': - case '\r': - throw new IllegalArgumentException("Unterminated string"); - case '\\': - c = nextChar(); - switch (c) { - case 'b': - sb.append('\b'); - break; - case 't': - sb.append('\t'); - break; - case 'n': - sb.append('\n'); - break; - case 'f': - sb.append('\f'); - break; - case 'r': - sb.append('\r'); - break; - case 'u': - sb.append((char) Integer.parseInt(input.substring(idx, idx + 4), 16)); - idx += 4; - break; - case '"': - case '\'': - case '\\': - case '/': - sb.append(c); - break; - default: - throw new IllegalArgumentException("Illegal escape"); - } - break; - default: - if (c == '"') - return sb.toString(); - sb.append(c); - } - } + /** + * Reads a String, assuming idx is on the first character of the string (i.e. the one after the + * opening double-quote character). After the string has been read, idx will be on the first + * character after the closing double-quote. + */ + private String nextString() { + assert input.charAt(idx - 1) == '"' : "Char is '" + input.charAt(idx - 1) + '\''; + StringBuilder sb = new StringBuilder(); + while (true) { + char c = nextChar(); + switch (c) { + case '\n': + case '\r': + throw new IllegalArgumentException("Unterminated string"); + case '\\': + c = nextChar(); + switch (c) { + case 'b': + sb.append('\b'); + break; + case 't': + sb.append('\t'); + break; + case 'n': + sb.append('\n'); + break; + case 'f': + sb.append('\f'); + break; + case 'r': + sb.append('\r'); + break; + case 'u': + sb.append((char) Integer.parseInt(input.substring(idx, idx + 4), 16)); + idx += 4; + break; + case '"': + case '\'': + case '\\': + case '/': + sb.append(c); + break; + default: + throw new IllegalArgumentException("Illegal escape"); + } + break; + default: + if (c == '"') return sb.toString(); + sb.append(c); + } } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java index 6cf37e80f23..5b65957af2c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,391 +18,395 @@ package com.datastax.driver.core; import com.datastax.driver.core.exceptions.InvalidTypeException; - import java.nio.ByteBuffer; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; -/** - * A simple {@code RegularStatement} implementation built directly from a query - * string. - */ +/** A simple {@code RegularStatement} implementation built directly from a query string. */ public class SimpleStatement extends RegularStatement { - private final String query; - private final Object[] values; - private final Map namedValues; + private final String query; + private final Object[] values; + private final Map namedValues; - private volatile ByteBuffer routingKey; - private volatile String keyspace; + private volatile ByteBuffer routingKey; + private volatile String keyspace; - /** - * Creates a new {@code SimpleStatement} with the provided query string (and no values). - * - * @param query the query string. - */ - public SimpleStatement(String query) { - this(query, (Object[]) null); - } + /** + * Creates a new {@code SimpleStatement} with the provided query string (and no values). + * + * @param query the query string. + */ + public SimpleStatement(String query) { + this(query, (Object[]) null); + } - /** - * Creates a new {@code SimpleStatement} with the provided query string and values. - *

- * This version of SimpleStatement is useful when you want to execute a - * query only once (and thus do not want to resort to prepared statement), but - * do not want to convert all column values to string (typically, if you have blob - * values, encoding them to a hexadecimal string is not very efficient). In - * that case, you can provide a query string with bind markers to this constructor - * along with the values for those bind variables. When executed, the server will - * prepare the provided, bind the provided values to that prepare statement and - * execute the resulting statement. Thus, - *

-     *   session.execute(new SimpleStatement(query, value1, value2, value3));
-     * 
- * is functionally equivalent to - *
-     *   PreparedStatement ps = session.prepare(query);
-     *   session.execute(ps.bind(value1, value2, value3));
-     * 
- * except that the former version: - *
    - *
  • Requires only one round-trip to a Cassandra node.
  • - *
  • Does not left any prepared statement stored in memory (neither client or - * server side) once it has been executed.
  • - *
- *

- * Note that the types of the {@code values} provided to this method will - * not be validated by the driver as is done by {@link BoundStatement#bind} since - * {@code query} is not parsed (and hence the driver cannot know what those values - * should be). The codec to serialize each value will be chosen in the codec registry - * associated with the cluster executing this statement, based on the value's Java type - * (this is the equivalent to calling {@link CodecRegistry#codecFor(Object)}). - * If too many or too few values are provided, or if a value is not a valid one for - * the variable it is bound to, an - * {@link com.datastax.driver.core.exceptions.InvalidQueryException} will be thrown - * by Cassandra at execution time. A {@code CodecNotFoundException} may be - * thrown by this constructor however, if the codec registry does not know how to - * handle one of the values. - *

- * If you have a single value of type {@code Map}, you can't call this - * constructor using the varargs syntax, because the signature collides with - * {@link #SimpleStatement(String, Map)}. To prevent this, pass an explicit - * array object: - *

-     * new SimpleStatement("...", new Object[]{m});
-     * 
- * - * @param query the query string. - * @param values values required for the execution of {@code query}. - * @throws IllegalArgumentException if the number of values is greater than 65535. - */ - public SimpleStatement(String query, Object... values) { - if (values != null && values.length > 65535) - throw new IllegalArgumentException("Too many values, the maximum allowed is 65535"); - this.query = query; - this.values = values; - this.namedValues = null; - } + /** + * Creates a new {@code SimpleStatement} with the provided query string and values. + * + *

This version of SimpleStatement is useful when you want to execute a query only once (and + * thus do not want to resort to a prepared statement), but do not want to convert all column values + * to string (typically, if you have blob values, encoding them to a hexadecimal string is not + * very efficient). In that case, you can provide a query string with bind markers to this + * constructor along with the values for those bind variables. When executed, the server will + * prepare the provided query, bind the provided values to that prepared statement and execute the + * resulting statement. Thus, + * + *

+   *   session.execute(new SimpleStatement(query, value1, value2, value3));
+   * 
+ * + * is functionally equivalent to + * + *
+   *   PreparedStatement ps = session.prepare(query);
+   *   session.execute(ps.bind(value1, value2, value3));
+   * 
+ * + * except that the former version: + * + *
    + *
  • Requires only one round-trip to a Cassandra node. + *
  • Does not leave any prepared statement stored in memory (neither client nor server side) + * once it has been executed. + *
+ * + *

Note that the types of the {@code values} provided to this method will not be validated by + * the driver as is done by {@link BoundStatement#bind} since {@code query} is not parsed (and + * hence the driver cannot know what those values should be). The codec to serialize each value + * will be chosen in the codec registry associated with the cluster executing this statement, + * based on the value's Java type (this is the equivalent to calling {@link + * CodecRegistry#codecFor(Object)}). If too many or too few values are provided, or if a value is + * not a valid one for the variable it is bound to, an {@link + * com.datastax.driver.core.exceptions.InvalidQueryException} will be thrown by Cassandra at + * execution time. A {@code CodecNotFoundException} may be thrown by this constructor however, if + * the codec registry does not know how to handle one of the values. + * + *
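As a usage sketch (the contact point, keyspace and table are hypothetical), positional values are passed directly to the constructor and serialized with the codecs of the cluster that executes the statement:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class PositionalValuesSketch {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect("ks");
    // One round-trip; nothing is prepared or cached on either side:
    session.execute(
        new SimpleStatement("INSERT INTO users (id, name) VALUES (?, ?)", 42, "alice"));
    cluster.close();
  }
}
```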

If you have a single value of type {@code Map}, you can't call this + * constructor using the varargs syntax, because the signature collides with {@link + * #SimpleStatement(String, Map)}. To prevent this, pass an explicit array object: + * + *

+   * new SimpleStatement("...", new Object[]{m});
+   * 
+ * + * @param query the query string. + * @param values values required for the execution of {@code query}. + * @throws IllegalArgumentException if the number of values is greater than 65535. + */ + public SimpleStatement(String query, Object... values) { + if (values != null && values.length > 65535) + throw new IllegalArgumentException("Too many values, the maximum allowed is 65535"); + this.query = query; + this.values = values; + this.namedValues = null; + } - /** - * Creates a new {@code SimpleStatement} with the provided query string and named values. - *

- * This constructor requires that the query string use named placeholders, for example: - *

{@code
-     * new SimpleStatement("SELECT * FROM users WHERE id = :i", ImmutableMap.of("i", 1));}
-     * 
- * Make sure that the map is correctly typed {@code Map}, otherwise you might - * accidentally call {@link #SimpleStatement(String, Object...)} with a positional value of type map. - *

- * The types of the values will be handled the same way as with anonymous placeholders (see - * {@link #SimpleStatement(String, Object...)}). - *

- * Simple statements with named values are only supported starting with native protocol - * {@link ProtocolVersion#V3 v3}. With earlier versions, an - * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} will be thrown at execution time. - * - * @param query the query string. - * @param values named values required for the execution of {@code query}. - * @throws IllegalArgumentException if the number of values is greater than 65535. - */ - public SimpleStatement(String query, Map values) { - if (values.size() > 65535) - throw new IllegalArgumentException("Too many values, the maximum allowed is 65535"); - this.query = query; - this.values = null; - this.namedValues = values; - } + /** + * Creates a new {@code SimpleStatement} with the provided query string and named values. + * + *

This constructor requires that the query string use named placeholders, for example: + * + *

{@code
+   * new SimpleStatement("SELECT * FROM users WHERE id = :i", ImmutableMap.of("i", 1));
+   * }
+ * + * Make sure that the map is correctly typed {@code Map}, otherwise you might + * accidentally call {@link #SimpleStatement(String, Object...)} with a positional value of type + * map. + * + *

The types of the values will be handled the same way as with anonymous placeholders (see + * {@link #SimpleStatement(String, Object...)}). + * + *

Simple statements with named values are only supported starting with native protocol {@link + * ProtocolVersion#V3 v3}. With earlier versions, an {@link + * com.datastax.driver.core.exceptions.UnsupportedFeatureException} will be thrown at execution + * time. + * + * @param query the query string. + * @param values named values required for the execution of {@code query}. + * @throws IllegalArgumentException if the number of values is greater than 65535. + */ + public SimpleStatement(String query, Map values) { + if (values.size() > 65535) + throw new IllegalArgumentException("Too many values, the maximum allowed is 65535"); + this.query = query; + this.values = null; + this.namedValues = values; + } - @Override - public String getQueryString(CodecRegistry codecRegistry) { - return query; - } + @Override + public String getQueryString(CodecRegistry codecRegistry) { + return query; + } - @Override - public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - if (values == null) - return null; - return convert(values, protocolVersion, codecRegistry); - } + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + if (values == null) return null; + return convert(values, protocolVersion, codecRegistry); + } - @Override - public Map getNamedValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - if (namedValues == null) - return null; - return convert(namedValues, protocolVersion, codecRegistry); - } + @Override + public Map getNamedValues( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + if (namedValues == null) return null; + return convert(namedValues, protocolVersion, codecRegistry); + } - /** - * The number of values for this statement, that is the size of the array - * that will be returned by {@code getValues}. - * - * @return the number of values. - */ - public int valuesCount() { - if (values != null) - return values.length; - else if (namedValues != null) - return namedValues.size(); - else - return 0; - } + /** + * The number of values for this statement, that is the size of the array that will be returned by + * {@code getValues}. + * + * @return the number of values. + */ + public int valuesCount() { + if (values != null) return values.length; + else if (namedValues != null) return namedValues.size(); + else return 0; + } - @Override - public boolean hasValues(CodecRegistry codecRegistry) { - return (values != null && values.length > 0) - || (namedValues != null && namedValues.size() > 0); - } + @Override + public boolean hasValues(CodecRegistry codecRegistry) { + return (values != null && values.length > 0) || (namedValues != null && namedValues.size() > 0); + } - @Override - public boolean usesNamedValues() { - return namedValues != null && namedValues.size() > 0; - } + @Override + public boolean usesNamedValues() { + return namedValues != null && namedValues.size() > 0; + } - /** - * Returns the {@code i}th positional value as the Java type matching its CQL type. - *
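A sketch of the named-values form (protocol v3 or higher; the explicit `<String, Object>` type witness keeps the map from being treated as a single positional value; the keyspace and table are hypothetical):

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.google.common.collect.ImmutableMap;

class NamedValuesSketch {
  static void queryById(Session session) {
    SimpleStatement stmt =
        new SimpleStatement(
            "SELECT * FROM users WHERE id = :i", ImmutableMap.<String, Object>of("i", 42));
    session.execute(stmt);
  }
}
```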

- * Note that, unlike with other driver types like {@link Row}, you can't retrieve named values by position. This - * getter will throw an exception if the statement was created with named values (or no values at all). Call - * {@link #usesNamedValues()} to check the type of values, and {@link #getObject(String)} if they are positional. - * - * @param i the index to retrieve. - * @return the {@code i}th value of this statement. - * @throws IllegalStateException if this statement does not have positional values. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - public Object getObject(int i) { - if (values == null) - throw new IllegalStateException("This statement does not have positional values"); - if (i < 0 || i >= values.length) - throw new ArrayIndexOutOfBoundsException(i); - return values[i]; - } + /** + * Returns the {@code i}th positional value as the Java type matching its CQL type. + * + *

Note that, unlike with other driver types like {@link Row}, you can't retrieve named values + * by position. This getter will throw an exception if the statement was created with named values + * (or no values at all). Call {@link #usesNamedValues()} to check the type of values, and {@link + * #getObject(String)} if they are positional. + * + * @param i the index to retrieve. + * @return the {@code i}th value of this statement. + * @throws IllegalStateException if this statement does not have positional values. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public Object getObject(int i) { + if (values == null) + throw new IllegalStateException("This statement does not have positional values"); + if (i < 0 || i >= values.length) throw new ArrayIndexOutOfBoundsException(i); + return values[i]; + } - /** - * Returns a named value as the Java type matching its CQL type. - * - * @param name the name of the value to retrieve. - * @return the value that matches the name, or {@code null} if there is no such name. - * @throws IllegalStateException if this statement does not have named values. - */ - public Object getObject(String name) { - if (namedValues == null) - throw new IllegalStateException("This statement does not have named values"); - return namedValues.get(name); - } + /** + * Returns a named value as the Java type matching its CQL type. + * + * @param name the name of the value to retrieve. + * @return the value that matches the name, or {@code null} if there is no such name. + * @throws IllegalStateException if this statement does not have named values. + */ + public Object getObject(String name) { + if (namedValues == null) + throw new IllegalStateException("This statement does not have named values"); + return namedValues.get(name); + } - /** - * Returns the names of the named values of this statement. - * - * @return the names of the named values of this statement. - * @throws IllegalStateException if this statement does not have named values. - */ - public Set getValueNames() { - if (namedValues == null) - throw new IllegalStateException("This statement does not have named values"); - return Collections.unmodifiableSet(namedValues.keySet()); - } + /** + * Returns the names of the named values of this statement. + * + * @return the names of the named values of this statement. + * @throws IllegalStateException if this statement does not have named values. + */ + public Set getValueNames() { + if (namedValues == null) + throw new IllegalStateException("This statement does not have named values"); + return Collections.unmodifiableSet(namedValues.keySet()); + } - /** - * Returns the routing key for the query. - *

- * Unless the routing key has been explicitly set through - * {@link #setRoutingKey}, this method will return {@code null} to - * avoid having to parse the query string to retrieve the partition key. - * - * @param protocolVersion unused by this implementation (no internal serialization is required to compute the key). - * @param codecRegistry unused by this implementation (no internal serialization is required to compute the key). - * @return the routing key set through {@link #setRoutingKey} if such a key - * was set, {@code null} otherwise. - * @see Statement#getRoutingKey - */ - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return routingKey; - } + /** + * Returns the routing key for the query. + * + *

Unless the routing key has been explicitly set through {@link #setRoutingKey}, this method + * will return {@code null} to avoid having to parse the query string to retrieve the partition + * key. + * + * @param protocolVersion unused by this implementation (no internal serialization is required to + * compute the key). + * @param codecRegistry unused by this implementation (no internal serialization is required to + * compute the key). + * @return the routing key set through {@link #setRoutingKey} if such a key was set, {@code null} + * otherwise. + * @see Statement#getRoutingKey + */ + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return routingKey; + } - /** - * Sets the routing key for this query. - *

- * This method allows you to manually provide a routing key for this query. It - * is thus optional since the routing key is only an hint for token aware - * load balancing policy but is never mandatory. - *

- * If the partition key for the query is composite, use the - * {@link #setRoutingKey(ByteBuffer...)} method instead to build the - * routing key. - * - * @param routingKey the raw (binary) value to use as routing key. - * @return this {@code SimpleStatement} object. - * @see Statement#getRoutingKey - */ - public SimpleStatement setRoutingKey(ByteBuffer routingKey) { - this.routingKey = routingKey; - return this; - } + /** + * Sets the routing key for this query. + * + *

This method allows you to manually provide a routing key for this query. It is thus optional + * since the routing key is only a hint for the token-aware load balancing policy but is never + * mandatory. + * + *

If the partition key for the query is composite, use the {@link + * #setRoutingKey(ByteBuffer...)} method instead to build the routing key. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code SimpleStatement} object. + * @see Statement#getRoutingKey + */ + public SimpleStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } - /** - * Returns the keyspace this query operates on. - *

- * Unless the keyspace has been explicitly set through {@link #setKeyspace}, - * this method will return {@code null} to avoid having to parse the query - * string. - * - * @return the keyspace set through {@link #setKeyspace} if such keyspace was - * set, {@code null} otherwise. - * @see Statement#getKeyspace - */ - @Override - public String getKeyspace() { - return keyspace; - } + /** + * Returns the keyspace this query operates on. + * + *

Unless the keyspace has been explicitly set through {@link #setKeyspace}, this method will + * return {@code null} to avoid having to parse the query string. + * + * @return the keyspace set through {@link #setKeyspace} if such keyspace was set, {@code null} + * otherwise. + * @see Statement#getKeyspace + */ + @Override + public String getKeyspace() { + return keyspace; + } - /** - * Sets the keyspace this query operates on. - *

- * This method allows you to manually provide a keyspace for this query. It - * is thus optional since the value returned by this method is only an hint - * for token aware load balancing policy but is never mandatory. - *

- * Do note that if the query does not use a fully qualified keyspace, then - * you do not need to set the keyspace through that method as the - * currently logged in keyspace will be used. - * - * @param keyspace the name of the keyspace this query operates on. - * @return this {@code SimpleStatement} object. - * @see Statement#getKeyspace - */ - public SimpleStatement setKeyspace(String keyspace) { - this.keyspace = keyspace; - return this; - } + /** + * Sets the keyspace this query operates on. + * + *

This method allows you to manually provide a keyspace for this query. It is thus optional + * since the value returned by this method is only a hint for the token-aware load balancing policy + * but is never mandatory. + * + *

Do note that if the query does not use a fully qualified keyspace, then you do not need to + * set the keyspace through that method as the currently logged in keyspace will be used. + * + * @param keyspace the name of the keyspace this query operates on. + * @return this {@code SimpleStatement} object. + * @see Statement#getKeyspace + */ + public SimpleStatement setKeyspace(String keyspace) { + this.keyspace = keyspace; + return this; + } - /** - * Sets the routing key for this query. - *

- * See {@link #setRoutingKey(ByteBuffer)} for more information. This - * method is a variant for when the query partition key is composite and - * thus the routing key must be built from multiple values. - * - * @param routingKeyComponents the raw (binary) values to compose to obtain - * the routing key. - * @return this {@code SimpleStatement} object. - * @see Statement#getRoutingKey - */ - public SimpleStatement setRoutingKey(ByteBuffer... routingKeyComponents) { - this.routingKey = compose(routingKeyComponents); - return this; - } + /** + * Sets the routing key for this query. + * + *

See {@link #setRoutingKey(ByteBuffer)} for more information. This method is a variant for + * when the query partition key is composite and thus the routing key must be built from multiple + * values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain the routing key. + * @return this {@code SimpleStatement} object. + * @see Statement#getRoutingKey + */ + public SimpleStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = compose(routingKeyComponents); + return this; + } - /* - * This method performs a best-effort heuristic to guess which codec to use. - * Note that this is not particularly efficient as the codec registry needs to iterate over - * the registered codecs until it finds a suitable one. - */ - private static ByteBuffer[] convert(Object[] values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - ByteBuffer[] serializedValues = new ByteBuffer[values.length]; - for (int i = 0; i < values.length; i++) { - Object value = values[i]; - if (value == null) { - // impossible to locate the right codec when object is null, - // so forcing the result to null - serializedValues[i] = null; - } else { - if (value instanceof Token) { - // bypass CodecRegistry for Token instances - serializedValues[i] = ((Token) value).serialize(protocolVersion); - } else { - try { - TypeCodec codec = codecRegistry.codecFor(value); - serializedValues[i] = codec.serialize(value, protocolVersion); - } catch (Exception e) { - // Catch and rethrow to provide a more helpful error message (one that include which value is bad) - throw new InvalidTypeException(String.format("Value %d of type %s does not correspond to any CQL3 type", i, value.getClass()), e); - } - } - } + /* + * This method performs a best-effort heuristic to guess which codec to use. + * Note that this is not particularly efficient as the codec registry needs to iterate over + * the registered codecs until it finds a suitable one. 
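To make the token-aware hints above concrete, here is one way a caller might set the keyspace and routing key on a simple statement (keyspace, table and codec choice are illustrative):

```
UUID userId = UUID.randomUUID();
SimpleStatement update =
    new SimpleStatement("UPDATE users SET name = ? WHERE id = ?", "alice", userId);
update
    .setKeyspace("ks")
    .setRoutingKey(TypeCodec.uuid().serialize(userId, ProtocolVersion.NEWEST_SUPPORTED));
// For a composite partition key, pass one serialized buffer per component;
// compose() below concatenates them into a single routing key:
// update.setRoutingKey(serializedPk1, serializedPk2);
```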
+ */ + private static ByteBuffer[] convert( + Object[] values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + ByteBuffer[] serializedValues = new ByteBuffer[values.length]; + for (int i = 0; i < values.length; i++) { + Object value = values[i]; + if (value == null) { + // impossible to locate the right codec when object is null, + // so forcing the result to null + serializedValues[i] = null; + } else { + if (value instanceof Token) { + // bypass CodecRegistry for Token instances + serializedValues[i] = ((Token) value).serialize(protocolVersion); + } else { + try { + TypeCodec codec = codecRegistry.codecFor(value); + serializedValues[i] = codec.serialize(value, protocolVersion); + } catch (Exception e) { + // Catch and rethrow to provide a more helpful error message (one that include which + // value is bad) + throw new InvalidTypeException( + String.format( + "Value %d of type %s does not correspond to any CQL3 type", + i, value.getClass()), + e); + } } - return serializedValues; + } } + return serializedValues; + } - private static Map convert(Map values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - Map serializedValues = new HashMap(); - for (Map.Entry entry : values.entrySet()) { - String name = entry.getKey(); - Object value = entry.getValue(); - if (value == null) { - // impossible to locate the right codec when object is null, - // so forcing the result to null - serializedValues.put(name, null); - } else { - if (value instanceof Token) { - // bypass CodecRegistry for Token instances - serializedValues.put(name, ((Token) value).serialize(protocolVersion)); - } else { - try { - TypeCodec codec = codecRegistry.codecFor(value); - serializedValues.put(name, codec.serialize(value, protocolVersion)); - } catch (Exception e) { - // Catch and rethrow to provide a more helpful error message (one that include which value is bad) - throw new InvalidTypeException(String.format("Value '%s' of type %s does not correspond to any CQL3 type", name, value.getClass()), e); - } - } - } + private static Map convert( + Map values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + Map serializedValues = new HashMap(); + for (Map.Entry entry : values.entrySet()) { + String name = entry.getKey(); + Object value = entry.getValue(); + if (value == null) { + // impossible to locate the right codec when object is null, + // so forcing the result to null + serializedValues.put(name, null); + } else { + if (value instanceof Token) { + // bypass CodecRegistry for Token instances + serializedValues.put(name, ((Token) value).serialize(protocolVersion)); + } else { + try { + TypeCodec codec = codecRegistry.codecFor(value); + serializedValues.put(name, codec.serialize(value, protocolVersion)); + } catch (Exception e) { + // Catch and rethrow to provide a more helpful error message (one that include which + // value is bad) + throw new InvalidTypeException( + String.format( + "Value '%s' of type %s does not correspond to any CQL3 type", + name, value.getClass()), + e); + } } - return serializedValues; + } } + return serializedValues; + } - /** - * Utility method to assemble different routing key components into a single {@link ByteBuffer}. - * Mainly intended for statements that need to generate a routing key out of their current values. - * - * @param buffers the components of the routing key. - * @return A ByteBuffer containing the serialized routing key - */ - static ByteBuffer compose(ByteBuffer... 
buffers) { - if (buffers.length == 1) - return buffers[0]; - - int totalLength = 0; - for (ByteBuffer bb : buffers) - totalLength += 2 + bb.remaining() + 1; + /** + * Utility method to assemble different routing key components into a single {@link ByteBuffer}. + * Mainly intended for statements that need to generate a routing key out of their current values. + * + * @param buffers the components of the routing key. + * @return A ByteBuffer containing the serialized routing key + */ + static ByteBuffer compose(ByteBuffer... buffers) { + if (buffers.length == 1) return buffers[0]; - ByteBuffer out = ByteBuffer.allocate(totalLength); - for (ByteBuffer buffer : buffers) { - ByteBuffer bb = buffer.duplicate(); - putShortLength(out, bb.remaining()); - out.put(bb); - out.put((byte) 0); - } - out.flip(); - return out; - } + int totalLength = 0; + for (ByteBuffer bb : buffers) totalLength += 2 + bb.remaining() + 1; - static void putShortLength(ByteBuffer bb, int length) { - bb.put((byte) ((length >> 8) & 0xFF)); - bb.put((byte) (length & 0xFF)); + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer buffer : buffers) { + ByteBuffer bb = buffer.duplicate(); + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); } + out.flip(); + return out; + } + static void putShortLength(ByteBuffer bb, int length) { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SnappyCompressor.java b/driver-core/src/main/java/com/datastax/driver/core/SnappyCompressor.java index d3d4a432098..fc1afdd2c6f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SnappyCompressor.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SnappyCompressor.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,154 +19,170 @@ import com.datastax.driver.core.exceptions.DriverInternalError; import io.netty.buffer.ByteBuf; +import java.io.IOException; +import java.nio.ByteBuffer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xerial.snappy.Snappy; -import java.io.IOException; -import java.nio.ByteBuffer; - class SnappyCompressor extends FrameCompressor { - private static final Logger logger = LoggerFactory.getLogger(SnappyCompressor.class); - - static final SnappyCompressor instance; - - static { - SnappyCompressor i; - try { - i = new SnappyCompressor(); - } catch (NoClassDefFoundError e) { - i = null; - logger.warn("Cannot find Snappy class, you should make sure the Snappy library is in the classpath if you intend to use it. 
Snappy compression will not be available for the protocol."); - } catch (Throwable e) { - i = null; - logger.warn("Error loading Snappy library ({}). Snappy compression will not be available for the protocol.", e.toString()); - } - instance = i; - } - - private SnappyCompressor() { - // this would throw java.lang.NoClassDefFoundError if Snappy class - // wasn't found at runtime which should be processed by the calling method - Snappy.getNativeLibraryVersion(); + private static final Logger logger = LoggerFactory.getLogger(SnappyCompressor.class); + + static final SnappyCompressor instance; + + static { + SnappyCompressor i; + try { + i = new SnappyCompressor(); + } catch (NoClassDefFoundError e) { + i = null; + logger.warn( + "Cannot find Snappy class, you should make sure the Snappy library is in the classpath if you intend to use it. Snappy compression will not be available for the protocol."); + } catch (Throwable e) { + i = null; + logger.warn( + "Error loading Snappy library ({}). Snappy compression will not be available for the protocol.", + e.toString()); } - - @Override - Frame compress(Frame frame) throws IOException { - ByteBuf input = frame.body; - ByteBuf frameBody = input.isDirect() ? compressDirect(input) : compressHeap(input); - return frame.with(frameBody); - } - - private ByteBuf compressDirect(ByteBuf input) throws IOException { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us to use - // Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuffer out = outputNioBuffer(output); - int written = Snappy.compress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } - - private ByteBuf compressHeap(ByteBuf input) throws IOException { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so - // can eliminate the overhead of allocate a new byte[]. - ByteBuf output = input.alloc().heapBuffer(maxCompressedLength); - try { - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.compress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. 
- output.release(); - throw e; - } - return output; + instance = i; + } + + private SnappyCompressor() { + // this would throw java.lang.NoClassDefFoundError if Snappy class + // wasn't found at runtime which should be processed by the calling method + Snappy.getNativeLibraryVersion(); + } + + @Override + Frame compress(Frame frame) throws IOException { + return frame.with(compress(frame.body)); + } + + @Override + ByteBuf compress(ByteBuf buffer) throws IOException { + return buffer.isDirect() ? compressDirect(buffer) : compressHeap(buffer); + } + + private ByteBuf compressDirect(ByteBuf input) throws IOException { + int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); + // If the input is direct we will allocate a direct output buffer as well as this will allow us + // to use + // Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. + ByteBuf output = input.alloc().directBuffer(maxCompressedLength); + try { + ByteBuffer in = inputNioBuffer(input); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + ByteBuffer out = outputNioBuffer(output); + int written = Snappy.compress(in, out); + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + written); + } catch (IOException e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw e; } - - @Override - Frame decompress(Frame frame) throws IOException { - ByteBuf input = frame.body; - ByteBuf frameBody = input.isDirect() ? decompressDirect(input) : decompressHeap(input); - return frame.with(frameBody); + return output; + } + + private ByteBuf compressHeap(ByteBuf input) throws IOException { + int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); + int inOffset = input.arrayOffset() + input.readerIndex(); + byte[] in = input.array(); + int len = input.readableBytes(); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and + // so + // can eliminate the overhead of allocate a new byte[]. + ByteBuf output = input.alloc().heapBuffer(maxCompressedLength); + try { + // Calculate the correct offset. + int offset = output.arrayOffset() + output.writerIndex(); + byte[] out = output.array(); + int written = Snappy.compress(in, inOffset, len, out, offset); + + // Increase the writerIndex with the written bytes. + output.writerIndex(output.writerIndex() + written); + } catch (IOException e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw e; } - - private ByteBuf decompressDirect(ByteBuf input) throws IOException { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - if (!Snappy.isValidCompressedBuffer(in)) - throw new DriverInternalError("Provided frame does not appear to be Snappy compressed"); - - // If the input is direct we will allocate a direct output buffer as well as this will allow us to use - // Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(Snappy.uncompressedLength(in)); - try { - ByteBuffer out = outputNioBuffer(output); - - int size = Snappy.uncompress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + size); - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. 
- output.release(); - throw e; - } - return output; + return output; + } + + @Override + Frame decompress(Frame frame) throws IOException { + ByteBuf input = frame.body; + ByteBuf frameBody = input.isDirect() ? decompressDirect(input) : decompressHeap(input); + return frame.with(frameBody); + } + + @Override + ByteBuf decompress(ByteBuf buffer, int uncompressedLength) throws IOException { + // Note that the Snappy algorithm already encodes the uncompressed length, we don't need the + // provided one. + return buffer.isDirect() ? decompressDirect(buffer) : decompressHeap(buffer); + } + + private ByteBuf decompressDirect(ByteBuf input) throws IOException { + ByteBuffer in = inputNioBuffer(input); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + if (!Snappy.isValidCompressedBuffer(in)) + throw new DriverInternalError("Provided frame does not appear to be Snappy compressed"); + + // If the input is direct we will allocate a direct output buffer as well as this will allow us + // to use + // Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. + ByteBuf output = input.alloc().directBuffer(Snappy.uncompressedLength(in)); + try { + ByteBuffer out = outputNioBuffer(output); + + int size = Snappy.uncompress(in, out); + // Set the writer index so the amount of written bytes is reflected + output.writerIndex(output.writerIndex() + size); + } catch (IOException e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw e; } - - private ByteBuf decompressHeap(ByteBuf input) throws IOException { - // Not a direct buffer so use byte arrays... - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - if (!Snappy.isValidCompressedBuffer(in, inOffset, len)) - throw new DriverInternalError("Provided frame does not appear to be Snappy compressed"); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so - // can eliminate the overhead of allocate a new byte[]. - ByteBuf output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len)); - try { - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.uncompress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; + return output; + } + + private ByteBuf decompressHeap(ByteBuf input) throws IOException { + // Not a direct buffer so use byte arrays... + int inOffset = input.arrayOffset() + input.readerIndex(); + byte[] in = input.array(); + int len = input.readableBytes(); + // Increase reader index. + input.readerIndex(input.writerIndex()); + + if (!Snappy.isValidCompressedBuffer(in, inOffset, len)) + throw new DriverInternalError("Provided frame does not appear to be Snappy compressed"); + + // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and + // so + // can eliminate the overhead of allocate a new byte[]. + ByteBuf output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len)); + try { + // Calculate the correct offset. 
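For reference, the compressor above is only exercised when Snappy is requested at the protocol level; enabling it from application code looks roughly like this (the contact point is a placeholder):

```
Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1")
        // Requires the org.xerial.snappy:snappy-java dependency on the classpath;
        // otherwise SnappyCompressor.instance is null and SNAPPY cannot be negotiated.
        .withCompression(ProtocolOptions.Compression.SNAPPY)
        .build();
```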
+ int offset = output.arrayOffset() + output.writerIndex(); + byte[] out = output.array(); + int written = Snappy.uncompress(in, inOffset, len, out, offset); + + // Increase the writerIndex with the written bytes. + output.writerIndex(output.writerIndex() + written); + } catch (IOException e) { + // release output buffer so we not leak and rethrow exception. + output.release(); + throw e; } + return output; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SniEndPoint.java b/driver-core/src/main/java/com/datastax/driver/core/SniEndPoint.java new file mode 100644 index 00000000000..46e9b54a653 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SniEndPoint.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.google.common.base.Objects; +import com.google.common.base.Preconditions; +import com.google.common.primitives.UnsignedBytes; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicLong; + +/** An endpoint to access nodes through a proxy that uses SNI routing. */ +public class SniEndPoint implements EndPoint { + + private static final AtomicLong OFFSET = new AtomicLong(); + + private final InetSocketAddress proxyAddress; + private final String serverName; + + /** + * @param proxyAddress the address of the proxy. If it is {@linkplain + * InetSocketAddress#isUnresolved() unresolved}, each call to {@link #resolve()} will + * re-resolve it, fetch all of its A-records, and if there are more than 1 pick one in a + * round-robin fashion. + * @param serverName the SNI server name. In the context of DSOD, this is the string + * representation of the host id. + */ + public SniEndPoint(InetSocketAddress proxyAddress, String serverName) { + Preconditions.checkNotNull(proxyAddress); + Preconditions.checkNotNull(serverName); + this.proxyAddress = proxyAddress; + this.serverName = serverName; + } + + @Override + public InetSocketAddress resolve() { + if (proxyAddress.isUnresolved()) { + try { + InetAddress[] aRecords = InetAddress.getAllByName(proxyAddress.getHostName()); + if (aRecords.length == 0) { + // Probably never happens, but the JDK docs don't explicitly say so + throw new IllegalArgumentException( + "Could not resolve proxy address " + proxyAddress.getHostName()); + } + // The order of the returned address is unspecified. Sort by IP to make sure we get a true + // round-robin + Arrays.sort(aRecords, IP_COMPARATOR); + int index = (aRecords.length == 1) ? 
0 : (int) OFFSET.getAndIncrement() % aRecords.length; + return new InetSocketAddress(aRecords[index], proxyAddress.getPort()); + } catch (UnknownHostException e) { + throw new IllegalArgumentException( + "Could not resolve proxy address " + proxyAddress.getHostName(), e); + } + } else { + return proxyAddress; + } + } + + String getServerName() { + return serverName; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof SniEndPoint) { + SniEndPoint that = (SniEndPoint) other; + return this.proxyAddress.equals(that.proxyAddress) && this.serverName.equals(that.serverName); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hashCode(proxyAddress, serverName); + } + + @Override + public String toString() { + // Note that this uses the original proxy address, so if there are multiple A-records it won't + // show which one was selected. If that turns out to be a problem for debugging, we might need + // to store the result of resolve() in Connection and log that instead of the endpoint. + return proxyAddress.toString() + ":" + serverName; + } + + private static final Comparator IP_COMPARATOR = + new Comparator() { + @Override + public int compare(InetAddress address1, InetAddress address2) { + return UnsignedBytes.lexicographicalComparator() + .compare(address1.getAddress(), address2.getAddress()); + } + }; +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SniEndPointFactory.java b/driver-core/src/main/java/com/datastax/driver/core/SniEndPointFactory.java new file mode 100644 index 00000000000..f40550dbd99 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SniEndPointFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; +import java.util.UUID; + +public class SniEndPointFactory implements EndPointFactory { + + private final InetSocketAddress proxyAddress; + + public SniEndPointFactory(InetSocketAddress proxyAddress) { + this.proxyAddress = proxyAddress; + } + + @Override + public void init(Cluster cluster) {} + + @Override + public EndPoint create(Row peersRow) { + UUID host_id = peersRow.getUUID("host_id"); + return new SniEndPoint(proxyAddress, host_id.toString()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SniSSLOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SniSSLOptions.java new file mode 100644 index 00000000000..ae9476e3553 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SniSSLOptions.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
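A rough sketch of how the SNI endpoint above resolves (the proxy host name, port and host id are assumptions for illustration):

```
UUID hostId = UUID.randomUUID(); // assumed: the node's host_id from system.peers
InetSocketAddress proxy = InetSocketAddress.createUnresolved("proxy.example.com", 9042);
SniEndPoint endPoint = new SniEndPoint(proxy, hostId.toString());

// Because the proxy address is unresolved, each call re-reads the A-records and
// rotates through them, so successive calls may return different addresses.
InetSocketAddress first = endPoint.resolve();
InetSocketAddress second = endPoint.resolve();
```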
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.google.common.collect.ImmutableList; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.ssl.SslHandler; +import java.net.InetSocketAddress; +import java.util.concurrent.CopyOnWriteArrayList; +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; + +@IgnoreJDK6Requirement +@SuppressWarnings("deprecation") +public class SniSSLOptions extends JdkSSLOptions implements ExtendedRemoteEndpointAwareSslOptions { + + // An offset that gets added to our "fake" ports (see below). We pick this value because it is the + // start of the ephemeral port range. + private static final int FAKE_PORT_OFFSET = 49152; + + private final CopyOnWriteArrayList fakePorts = new CopyOnWriteArrayList(); + + /** + * Creates a new instance. + * + * @param context the SSL context. + * @param cipherSuites the cipher suites to use. + */ + protected SniSSLOptions(SSLContext context, String[] cipherSuites) { + super(context, cipherSuites); + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel) { + throw new AssertionError( + "This class implements RemoteEndpointAwareSSLOptions, this method should not be called"); + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel, EndPoint remoteEndpoint) { + SSLEngine engine = newSSLEngine(channel, remoteEndpoint); + return new SslHandler(engine); + } + + @Override + public SslHandler newSSLHandler(SocketChannel channel, InetSocketAddress remoteEndpoint) { + throw new AssertionError( + "The driver should never call this method on an object that implements " + + this.getClass().getSimpleName()); + } + + protected SSLEngine newSSLEngine( + @SuppressWarnings("unused") SocketChannel channel, EndPoint remoteEndpoint) { + if (!(remoteEndpoint instanceof SniEndPoint)) { + throw new IllegalArgumentException( + String.format( + "Configuration error: can only use %s with SNI end points", + this.getClass().getSimpleName())); + } + SniEndPoint sniEndPoint = (SniEndPoint) remoteEndpoint; + InetSocketAddress address = sniEndPoint.resolve(); + String sniServerName = sniEndPoint.getServerName(); + + // When hostname verification is enabled (with setEndpointIdentificationAlgorithm), the SSL + // engine will try to match the server's certificate against the SNI host name; if that doesn't + // work, it will fall back to the "advisory peer host" passed to createSSLEngine. + // + // In our case, the first check will never succeed because our SNI host name is not the DNS name + // (we use the Cassandra host_id instead). So we *must* set the advisory peer information. 
+ // + // However if we use the address as-is, this leads to another issue: the advisory peer + // information is also used to cache SSL sessions internally. All of our nodes share the same + // proxy address, so the JDK tries to reuse SSL sessions across nodes. But it doesn't update the + // SNI host name every time, so it ends up opening connections to the wrong node. + // + // To avoid that, we create a unique "fake" port for every node. We still get session reuse for + // a given node, but not across nodes. This is safe because the advisory port is only used for + // session caching. + SSLEngine engine = context.createSSLEngine(address.getHostName(), getFakePort(sniServerName)); + engine.setUseClientMode(true); + SSLParameters parameters = engine.getSSLParameters(); + parameters.setServerNames(ImmutableList.of(new SNIHostName(sniServerName))); + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + engine.setSSLParameters(parameters); + if (cipherSuites != null) engine.setEnabledCipherSuites(cipherSuites); + return engine; + } + + private int getFakePort(String sniServerName) { + fakePorts.addIfAbsent(sniServerName); + return FAKE_PORT_OFFSET + fakePorts.indexOf(sniServerName); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder extends JdkSSLOptions.Builder { + + @Override + public SniSSLOptions.Builder withSSLContext(SSLContext context) { + super.withSSLContext(context); + return this; + } + + @Override + public SniSSLOptions.Builder withCipherSuites(String[] cipherSuites) { + super.withCipherSuites(cipherSuites); + return this; + } + + @Override + public SniSSLOptions build() { + return new SniSSLOptions(context, cipherSuites); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java index 1f860f1b176..61f7beabd29 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,269 +18,265 @@ package com.datastax.driver.core; /** - * Options to configure low-level socket options for the connections kept - * to the Cassandra hosts. + * Options to configure low-level socket options for the connections kept to the Cassandra hosts. */ public class SocketOptions { - /** - * The default connection timeout in milliseconds if none is set explicitly - * using {@link #setConnectTimeoutMillis}. - *
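Putting the SNI pieces together, a sketch of how these new classes might be instantiated (buildSslContext() and the proxy address are placeholders; how the factory and options are wired into a Cluster is not shown in this hunk):

```
SSLContext sslContext = buildSslContext(); // assumed: trust material for the SNI proxy
SniSSLOptions sslOptions = SniSSLOptions.builder().withSSLContext(sslContext).build();
SniEndPointFactory endPointFactory =
    new SniEndPointFactory(InetSocketAddress.createUnresolved("proxy.example.com", 9042));
```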

- * That default is of 5 seconds. - */ - public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000; + /** + * The default connection timeout in milliseconds if none is set explicitly using {@link + * #setConnectTimeoutMillis}. + * + *

That default is of 5 seconds. + */ + public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000; - /** - * The default read timeout in milliseconds if none is set explicitly - * using {@link #setReadTimeoutMillis}. - *

- * That default is of 12 seconds so as to be slightly bigger that the - * default Cassandra timeout. - * - * @see #getReadTimeoutMillis for more details on this timeout. - */ - public static final int DEFAULT_READ_TIMEOUT_MILLIS = 12000; + /** + * The default read timeout in milliseconds if none is set explicitly using {@link + * #setReadTimeoutMillis}. + * + *

That default is of 12 seconds so as to be slightly bigger than the default Cassandra + * timeout. + * + * @see #getReadTimeoutMillis for more details on this timeout. + */ + public static final int DEFAULT_READ_TIMEOUT_MILLIS = 12000; - private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS; - private volatile int readTimeoutMillis = DEFAULT_READ_TIMEOUT_MILLIS; - private volatile Boolean keepAlive; - private volatile Boolean reuseAddress; - private volatile Integer soLinger; - private volatile Boolean tcpNoDelay = Boolean.TRUE; - private volatile Integer receiveBufferSize; - private volatile Integer sendBufferSize; + private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS; + private volatile int readTimeoutMillis = DEFAULT_READ_TIMEOUT_MILLIS; + private volatile Boolean keepAlive; + private volatile Boolean reuseAddress; + private volatile Integer soLinger; + private volatile Boolean tcpNoDelay = Boolean.TRUE; + private volatile Integer receiveBufferSize; + private volatile Integer sendBufferSize; - /** - * Creates a new {@code SocketOptions} instance with default values. - */ - public SocketOptions() { - } + /** Creates a new {@code SocketOptions} instance with default values. */ + public SocketOptions() {} - /** - * The connection timeout in milliseconds. - *

- * As the name implies, the connection timeout defines how long the driver - * waits to establish a new connection to a Cassandra node before giving up. - * - * @return the connection timeout in milliseconds - */ - public int getConnectTimeoutMillis() { - return connectTimeoutMillis; - } + /** + * The connection timeout in milliseconds. + * + *

As the name implies, the connection timeout defines how long the driver waits to establish a + * new connection to a Cassandra node before giving up. + * + * @return the connection timeout in milliseconds + */ + public int getConnectTimeoutMillis() { + return connectTimeoutMillis; + } - /** - * Sets the connection timeout in milliseconds. - *

- * The default value is {@link #DEFAULT_CONNECT_TIMEOUT_MILLIS}. - * - * @param connectTimeoutMillis the timeout to set. - * @return this {@code SocketOptions}. - */ - public SocketOptions setConnectTimeoutMillis(int connectTimeoutMillis) { - this.connectTimeoutMillis = connectTimeoutMillis; - return this; - } + /** + * Sets the connection timeout in milliseconds. + * + *

The default value is {@link #DEFAULT_CONNECT_TIMEOUT_MILLIS}. + * + * @param connectTimeoutMillis the timeout to set. + * @return this {@code SocketOptions}. + */ + public SocketOptions setConnectTimeoutMillis(int connectTimeoutMillis) { + this.connectTimeoutMillis = connectTimeoutMillis; + return this; + } - /** - * The per-host read timeout in milliseconds. - *

- * This defines how long the driver will wait for a given Cassandra node to - * answer a query. - *

- * Please note that this is not the maximum time a call to {@link Session#execute} may block; - * this is the maximum time that a call will wait for one particular - * Cassandra host, but other hosts could be tried if one of them times out, depending - * on the {@link com.datastax.driver.core.policies.RetryPolicy} in use. In - * other words, a {@link Session#execute} call may theoretically wait up to - * {@code getReadTimeoutMillis() * } (though the - * total number of hosts tried for a given query also depends on the - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy} in use). - * If you want to control how long to wait for a query, use {@link Session#executeAsync} - * and the {@code ResultSetFuture#get(long, TimeUnit)} method. - *

- * Also note that for efficiency reasons, this read timeout is approximate: it - * has an accuracy of up to 100 milliseconds (i.e. it may fire up to 100 milliseconds late). - * It is not meant to be used for precise timeout, but rather as a protection - * against misbehaving Cassandra nodes. - *

- * - * @return the read timeout in milliseconds. - */ - public int getReadTimeoutMillis() { - return readTimeoutMillis; - } + /** + * The per-host read timeout in milliseconds. + * + *

This defines how long the driver will wait for a given Cassandra node to answer a query. + * + *

Please note that this is not the maximum time a call to {@link Session#execute} may block; + * this is the maximum time that a call will wait for one particular Cassandra host, but other + * hosts could be tried if one of them times out, depending on the {@link + * com.datastax.driver.core.policies.RetryPolicy} in use. In other words, a {@link + * Session#execute} call may theoretically wait up to {@code getReadTimeoutMillis() * + * } (though the total number of hosts tried for a given query also + * depends on the {@link com.datastax.driver.core.policies.LoadBalancingPolicy} in use). If you + * want to control how long to wait for a query, use {@link Session#executeAsync} and the {@code + * ResultSetFuture#get(long, TimeUnit)} method. + * + *

Also note that for efficiency reasons, this read timeout is approximate: it has an accuracy + * of up to 100 milliseconds (i.e. it may fire up to 100 milliseconds late). It is not meant to be + * used for precise timeout, but rather as a protection against misbehaving Cassandra nodes. + * + *

+ * + * @return the read timeout in milliseconds. + */ + public int getReadTimeoutMillis() { + return readTimeoutMillis; + } - /** - * Sets the per-host read timeout in milliseconds. - *

- * When setting this value, keep in mind the following: - *

<ul> - *   <li>it should be higher than the timeout settings used on the Cassandra side - *   ({@code *_request_timeout_in_ms} in {@code cassandra.yaml}).</li> - *   <li>the read timeout is only approximate and only control the timeout to one Cassandra - *   host, not the full query (see {@link #getReadTimeoutMillis} for more details). If a - *   high level of precision on the timeout to a request is required, you should use - *   the {@link ResultSetFuture#get(long, java.util.concurrent.TimeUnit)} method.</li> - * </ul> - * <p/>
- * If you don't call this method, the default value is {@link #DEFAULT_READ_TIMEOUT_MILLIS}. - * - * @param readTimeoutMillis the timeout to set. If it is less than or equal to 0, read timeouts are disabled. - * @return this {@code SocketOptions}. - */ - public SocketOptions setReadTimeoutMillis(int readTimeoutMillis) { - this.readTimeoutMillis = readTimeoutMillis; - return this; - } + /** + * Sets the per-host read timeout in milliseconds. + * + *

<p>When setting this value, keep in mind the following: + * + * <ul> + *   <li>it should be higher than the timeout settings used on the Cassandra side ({@code + *       *_request_timeout_in_ms} in {@code cassandra.yaml}). + *   <li>the read timeout is only approximate and only controls the timeout to one Cassandra host, + *       not the full query (see {@link #getReadTimeoutMillis} for more details). If a high level + *       of precision on the timeout to a request is required, you should use the {@link + *       ResultSetFuture#get(long, java.util.concurrent.TimeUnit)} method. + * </ul> + * + * <p>
If you don't call this method, the default value is {@link #DEFAULT_READ_TIMEOUT_MILLIS}. + * + * @param readTimeoutMillis the timeout to set. If it is less than or equal to 0, read timeouts + * are disabled. + * @return this {@code SocketOptions}. + */ + public SocketOptions setReadTimeoutMillis(int readTimeoutMillis) { + this.readTimeoutMillis = readTimeoutMillis; + return this; + } - /** - * Returns whether TCP keepalive is enabled. - * - * @return the value of the option, or {@code null} if it is not set. - * @see #setKeepAlive(boolean) - */ - public Boolean getKeepAlive() { - return keepAlive; - } + /** + * Returns whether TCP keepalive is enabled. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setKeepAlive(boolean) + */ + public Boolean getKeepAlive() { + return keepAlive; + } - /** - * Sets whether to enable TCP keepalive. - *
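As the javadoc above suggests, a hard cap on the whole call (across all tried hosts) is best enforced on the future rather than through the per-host read timeout; a minimal sketch, assuming an existing session and query parameters:

```
ResultSetFuture future = session.executeAsync("SELECT name FROM users WHERE id = ?", userId);
try {
  ResultSet rs = future.get(2, TimeUnit.SECONDS); // bounds the whole query, not just one host
} catch (TimeoutException e) {
  future.cancel(true); // give up on this query
} catch (InterruptedException | ExecutionException e) {
  throw new RuntimeException(e);
}
```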

- * By default, this option is not set by the driver. The actual value will be the default - * from the underlying Netty transport (Java NIO or native epoll). - * - * @param keepAlive whether to enable or disable the option. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#TCP_NODELAY - */ - public SocketOptions setKeepAlive(boolean keepAlive) { - this.keepAlive = keepAlive; - return this; - } + /** + * Sets whether to enable TCP keepalive. + * + *

By default, this option is not set by the driver. The actual value will be the default from + * the underlying Netty transport (Java NIO or native epoll). + * + * @param keepAlive whether to enable or disable the option. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#TCP_NODELAY + */ + public SocketOptions setKeepAlive(boolean keepAlive) { + this.keepAlive = keepAlive; + return this; + } - /** - * Returns whether reuse-address is enabled. - * - * @return the value of the option, or {@code null} if it is not set. - * @see #setReuseAddress(boolean) - */ - public Boolean getReuseAddress() { - return reuseAddress; - } + /** + * Returns whether reuse-address is enabled. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setReuseAddress(boolean) + */ + public Boolean getReuseAddress() { + return reuseAddress; + } - /** - * Sets whether to enable reuse-address. - *

- * By default, this option is not set by the driver. The actual value will be the default - * from the underlying Netty transport (Java NIO or native epoll). - * - * @param reuseAddress whether to enable or disable the option. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#SO_REUSEADDR - */ - public SocketOptions setReuseAddress(boolean reuseAddress) { - this.reuseAddress = reuseAddress; - return this; - } + /** + * Sets whether to enable reuse-address. + * + *

By default, this option is not set by the driver. The actual value will be the default from + * the underlying Netty transport (Java NIO or native epoll). + * + * @param reuseAddress whether to enable or disable the option. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#SO_REUSEADDR + */ + public SocketOptions setReuseAddress(boolean reuseAddress) { + this.reuseAddress = reuseAddress; + return this; + } - /** - * Returns the linger-on-close timeout. - * - * @return the value of the option, or {@code null} if it is not set. - * @see #setSoLinger(int) - */ - public Integer getSoLinger() { - return soLinger; - } + /** + * Returns the linger-on-close timeout. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setSoLinger(int) + */ + public Integer getSoLinger() { + return soLinger; + } - /** - * Sets the linger-on-close timeout. - *

- * By default, this option is not set by the driver. The actual value will be the default - * from the underlying Netty transport (Java NIO or native epoll). - * - * @param soLinger the new value. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#SO_LINGER - */ - public SocketOptions setSoLinger(int soLinger) { - this.soLinger = soLinger; - return this; - } + /** + * Sets the linger-on-close timeout. + * + *

By default, this option is not set by the driver. The actual value will be the default from + * the underlying Netty transport (Java NIO or native epoll). + * + * @param soLinger the new value. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#SO_LINGER + */ + public SocketOptions setSoLinger(int soLinger) { + this.soLinger = soLinger; + return this; + } - /** - * Returns whether Nagle's algorithm is disabled. - * - * @return the value of the option ({@code true} means Nagle is disabled), or {@code null} if it is not set. - * @see #setTcpNoDelay(boolean) - */ - public Boolean getTcpNoDelay() { - return tcpNoDelay; - } + /** + * Returns whether Nagle's algorithm is disabled. + * + * @return the value of the option ({@code true} means Nagle is disabled), or {@code null} if it + * is not set. + * @see #setTcpNoDelay(boolean) + */ + public Boolean getTcpNoDelay() { + return tcpNoDelay; + } - /** - * Sets whether to disable Nagle's algorithm. - *

- * By default, this option is set to {@code true} (Nagle disabled). - * - * @param tcpNoDelay whether to enable or disable the option. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#TCP_NODELAY - */ - public SocketOptions setTcpNoDelay(boolean tcpNoDelay) { - this.tcpNoDelay = tcpNoDelay; - return this; - } + /** + * Sets whether to disable Nagle's algorithm. + * + *

By default, this option is set to {@code true} (Nagle disabled). + * + * @param tcpNoDelay whether to enable or disable the option. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#TCP_NODELAY + */ + public SocketOptions setTcpNoDelay(boolean tcpNoDelay) { + this.tcpNoDelay = tcpNoDelay; + return this; + } - /** - * Returns the hint to the size of the underlying buffers for incoming network I/O. - * - * @return the value of the option, or {@code null} if it is not set. - * @see #setReceiveBufferSize(int) - */ - public Integer getReceiveBufferSize() { - return receiveBufferSize; - } + /** + * Returns the hint to the size of the underlying buffers for incoming network I/O. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setReceiveBufferSize(int) + */ + public Integer getReceiveBufferSize() { + return receiveBufferSize; + } - /** - * Sets a hint to the size of the underlying buffers for incoming network I/O. - *

- * By default, this option is not set by the driver. The actual value will be the default - * from the underlying Netty transport (Java NIO or native epoll). - * - * @param receiveBufferSize the new value. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#SO_RCVBUF - */ - public SocketOptions setReceiveBufferSize(int receiveBufferSize) { - this.receiveBufferSize = receiveBufferSize; - return this; - } + /** + * Sets a hint to the size of the underlying buffers for incoming network I/O. + * + *

By default, this option is not set by the driver. The actual value will be the default from + * the underlying Netty transport (Java NIO or native epoll). + * + * @param receiveBufferSize the new value. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#SO_RCVBUF + */ + public SocketOptions setReceiveBufferSize(int receiveBufferSize) { + this.receiveBufferSize = receiveBufferSize; + return this; + } - /** - * Returns the hint to the size of the underlying buffers for outgoing network I/O. - * - * @return the value of the option, or {@code null} if it is not set. - * @see #setSendBufferSize(int) - */ - public Integer getSendBufferSize() { - return sendBufferSize; - } + /** + * Returns the hint to the size of the underlying buffers for outgoing network I/O. + * + * @return the value of the option, or {@code null} if it is not set. + * @see #setSendBufferSize(int) + */ + public Integer getSendBufferSize() { + return sendBufferSize; + } - /** - * Sets a hint to the size of the underlying buffers for outgoing network I/O. - *

- * By default, this option is not set by the driver. The actual value will be the default - * from the underlying Netty transport (Java NIO or native epoll). - * - * @param sendBufferSize the new value. - * @return this {@code SocketOptions}. - * @see java.net.SocketOptions#SO_SNDBUF - */ - public SocketOptions setSendBufferSize(int sendBufferSize) { - this.sendBufferSize = sendBufferSize; - return this; - } + /** + * Sets a hint to the size of the underlying buffers for outgoing network I/O. + * + *

By default, this option is not set by the driver. The actual value will be the default from + * the underlying Netty transport (Java NIO or native epoll). + * + * @param sendBufferSize the new value. + * @return this {@code SocketOptions}. + * @see java.net.SocketOptions#SO_SNDBUF + */ + public SocketOptions setSendBufferSize(int sendBufferSize) { + this.sendBufferSize = sendBufferSize; + return this; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Statement.java b/driver-core/src/main/java/com/datastax/driver/core/Statement.java index 6eb3ba6db1d..a6190dfb1cb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Statement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Statement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,585 +17,655 @@ */ package com.datastax.driver.core; +import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.exceptions.PagingStateException; import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; +import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.querybuilder.BuiltStatement; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; - import java.nio.ByteBuffer; import java.util.Collection; import java.util.Map; /** * An executable query. - *
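For reference, these options are typically configured once and passed to the Cluster builder (the values shown are defaults or illustrative, not recommendations):

```
SocketOptions socketOptions =
    new SocketOptions()
        .setConnectTimeoutMillis(5000)
        .setReadTimeoutMillis(12000)
        .setKeepAlive(true)
        .setTcpNoDelay(true);

Cluster cluster =
    Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder
        .withSocketOptions(socketOptions)
        .build();
```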

- * This represents either a {@link RegularStatement}, a {@link BoundStatement} or a - * {@link BatchStatement} along with the querying options (consistency level, - * whether to trace the query, ...). + * + *

This represents either a {@link RegularStatement}, a {@link BoundStatement} or a {@link + * BatchStatement} along with the querying options (consistency level, whether to trace the query, + * ...). */ public abstract class Statement { - /** - * A special ByteBuffer value that can be used with custom payloads - * to denote a null value in a payload map. - */ - public static final ByteBuffer NULL_PAYLOAD_VALUE = ByteBuffer.allocate(0); + /** + * A special ByteBuffer value that can be used with custom payloads to denote a null value in a + * payload map. + */ + public static final ByteBuffer NULL_PAYLOAD_VALUE = ByteBuffer.allocate(0); - // An exception to the RegularStatement, BoundStatement or BatchStatement rule above. This is - // used when preparing a statement and for other internal queries. Do not expose publicly. - static final Statement DEFAULT = new Statement() { + // An exception to the RegularStatement, BoundStatement or BatchStatement rule above. This is + // used when preparing a statement and for other internal queries. Do not expose publicly. + static final Statement DEFAULT = + new Statement() { @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return null; + public ByteBuffer getRoutingKey( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return null; } @Override public String getKeyspace() { - return null; + return null; } @Override public ConsistencyLevel getConsistencyLevel() { - return ConsistencyLevel.ONE; - } - }; - - private volatile ConsistencyLevel consistency; - private volatile ConsistencyLevel serialConsistency; - private volatile boolean traceQuery; - private volatile int fetchSize; - private volatile long defaultTimestamp = Long.MIN_VALUE; - private volatile int readTimeoutMillis = Integer.MIN_VALUE; - private volatile RetryPolicy retryPolicy; - private volatile ByteBuffer pagingState; - protected volatile Boolean idempotent; - private volatile Map outgoingPayload; - - // We don't want to expose the constructor, because the code relies on this being only sub-classed by RegularStatement, BoundStatement and BatchStatement - Statement() { - } - - /** - * Sets the consistency level for the query. - * - * @param consistency the consistency level to set. - * @return this {@code Statement} object. - */ - public Statement setConsistencyLevel(ConsistencyLevel consistency) { - this.consistency = consistency; - return this; - } - - /** - * The consistency level for this query. - * - * @return the consistency level for this query, or {@code null} if no - * consistency level has been specified (through {@code setConsistencyLevel}). - * In the latter case, the default consistency level will be used. - */ - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } - - /** - * Sets the serial consistency level for the query. - *

- * The serial consistency level is only used by conditional updates ({@code INSERT}, {@code UPDATE} - * or {@code DELETE} statements with an {@code IF} condition). - * For those, the serial consistency level defines - * the consistency level of the serial phase (or "paxos" phase) while the - * normal consistency level defines the consistency for the "learn" phase, i.e. what - * type of reads will be guaranteed to see the update right away. For instance, if - * a conditional write has a regular consistency of QUORUM (and is successful), then a - * QUORUM read is guaranteed to see that write. But if the regular consistency of that - * write is ANY, then only a read with a consistency of SERIAL is guaranteed to see it - * (even a read with consistency ALL is not guaranteed to be enough). - *

- * The serial consistency can only be one of {@code ConsistencyLevel.SERIAL} or - * {@code ConsistencyLevel.LOCAL_SERIAL}. While {@code ConsistencyLevel.SERIAL} guarantees full - * linearizability (with other SERIAL updates), {@code ConsistencyLevel.LOCAL_SERIAL} only - * guarantees it in the local data center. - *

- * The serial consistency level is ignored for any query that is not a conditional - * update (serial reads should use the regular consistency level for instance). - * - * @param serialConsistency the serial consistency level to set. - * @return this {@code Statement} object. - * @throws IllegalArgumentException if {@code serialConsistency} is not one of - * {@code ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. - */ - public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { - if (!serialConsistency.isSerial()) - throw new IllegalArgumentException("Supplied consistency level is not serial: " + serialConsistency); - this.serialConsistency = serialConsistency; - return this; - } - - /** - * The serial consistency level for this query. - *

- * See {@link #setSerialConsistencyLevel(ConsistencyLevel)} for more detail on the serial consistency level. - * - * @return the serial consistency level for this query, or {@code null} if no serial - * consistency level has been specified (through {@link #setSerialConsistencyLevel(ConsistencyLevel)}). - * In the latter case, the default serial consistency level will be used. - */ - public ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistency; - } - - /** - * Enables tracing for this query. - *

- * By default (that is unless you call this method), tracing is not enabled. - * - * @return this {@code Statement} object. - */ - public Statement enableTracing() { - this.traceQuery = true; - return this; - } - - /** - * Disables tracing for this query. - * - * @return this {@code Statement} object. - */ - public Statement disableTracing() { - this.traceQuery = false; - return this; - } - - /** - * Returns whether tracing is enabled for this query or not. - * - * @return {@code true} if this query has tracing enabled, {@code false} - * otherwise. - */ - public boolean isTracing() { - return traceQuery; - } - - /** - * Returns the routing key (in binary raw form) to use for token aware - * routing of this query. - *

- * The routing key is optional in that implementers are free to - * return {@code null}. The routing key is an hint used for token-aware routing (see - * {@link com.datastax.driver.core.policies.TokenAwarePolicy}), and - * if provided should correspond to the binary value for the query - * partition key. However, not providing a routing key never causes a query - * to fail and if the load balancing policy used is not token aware, then - * the routing key can be safely ignored. - * - * @param protocolVersion the protocol version that will be used if the actual - * implementation needs to serialize something to compute - * the key. - * @param codecRegistry the codec registry that will be used if the actual - * implementation needs to serialize something to compute - * this key. - * @return the routing key for this query or {@code null}. - */ - public abstract ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry); - - /** - * Returns the keyspace this query operates on. - *

- * Note that not all query specify on which keyspace they operate on, and - * so this method can always return {@code null}. Firstly, some queries do - * not operate inside a keyspace: keyspace creation, {@code USE} queries, - * user creation, etc. Secondly, even query that operate within a keyspace - * do not have to specify said keyspace directly, in which case the - * currently logged in keyspace (the one set through a {@code USE} query - * (or through the use of {@link Cluster#connect(String)})). Lastly, as - * for the routing key, this keyspace information is only a hint for - * token-aware routing (since replica placement depend on the replication - * strategy in use which is a per-keyspace property) and having this method - * return {@code null} (or even a bogus keyspace name) will never cause the - * query to fail. - * - * @return the keyspace this query operate on if relevant or {@code null}. - */ - public abstract String getKeyspace(); - - /** - * Sets the retry policy to use for this query. - *

- * The default retry policy, if this method is not called, is the one returned by - * {@link com.datastax.driver.core.policies.Policies#getRetryPolicy} in the - * cluster configuration. This method is thus only useful in case you want - * to punctually override the default policy for this request. - * - * @param policy the retry policy to use for this query. - * @return this {@code Statement} object. - */ - public Statement setRetryPolicy(RetryPolicy policy) { - this.retryPolicy = policy; - return this; - } - - /** - * Returns the retry policy sets for this query, if any. - * - * @return the retry policy sets specifically for this query or {@code null} if no query specific - * retry policy has been set through {@link #setRetryPolicy} (in which case - * the Cluster retry policy will apply if necessary). - */ - public RetryPolicy getRetryPolicy() { - return retryPolicy; - } - - /** - * Sets the query fetch size. - *

- * The fetch size controls how much resulting rows will be retrieved - * simultaneously (the goal being to avoid loading too much results - * in memory for queries yielding large results). Please note that - * while value as low as 1 can be used, it is *highly* discouraged to - * use such a low value in practice as it will yield very poor - * performance. If in doubt, leaving the default is probably a good - * idea. - *

- * Only {@code SELECT} queries only ever make use of that setting. - *

- * Note: Paging is not supported with the native protocol version 1. If - * you call this method with {@code fetchSize > 0} and - * {@code fetchSize != Integer.MAX_VALUE} and the protocol version is in - * use (i.e. if you've force version 1 through {@link Cluster.Builder#withProtocolVersion} - * or you use Cassandra 1.2), you will get {@link UnsupportedProtocolVersionException} - * when submitting this statement for execution. - * - * @param fetchSize the fetch size to use. If {@code fetchSize <e; 0}, - * the default fetch size will be used. To disable paging of the - * result set, use {@code fetchSize == Integer.MAX_VALUE}. - * @return this {@code Statement} object. - */ - public Statement setFetchSize(int fetchSize) { - this.fetchSize = fetchSize; - return this; - } - - /** - * The fetch size for this query. - * - * @return the fetch size for this query. If that value is less or equal - * to 0 (the default unless {@link #setFetchSize} is used), the default - * fetch size will be used. - */ - public int getFetchSize() { - return fetchSize; - } - - /** - * Sets the default timestamp for this query (in microseconds since the epoch). - *

- * This feature is only available when version {@link ProtocolVersion#V3 V3} or - * higher of the native protocol is in use. With earlier versions, calling this - * method has no effect. - *

- * The actual timestamp that will be used for this query is, in order of - * preference: - *

    - *
  • the timestamp specified directly in the CQL query string (using the {@code USING TIMESTAMP} syntax);
  • the timestamp specified through this method, if different from {@link Long#MIN_VALUE};
  • the timestamp returned by the {@link TimestampGenerator} currently in use, if different from {@link Long#MIN_VALUE}.
- * If none of these apply, no timestamp will be sent with the query and Cassandra - * will generate a server-side one (similar to the pre-V3 behavior). - * - * @param defaultTimestamp the default timestamp for this query (must be strictly - * positive). - * @return this {@code Statement} object. - * @see Cluster.Builder#withTimestampGenerator(TimestampGenerator) - */ - public Statement setDefaultTimestamp(long defaultTimestamp) { - this.defaultTimestamp = defaultTimestamp; - return this; - } - - /** - * The default timestamp for this query. - * - * @return the default timestamp (in microseconds since the epoch). - */ - public long getDefaultTimestamp() { - return defaultTimestamp; - } - - /** - * Overrides the default per-host read timeout ({@link SocketOptions#getReadTimeoutMillis()}) - * for this statement. - *

- * You should override this only for statements for which the coordinator may allow a longer server-side - * timeout (for example aggregation queries). - * - * @param readTimeoutMillis the timeout to set. Negative values are not allowed. If it is 0, the read timeout will - * be disabled for this statement. - * @return this {@code Statement} object. - */ - public Statement setReadTimeoutMillis(int readTimeoutMillis) { - Preconditions.checkArgument(readTimeoutMillis >= 0, "read timeout must be >= 0"); - this.readTimeoutMillis = readTimeoutMillis; - return this; - } - - /** - * Return the per-host read timeout that was set for this statement. - * - * @return the timeout. Note that a negative value means that the default - * {@link SocketOptions#getReadTimeoutMillis()} will be used. - */ - public int getReadTimeoutMillis() { - return readTimeoutMillis; - } - - /** - * Sets the paging state. - *

- * This will cause the next execution of this statement to fetch results from a given - * page, rather than restarting from the beginning. - *

- * You get the paging state from a previous execution of the statement (see - * {@link ExecutionInfo#getPagingState()}. - * This is typically used to iterate in a "stateless" manner (e.g. across HTTP requests): - *

-     * {@code
-     * Statement st = new SimpleStatement("your query");
-     * ResultSet rs = session.execute(st.setFetchSize(20));
-     * int available = rs.getAvailableWithoutFetching();
-     * for (int i = 0; i < available; i++) {
-     *     Row row = rs.one();
-     *     // Do something with row (e.g. display it to the user...)
-     * }
-     * // Get state and serialize as string or byte[] to store it for the next execution
-     * // (e.g. pass it as a parameter in the "next page" URI)
-     * PagingState pagingState = rs.getExecutionInfo().getPagingState();
-     * String savedState = pagingState.toString();
-     *
-     * // Next execution:
-     * // Get serialized state back (e.g. get URI parameter)
-     * String savedState = ...
-     * Statement st = new SimpleStatement("your query");
-     * st.setPagingState(PagingState.fromString(savedState));
-     * ResultSet rs = session.execute(st.setFetchSize(20));
-     * int available = rs.getAvailableWithoutFetching();
-     * for (int i = 0; i < available; i++) {
-     *     ...
-     * }
-     * }
-     * 
- *

- * The paging state can only be reused between perfectly identical statements - * (same query string, same bound parameters). Altering the contents of the paging state - * or trying to set it on a different statement will cause this method to fail. - *

- * Note that, due to internal implementation details, the paging state is not portable - * across native protocol versions (see the - * online documentation - * for more explanations about the native protocol). - * This means that {@code PagingState} instances generated with an old version won't work - * with a higher version. If that is a problem for you, consider using the "unsafe" API (see - * {@link #setPagingStateUnsafe(byte[])}). - * - * @param pagingState the paging state to set, or {@code null} to remove any state that was - * previously set on this statement. - * @param codecRegistry the codec registry that will be used if this method needs to serialize the - * statement's values in order to check that the paging state matches. - * @return this {@code Statement} object. - * @throws PagingStateException if the paging state does not match this statement. - * @see #setPagingState(PagingState) - */ - public Statement setPagingState(PagingState pagingState, CodecRegistry codecRegistry) { - if (this instanceof BatchStatement) { - throw new UnsupportedOperationException("Cannot set the paging state on a batch statement"); - } else { - if (pagingState == null) { - this.pagingState = null; - } else if (pagingState.matches(this, codecRegistry)) { - this.pagingState = pagingState.getRawState(); - } else { - throw new PagingStateException("Paging state mismatch, " - + "this means that either the paging state contents were altered, " - + "or you're trying to apply it to a different statement"); - } + return ConsistencyLevel.ONE; } - return this; + }; + + private volatile ConsistencyLevel consistency; + private volatile ConsistencyLevel serialConsistency; + private volatile boolean traceQuery; + private volatile int fetchSize; + private volatile long defaultTimestamp = Long.MIN_VALUE; + private volatile int readTimeoutMillis = Integer.MIN_VALUE; + private volatile RetryPolicy retryPolicy; + private volatile ByteBuffer pagingState; + protected volatile Boolean idempotent; + private volatile Map outgoingPayload; + private volatile Host host; + private volatile int nowInSeconds = Integer.MIN_VALUE; + + // We don't want to expose the constructor, because the code relies on this being only sub-classed + // by RegularStatement, BoundStatement and BatchStatement + Statement() {} + + /** + * Sets the consistency level for the query. + * + * @param consistency the consistency level to set. + * @return this {@code Statement} object. + */ + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + /** + * The consistency level for this query. + * + * @return the consistency level for this query, or {@code null} if no consistency level has been + * specified (through {@code setConsistencyLevel}). In the latter case, the default + * consistency level will be used. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * Sets the serial consistency level for the query. + * + *

The serial consistency level is only used by conditional updates ({@code INSERT}, {@code + * UPDATE} or {@code DELETE} statements with an {@code IF} condition). For those, the serial + * consistency level defines the consistency level of the serial phase (or "paxos" phase) while + * the normal consistency level defines the consistency for the "learn" phase, i.e. what type of + * reads will be guaranteed to see the update right away. For instance, if a conditional write has + * a regular consistency of QUORUM (and is successful), then a QUORUM read is guaranteed to see + * that write. But if the regular consistency of that write is ANY, then only a read with a + * consistency of SERIAL is guaranteed to see it (even a read with consistency ALL is not + * guaranteed to be enough). + * + *

The serial consistency can only be one of {@code ConsistencyLevel.SERIAL} or {@code + * ConsistencyLevel.LOCAL_SERIAL}. While {@code ConsistencyLevel.SERIAL} guarantees full + * linearizability (with other SERIAL updates), {@code ConsistencyLevel.LOCAL_SERIAL} only + * guarantees it in the local data center. + * + *
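As an illustration of the two levels working together, here is a minimal, hedged sketch of a conditional insert; the `session` variable and the `users` table are assumptions made for this example, not part of this class:

```java
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

class SerialConsistencyExample {
  static void insertIfNotExists(Session session) { // "session" is assumed to exist
    Statement insert =
        new SimpleStatement("INSERT INTO users (id, name) VALUES (?, ?) IF NOT EXISTS", 42, "alice")
            // "learn" phase: a later QUORUM read will see the row if the insert was applied
            .setConsistencyLevel(ConsistencyLevel.QUORUM)
            // "paxos" phase: LOCAL_SERIAL keeps the linearizability guarantee within the local DC
            .setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL);
    session.execute(insert);
  }
}
```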

The serial consistency level is ignored for any query that is not a conditional update + * (serial reads should use the regular consistency level for instance). + * + * @param serialConsistency the serial consistency level to set. + * @return this {@code Statement} object. + * @throws IllegalArgumentException if {@code serialConsistency} is not one of {@code + * ConsistencyLevel.SERIAL} or {@code ConsistencyLevel.LOCAL_SERIAL}. + */ + public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + if (!serialConsistency.isSerial()) + throw new IllegalArgumentException( + "Supplied consistency level is not serial: " + serialConsistency); + this.serialConsistency = serialConsistency; + return this; + } + + /** + * The serial consistency level for this query. + * + *

See {@link #setSerialConsistencyLevel(ConsistencyLevel)} for more detail on the serial + * consistency level. + * + * @return the serial consistency level for this query, or {@code null} if no serial consistency + * level has been specified (through {@link #setSerialConsistencyLevel(ConsistencyLevel)}). In + * the latter case, the default serial consistency level will be used. + */ + public ConsistencyLevel getSerialConsistencyLevel() { + return serialConsistency; + } + + /** + * Enables tracing for this query. + * + *

By default (that is unless you call this method), tracing is not enabled. + * + * @return this {@code Statement} object. + */ + public Statement enableTracing() { + this.traceQuery = true; + return this; + } + + /** + * Disables tracing for this query. + * + * @return this {@code Statement} object. + */ + public Statement disableTracing() { + this.traceQuery = false; + return this; + } + + /** + * Returns whether tracing is enabled for this query or not. + * + * @return {@code true} if this query has tracing enabled, {@code false} otherwise. + */ + public boolean isTracing() { + return traceQuery; + } + + /** + * Returns the routing key (in binary raw form) to use for token aware routing of this query. + * + *

The routing key is optional in that implementers are free to return {@code null}. The + * routing key is an hint used for token-aware routing (see {@link + * com.datastax.driver.core.policies.TokenAwarePolicy}), and if provided should correspond to the + * binary value for the query partition key. However, not providing a routing key never causes a + * query to fail and if the load balancing policy used is not token aware, then the routing key + * can be safely ignored. + * + * @param protocolVersion the protocol version that will be used if the actual implementation + * needs to serialize something to compute the key. + * @param codecRegistry the codec registry that will be used if the actual implementation needs to + * serialize something to compute this key. + * @return the routing key for this query or {@code null}. + */ + public abstract ByteBuffer getRoutingKey( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry); + + /** + * Returns the keyspace this query operates on. + * + *

Note that not all query specify on which keyspace they operate on, and so this method can + * always return {@code null}. Firstly, some queries do not operate inside a keyspace: keyspace + * creation, {@code USE} queries, user creation, etc. Secondly, even query that operate within a + * keyspace do not have to specify said keyspace directly, in which case the currently logged in + * keyspace (the one set through a {@code USE} query (or through the use of {@link + * Cluster#connect(String)})). Lastly, as for the routing key, this keyspace information is only a + * hint for token-aware routing (since replica placement depend on the replication strategy in use + * which is a per-keyspace property) and having this method return {@code null} (or even a bogus + * keyspace name) will never cause the query to fail. + * + * @return the keyspace this query operate on if relevant or {@code null}. + */ + public abstract String getKeyspace(); + + /** + * Sets the retry policy to use for this query. + * + *

The default retry policy, if this method is not called, is the one returned by {@link + * com.datastax.driver.core.policies.Policies#getRetryPolicy} in the cluster configuration. This + * method is thus only useful in case you want to punctually override the default policy for this + * request. + * + * @param policy the retry policy to use for this query. + * @return this {@code Statement} object. + */ + public Statement setRetryPolicy(RetryPolicy policy) { + this.retryPolicy = policy; + return this; + } + + /** + * Returns the retry policy sets for this query, if any. + * + * @return the retry policy sets specifically for this query or {@code null} if no query specific + * retry policy has been set through {@link #setRetryPolicy} (in which case the Cluster retry + * policy will apply if necessary). + */ + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + /** + * Sets the query fetch size. + * + *

The fetch size controls how much resulting rows will be retrieved simultaneously (the goal + * being to avoid loading too much results in memory for queries yielding large results). Please + * note that while value as low as 1 can be used, it is *highly* discouraged to use such a low + * value in practice as it will yield very poor performance. If in doubt, leaving the default is + * probably a good idea. + * + *
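A short sketch of how a fetch size is typically applied; the `session` variable and the `events` table are assumed for illustration only:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class FetchSizeExample {
  static void scan(Session session) { // "session" and the "events" table are assumptions
    ResultSet rs =
        session.execute(
            new SimpleStatement("SELECT id, payload FROM events")
                // 500 rows per page; the driver fetches the next page transparently while iterating
                .setFetchSize(500));
    for (Row row : rs) {
      System.out.println(row.getInt("id")); // placeholder processing
    }
  }
}
```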

Only {@code SELECT} queries ever make use of this setting. + * + *

Note that unlike other configuration, when this statement is prepared {@link + * BoundStatement}s created off of {@link PreparedStatement} do not inherit this configuration. + * + *

Note: Paging is not supported with the native protocol version 1. If you call this method + * with {@code fetchSize > 0} and {@code fetchSize != Integer.MAX_VALUE} and the protocol + * version is in use (i.e. if you've force version 1 through {@link + * Cluster.Builder#withProtocolVersion} or you use Cassandra 1.2), you will get {@link + * UnsupportedProtocolVersionException} when submitting this statement for execution. + * + * @param fetchSize the fetch size to use. If {@code fetchSize <e; 0}, the default fetch size + * will be used. To disable paging of the result set, use {@code fetchSize == + * Integer.MAX_VALUE}. + * @return this {@code Statement} object. + */ + public Statement setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + return this; + } + + /** + * The fetch size for this query. + * + * @return the fetch size for this query. If that value is less or equal to 0 (the default unless + * {@link #setFetchSize} is used), the default fetch size will be used. + */ + public int getFetchSize() { + return fetchSize; + } + + /** + * Sets the default timestamp for this query (in microseconds since the epoch). + * + *

This feature is only available when version {@link ProtocolVersion#V3 V3} or higher of the + * native protocol is in use. With earlier versions, calling this method has no effect. + * + *

The actual timestamp that will be used for this query is, in order of preference: + * + *

    + *
  • the timestamp specified directly in the CQL query string (using the {@code USING + * TIMESTAMP} syntax); + *
  • the timestamp specified through this method, if different from {@link Long#MIN_VALUE}; + *
  • the timestamp returned by the {@link TimestampGenerator} currently in use, if different + * from {@link Long#MIN_VALUE}. + *
+ * + * If none of these apply, no timestamp will be sent with the query and Cassandra will generate a + * server-side one (similar to the pre-V3 behavior). + * + *
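A hedged sketch of supplying an explicit client-side timestamp, in microseconds; `session` and the `users` table are assumptions for the example:

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import java.util.concurrent.TimeUnit;

class DefaultTimestampExample {
  static void updateWithTimestamp(Session session) { // "session" is assumed to exist
    long nowMicros = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis());
    session.execute(
        new SimpleStatement("UPDATE users SET name = 'alice' WHERE id = 42")
            // explicit write timestamp, overriding the cluster's TimestampGenerator for this query
            .setDefaultTimestamp(nowMicros));
  }
}
```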

Note that unlike other configuration, when this statement is prepared {@link + * BoundStatement}s created off of {@link PreparedStatement} do not inherit this configuration. + * + * @param defaultTimestamp the default timestamp for this query (must be strictly positive). + * @return this {@code Statement} object. + * @see Cluster.Builder#withTimestampGenerator(TimestampGenerator) + */ + public Statement setDefaultTimestamp(long defaultTimestamp) { + this.defaultTimestamp = defaultTimestamp; + return this; + } + + /** + * The default timestamp for this query. + * + * @return the default timestamp (in microseconds since the epoch). + */ + public long getDefaultTimestamp() { + return defaultTimestamp; + } + + /** + * Overrides the default per-host read timeout ({@link SocketOptions#getReadTimeoutMillis()}) for + * this statement. + * + *

You should override this only for statements for which the coordinator may allow a longer + * server-side timeout (for example aggregation queries). + * + *
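For example, a long-running aggregation might be given a larger per-statement timeout; this is only a sketch, with `session` and the `events` table assumed:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class ReadTimeoutExample {
  static long countEvents(Session session) { // "session" and "events" are assumptions
    ResultSet rs =
        session.execute(
            new SimpleStatement("SELECT count(*) FROM events")
                // allow up to 2 minutes for this statement only; others keep the SocketOptions default
                .setReadTimeoutMillis(120_000));
    return rs.one().getLong(0);
  }
}
```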

Note that unlike other configuration, when this statement is prepared {@link + * BoundStatement}s created off of {@link PreparedStatement} do not inherit this configuration. + * + * @param readTimeoutMillis the timeout to set. Negative values are not allowed. If it is 0, the + * read timeout will be disabled for this statement. + * @return this {@code Statement} object. + */ + public Statement setReadTimeoutMillis(int readTimeoutMillis) { + Preconditions.checkArgument(readTimeoutMillis >= 0, "read timeout must be >= 0"); + this.readTimeoutMillis = readTimeoutMillis; + return this; + } + + /** + * Return the per-host read timeout that was set for this statement. + * + * @return the timeout. Note that a negative value means that the default {@link + * SocketOptions#getReadTimeoutMillis()} will be used. + */ + public int getReadTimeoutMillis() { + return readTimeoutMillis; + } + + /** + * Sets the paging state. + * + *

This will cause the next execution of this statement to fetch results from a given page, + * rather than restarting from the beginning. + * + *

You get the paging state from a previous execution of the statement (see {@link + * ExecutionInfo#getPagingState()}. This is typically used to iterate in a "stateless" manner + * (e.g. across HTTP requests): + * + *

{@code
+   * Statement st = new SimpleStatement("your query");
+   * ResultSet rs = session.execute(st.setFetchSize(20));
+   * int available = rs.getAvailableWithoutFetching();
+   * for (int i = 0; i < available; i++) {
+   *     Row row = rs.one();
+   *     // Do something with row (e.g. display it to the user...)
+   * }
+   * // Get state and serialize as string or byte[] to store it for the next execution
+   * // (e.g. pass it as a parameter in the "next page" URI)
+   * PagingState pagingState = rs.getExecutionInfo().getPagingState();
+   * String savedState = pagingState.toString();
+   *
+   * // Next execution:
+   * // Get serialized state back (e.g. get URI parameter)
+   * String savedState = ...
+   * Statement st = new SimpleStatement("your query");
+   * st.setPagingState(PagingState.fromString(savedState));
+   * ResultSet rs = session.execute(st.setFetchSize(20));
+   * int available = rs.getAvailableWithoutFetching();
+   * for (int i = 0; i < available; i++) {
+   *     ...
+   * }
+   * }
+ * + *

The paging state can only be reused between perfectly identical statements (same query + * string, same bound parameters). Altering the contents of the paging state or trying to set it + * on a different statement will cause this method to fail. + * + *

Note that, due to internal implementation details, the paging state is not portable across + * native protocol versions (see the online documentation + * for more explanations about the native protocol). This means that {@code PagingState} instances + * generated with an old version won't work with a higher version. If that is a problem for you, + * consider using the "unsafe" API (see {@link #setPagingStateUnsafe(byte[])}). + * + * @param pagingState the paging state to set, or {@code null} to remove any state that was + * previously set on this statement. + * @param codecRegistry the codec registry that will be used if this method needs to serialize the + * statement's values in order to check that the paging state matches. + * @return this {@code Statement} object. + * @throws PagingStateException if the paging state does not match this statement. + * @see #setPagingState(PagingState) + */ + public Statement setPagingState(PagingState pagingState, CodecRegistry codecRegistry) { + if (this instanceof BatchStatement) { + throw new UnsupportedOperationException("Cannot set the paging state on a batch statement"); + } else { + if (pagingState == null) { + this.pagingState = null; + } else if (pagingState.matches(this, codecRegistry)) { + this.pagingState = pagingState.getRawState(); + } else { + throw new PagingStateException( + "Paging state mismatch, " + + "this means that either the paging state contents were altered, " + + "or you're trying to apply it to a different statement"); + } } - - /** - * Sets the paging state. - *

- * This method calls {@link #setPagingState(PagingState, CodecRegistry)} with {@link CodecRegistry#DEFAULT_INSTANCE}. - * Whether you should use this or the other variant depends on the type of statement this is - * called on: - *

    - *
  • for a {@link BoundStatement}, the codec registry isn't actually needed, so it's always safe to use this method;
  • for a {@link SimpleStatement} or {@link BuiltStatement}, you can use this method if you use no custom codecs, or if your custom codecs are registered with the default registry. Otherwise, use the other method and provide the registry that contains your codecs.
- * - * @param pagingState the paging state to set, or {@code null} to remove any state that was - * previously set on this statement. - */ - public Statement setPagingState(PagingState pagingState) { - return setPagingState(pagingState, CodecRegistry.DEFAULT_INSTANCE); + return this; + } + + /** + * Sets the paging state. + * + *

This method calls {@link #setPagingState(PagingState, CodecRegistry)} with {@link + * CodecRegistry#DEFAULT_INSTANCE}. Whether you should use this or the other variant depends on + * the type of statement this is called on: + * + *

    + *
  • for a {@link BoundStatement}, the codec registry isn't actually needed, so it's always + * safe to use this method; + *
  • for a {@link SimpleStatement} or {@link BuiltStatement}, you can use this method if you + * use no custom codecs, or if your custom codecs are registered with the default registry. + * Otherwise, use the other method and provide the registry that contains your codecs. + *
+ * + * @param pagingState the paging state to set, or {@code null} to remove any state that was + * previously set on this statement. + */ + public Statement setPagingState(PagingState pagingState) { + return setPagingState(pagingState, CodecRegistry.DEFAULT_INSTANCE); + } + + /** + * Sets the paging state. + * + *

Contrary to {@link #setPagingState(PagingState)}, this method takes the "raw" form of the + * paging state (previously extracted with {@link ExecutionInfo#getPagingStateUnsafe()}. It won't + * validate that this statement matches the one that the paging state was extracted from. If the + * paging state was altered in any way, you will get unpredictable behavior from Cassandra + * (ranging from wrong results to a query failure). If you decide to use this variant, it is + * strongly recommended to add your own validation (for example, signing the raw state with a + * private key). + * + * @param pagingState the paging state to set, or {@code null} to remove any state that was + * previously set on this statement. + * @return this {@code Statement} object. + */ + public Statement setPagingStateUnsafe(byte[] pagingState) { + if (pagingState == null) { + this.pagingState = null; + } else { + this.pagingState = ByteBuffer.wrap(pagingState); } - - /** - * Sets the paging state. - *

- * Contrary to {@link #setPagingState(PagingState)}, this method takes the "raw" form of the - * paging state (previously extracted with {@link ExecutionInfo#getPagingStateUnsafe()}. - * It won't validate that this statement matches the one that the paging state was extracted from. - * If the paging state was altered in any way, you will get unpredictable behavior from - * Cassandra (ranging from wrong results to a query failure). If you decide to use this variant, - * it is strongly recommended to add your own validation (for example, signing the raw state with - * a private key). - * - * @param pagingState the paging state to set, or {@code null} to remove any state that was - * previously set on this statement. - * @return this {@code Statement} object. - */ - public Statement setPagingStateUnsafe(byte[] pagingState) { - if (pagingState == null) { - this.pagingState = null; - } else { - this.pagingState = ByteBuffer.wrap(pagingState); - } - return this; - } - - ByteBuffer getPagingState() { - return pagingState; - } - - /** - * Sets whether this statement is idempotent. - *

- * See {@link #isIdempotent()} for more explanations about this property. - * - * @param idempotent the new value. - * @return this {@code Statement} object. - */ - public Statement setIdempotent(boolean idempotent) { - this.idempotent = idempotent; - return this; - } - - /** - * Whether this statement is idempotent, i.e. whether it can be applied multiple times - * without changing the result beyond the initial application. - *

- * If a statement is not idempotent, the driver will ensure that it never gets executed more than once, - * which means: - *

    - *
  • avoiding {@link RetryPolicy retries} on write timeouts or request errors;
  • never scheduling {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy speculative executions}.
- * (this behavior is implemented in the driver internals, the corresponding policies will not even be invoked). - *

- * Note that this method can return {@code null}, in which case the driver will default to - * {@link QueryOptions#getDefaultIdempotence()}. - *

- * By default, this method returns {@code null} for all statements, except for - *

    - *
  • {@link BuiltStatement} - value will be inferred from the query: if it updates counters, prepends/appends to a list, or uses a function call or {@link com.datastax.driver.core.querybuilder.QueryBuilder#raw(String)} anywhere in an inserted value, the result will be {@code false}; otherwise it will be {@code true}.
  • {@link com.datastax.driver.core.querybuilder.Batch} and {@link BatchStatement}:
    1. If any statement in batch has isIdempotent() false - return false
    2. If no statements with isIdempotent() false, but some have isIdempotent() null - return null
    3. Otherwise - return true
- * In all cases, calling {@link #setIdempotent(boolean)} forces a value that overrides calculated value. - *

- * Note that when a statement is prepared ({@link Session#prepare(String)}), its idempotence flag will be propagated - * to all {@link PreparedStatement}s created from it. - * - * @return whether this statement is idempotent, or {@code null} to use - * {@link QueryOptions#getDefaultIdempotence()}. - */ - public Boolean isIdempotent() { - return idempotent; - } - - boolean isIdempotentWithDefault(QueryOptions queryOptions) { - Boolean myValue = this.isIdempotent(); - if (myValue != null) - return myValue; - else - return queryOptions.getDefaultIdempotence(); - } - - /** - * Returns this statement's outgoing payload. - * Each time this statement is executed, this payload will be included in the query request. - *

- * This method returns {@code null} if no payload has been set, otherwise - * it always returns immutable maps. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above. - * Trying to include custom payloads in requests sent by the driver - * under lower protocol versions will result in an - * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} - * (wrapped in a {@link com.datastax.driver.core.exceptions.NoHostAvailableException}). - * - * @return the outgoing payload to include with this statement, - * or {@code null} if no payload has been set. - * @since 2.2 - */ - public Map getOutgoingPayload() { - return outgoingPayload; - } - - /** - * Set the given outgoing payload on this statement. - * Each time this statement is executed, this payload will be included in the query request. - *

- * This method makes a defensive copy of the given map, but its values - * remain inherently mutable. Care should be taken not to modify the original map - * once it is passed to this method. - *

- * This feature is only available with {@link ProtocolVersion#V4} or above. - * Trying to include custom payloads in requests sent by the driver - * under lower protocol versions will result in an - * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} - * (wrapped in a {@link com.datastax.driver.core.exceptions.NoHostAvailableException}). - * - * @param payload the outgoing payload to include with this statement, - * or {@code null} to clear any previously entered payload. - * @return this {@link Statement} object. - * @since 2.2 - */ - public Statement setOutgoingPayload(Map payload) { - this.outgoingPayload = payload == null ? null : ImmutableMap.copyOf(payload); - return this; - } - - protected static Boolean isBatchIdempotent(Collection statements) { - boolean hasNullIdempotentStatements = false; - for (Statement statement : statements) { - Boolean innerIdempotent = statement.isIdempotent(); - if (innerIdempotent == null) { - hasNullIdempotentStatements = true; - } else if (!innerIdempotent) { - return false; - } - } - return (hasNullIdempotentStatements) ? null : true; + return this; + } + + ByteBuffer getPagingState() { + return pagingState; + } + + /** + * Sets whether this statement is idempotent. + * + *

See {@link #isIdempotent()} for more explanations about this property. + * + * @param idempotent the new value. + * @return this {@code Statement} object. + */ + public Statement setIdempotent(boolean idempotent) { + this.idempotent = idempotent; + return this; + } + + /** + * Whether this statement is idempotent, i.e. whether it can be applied multiple times without + * changing the result beyond the initial application. + * + *

If a statement is not idempotent, the driver will ensure that it never gets + * executed more than once, which means: + * + *

    + *
  • avoiding {@link RetryPolicy retries} on write timeouts or request errors; + *
  • never scheduling {@link com.datastax.driver.core.policies.SpeculativeExecutionPolicy + * speculative executions}. + *
+ * + * (this behavior is implemented in the driver internals, the corresponding policies will not even + * be invoked). + * + *

Note that this method can return {@code null}, in which case the driver will default to + * {@link QueryOptions#getDefaultIdempotence()}. + * + *

By default, this method returns {@code null} for all statements, except for + * + *

    + *
  • {@link BuiltStatement} - value will be inferred from the query: if it updates counters, + * prepends/appends to a list, or uses a function call or {@link + * com.datastax.driver.core.querybuilder.QueryBuilder#raw(String)} anywhere in an inserted + * value, the result will be {@code false}; otherwise it will be {@code true}. + *
  • {@link com.datastax.driver.core.querybuilder.Batch} and {@link BatchStatement}: + *
      + *
    1. If any statement in batch has isIdempotent() false - return false + *
    2. If no statements with isIdempotent() false, but some have isIdempotent() null - + * return null + *
    3. Otherwise - return true + *
    + *
+ * + * In all cases, calling {@link #setIdempotent(boolean)} forces a value that overrides calculated + * value. + * + *
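A minimal sketch of forcing the flag on a statement whose idempotence the driver does not infer; `session` and the `users` table are assumptions for the example:

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class IdempotenceExample {
  static ResultSet readUser(Session session, int id) { // "session" and "users" are assumptions
    return session.execute(
        new SimpleStatement("SELECT name FROM users WHERE id = ?", id)
            // a plain SELECT is safe to replay, so allow retries and speculative executions
            .setIdempotent(true));
  }
}
```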

Note that when a statement is prepared ({@link Session#prepare(String)}), its idempotence + * flag will be propagated to all {@link PreparedStatement}s created from it. + * + * @return whether this statement is idempotent, or {@code null} to use {@link + * QueryOptions#getDefaultIdempotence()}. + */ + public Boolean isIdempotent() { + return idempotent; + } + + boolean isIdempotentWithDefault(QueryOptions queryOptions) { + Boolean myValue = this.isIdempotent(); + if (myValue != null) return myValue; + else return queryOptions.getDefaultIdempotence(); + } + + /** + * Returns this statement's outgoing payload. Each time this statement is executed, this payload + * will be included in the query request. + * + *

This method returns {@code null} if no payload has been set, otherwise it always returns + * immutable maps. + * + *

This feature is only available with {@link ProtocolVersion#V4} or above. Trying to include + * custom payloads in requests sent by the driver under lower protocol versions will result in an + * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} (wrapped in a {@link + * com.datastax.driver.core.exceptions.NoHostAvailableException}). + * + * @return the outgoing payload to include with this statement, or {@code null} if no payload has + * been set. + * @since 2.2 + */ + public Map getOutgoingPayload() { + return outgoingPayload; + } + + /** + * Set the given outgoing payload on this statement. Each time this statement is executed, this + * payload will be included in the query request. + * + *

This method makes a defensive copy of the given map, but its values remain inherently + * mutable. Care should be taken not to modify the original map once it is passed to this method. + * + *
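A hedged sketch of attaching a custom payload; the key name is purely illustrative and only meaningful to a server-side query handler that knows how to read it, and `session` is assumed to exist:

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.google.common.collect.ImmutableMap;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;

class OutgoingPayloadExample {
  static void executeWithPayload(Session session) { // "session" and the payload key are assumptions
    Map<String, ByteBuffer> payload =
        ImmutableMap.of(
            "request-tag", ByteBuffer.wrap("checkout".getBytes(StandardCharsets.UTF_8)));
    session.execute(
        new SimpleStatement("SELECT * FROM orders WHERE id = 42")
            .setOutgoingPayload(payload)); // requires native protocol V4 or above
  }
}
```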

This feature is only available with {@link ProtocolVersion#V4} or above. Trying to include + * custom payloads in requests sent by the driver under lower protocol versions will result in an + * {@link com.datastax.driver.core.exceptions.UnsupportedFeatureException} (wrapped in a {@link + * com.datastax.driver.core.exceptions.NoHostAvailableException}). + * + * @param payload the outgoing payload to include with this statement, or {@code null} to clear + * any previously entered payload. + * @return this {@link Statement} object. + * @since 2.2 + */ + public Statement setOutgoingPayload(Map payload) { + this.outgoingPayload = payload == null ? null : ImmutableMap.copyOf(payload); + return this; + } + + /** + * Returns the number of bytes required to encode this statement. + * + *

The calculated size may be overestimated by a few bytes, but is never underestimated. If the + * size cannot be calculated, this method returns -1. + * + *

Note that the returned value is not cached, but instead recomputed at every method call. + * + * @return the number of bytes required to encode this statement. + */ + public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return -1; + } + + protected static Boolean isBatchIdempotent(Collection statements) { + boolean hasNullIdempotentStatements = false; + for (Statement statement : statements) { + Boolean innerIdempotent = statement.isIdempotent(); + if (innerIdempotent == null) { + hasNullIdempotentStatements = true; + } else if (!innerIdempotent) { + return false; + } } + return (hasNullIdempotentStatements) ? null : true; + } + + /** + * @return The host configured on this statement, or null if none is configured. + * @see #setHost(Host) + */ + public Host getHost() { + return host; + } + + /** + * Sets the {@link Host} that should handle this query. + * + *

In the general case, use of this method is heavily discouraged and should only be + * used in the following cases: + * + *

    + *
  1. Querying node-local tables, such as tables in the {@code system} and {@code system_views} + * keyspaces. + *
  2. Applying a series of schema changes, where it may be advantageous to execute schema + * changes in sequence on the same node. + *
+ * + *

Configuring a specific host causes the configured {@link LoadBalancingPolicy} to be + * completely bypassed. However, if the load balancing policy dictates that the host is at + * distance {@link HostDistance#IGNORED} or there is no active connectivity to the host, the + * request will fail with a {@link NoHostAvailableException}. + * + *
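A sketch of the node-local use case mentioned above, querying `system.local` on every node; the `cluster` and `session` variables are assumptions for the example:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

class SetHostExample {
  static void printReleaseVersions(Cluster cluster, Session session) { // both assumed to exist
    for (Host host : cluster.getMetadata().getAllHosts()) {
      Row row =
          session
              .execute(
                  new SimpleStatement("SELECT release_version FROM system.local")
                      // pin this execution to one node, bypassing the load balancing policy
                      .setHost(host))
              .one();
      System.out.println(host + " => " + row.getString("release_version"));
    }
  }
}
```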

Note that unlike other configuration, when this statement is prepared {@link + * BoundStatement}s created off of {@link PreparedStatement} do not inherit this configuration. + * + * @param host The host that should be used to handle executions of this statement or null to + * delegate to the configured load balancing policy. + * @return this {@code Statement} object. + */ + public Statement setHost(Host host) { + this.host = host; + return this; + } + + /** + * @return a custom "now in seconds" to use when applying the request (for testing purposes). + * {@link Integer#MIN_VALUE} means "no value". + */ + public int getNowInSeconds() { + return nowInSeconds; + } + + /** + * Sets the "now in seconds" to use when applying the request (for testing purposes). {@link + * Integer#MIN_VALUE} means "no value". + */ + public Statement setNowInSeconds(int nowInSeconds) { + this.nowInSeconds = nowInSeconds; + return this; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java b/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java index 9443611376d..ffcb26a3ad7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java +++ b/driver-core/src/main/java/com/datastax/driver/core/StatementWrapper.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,20 +20,19 @@ import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; - import java.nio.ByteBuffer; import java.util.Map; /** * Base class for custom {@link Statement} implementations that wrap another statement. - *

- * This is intended for use with a custom {@link RetryPolicy}, {@link LoadBalancingPolicy} or - * {@link SpeculativeExecutionPolicy}. The client code can wrap a statement to "mark" it, or - * add information that will lead to special handling in the policy. - *

- * Example: - *

- * {@code
+ *
+ * 

This is intended for use with a custom {@link RetryPolicy}, {@link LoadBalancingPolicy} or + * {@link SpeculativeExecutionPolicy}. The client code can wrap a statement to "mark" it, or add + * information that will lead to special handling in the policy. + * + *

Example: + * + *

{@code
  * // Define your own subclass
  * public class MyCustomStatement extends StatementWrapper {
  *     public MyCustomStatement(Statement wrapped) {
@@ -54,155 +55,195 @@
  * Statement s = new SimpleStatement("...");
  * session.execute(s);                         // will use default plan
  * session.execute(new MyCustomStatement(s));  // will use special plan
- * }
- * 
+ * }
*/ public abstract class StatementWrapper extends Statement { - private final Statement wrapped; - - /** - * Builds a new instance. - * - * @param wrapped the wrapped statement. - */ - protected StatementWrapper(Statement wrapped) { - this.wrapped = wrapped; - } - - Statement getWrappedStatement() { - // Protect against multiple levels of wrapping (even though there is no practical reason for that) - return (wrapped instanceof StatementWrapper) - ? ((StatementWrapper) wrapped).getWrappedStatement() - : wrapped; - } - - @Override - public Statement setConsistencyLevel(ConsistencyLevel consistency) { - return wrapped.setConsistencyLevel(consistency); - } - - @Override - public ConsistencyLevel getConsistencyLevel() { - return wrapped.getConsistencyLevel(); - } - - @Override - public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { - return wrapped.setSerialConsistencyLevel(serialConsistency); - } - - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return wrapped.getSerialConsistencyLevel(); - } - - @Override - public Statement enableTracing() { - return wrapped.enableTracing(); - } - - @Override - public Statement disableTracing() { - return wrapped.disableTracing(); - } - - @Override - public boolean isTracing() { - return wrapped.isTracing(); - } - - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return wrapped.getRoutingKey(protocolVersion, codecRegistry); - } - - @Override - public String getKeyspace() { - return wrapped.getKeyspace(); - } - - @Override - public Statement setRetryPolicy(RetryPolicy policy) { - return wrapped.setRetryPolicy(policy); - } - - @Override - public RetryPolicy getRetryPolicy() { - return wrapped.getRetryPolicy(); - } - - @Override - public Statement setFetchSize(int fetchSize) { - return wrapped.setFetchSize(fetchSize); - } - - @Override - public int getFetchSize() { - return wrapped.getFetchSize(); - } - - @Override - public Statement setDefaultTimestamp(long defaultTimestamp) { - return wrapped.setDefaultTimestamp(defaultTimestamp); - } - - @Override - public long getDefaultTimestamp() { - return wrapped.getDefaultTimestamp(); - } - - @Override - public Statement setReadTimeoutMillis(int readTimeoutMillis) { - return wrapped.setReadTimeoutMillis(readTimeoutMillis); - } - - @Override - public int getReadTimeoutMillis() { - return wrapped.getReadTimeoutMillis(); - } - - @Override - public Statement setPagingState(PagingState pagingState, CodecRegistry codecRegistry) { - return wrapped.setPagingState(pagingState, codecRegistry); - } - - @Override - public Statement setPagingState(PagingState pagingState) { - return wrapped.setPagingState(pagingState); - } - - @Override - public Statement setPagingStateUnsafe(byte[] pagingState) { - return wrapped.setPagingStateUnsafe(pagingState); - } - - @Override - public ByteBuffer getPagingState() { - return wrapped.getPagingState(); - } - - @Override - public Statement setIdempotent(boolean idempotent) { - return wrapped.setIdempotent(idempotent); - } - - @Override - public Boolean isIdempotent() { - return wrapped.isIdempotent(); - } - - @Override - public boolean isIdempotentWithDefault(QueryOptions queryOptions) { - return wrapped.isIdempotentWithDefault(queryOptions); - } - - @Override - public Map getOutgoingPayload() { - return wrapped.getOutgoingPayload(); - } - - @Override - public Statement setOutgoingPayload(Map payload) { - return wrapped.setOutgoingPayload(payload); - } + private final Statement wrapped; + 
+ /** + * Builds a new instance. + * + * @param wrapped the wrapped statement. + */ + protected StatementWrapper(Statement wrapped) { + this.wrapped = wrapped; + } + + Statement getWrappedStatement() { + // Protect against multiple levels of wrapping (even though there is no practical reason for + // that) + return (wrapped instanceof StatementWrapper) + ? ((StatementWrapper) wrapped).getWrappedStatement() + : wrapped; + } + + @Override + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + wrapped.setConsistencyLevel(consistency); + return this; + } + + @Override + public ConsistencyLevel getConsistencyLevel() { + return wrapped.getConsistencyLevel(); + } + + @Override + public Statement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) { + wrapped.setSerialConsistencyLevel(serialConsistency); + return this; + } + + @Override + public ConsistencyLevel getSerialConsistencyLevel() { + return wrapped.getSerialConsistencyLevel(); + } + + @Override + public Statement enableTracing() { + wrapped.enableTracing(); + return this; + } + + @Override + public Statement disableTracing() { + wrapped.disableTracing(); + return this; + } + + @Override + public boolean isTracing() { + return wrapped.isTracing(); + } + + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return wrapped.getRoutingKey(protocolVersion, codecRegistry); + } + + @Override + public String getKeyspace() { + return wrapped.getKeyspace(); + } + + @Override + public Statement setRetryPolicy(RetryPolicy policy) { + wrapped.setRetryPolicy(policy); + return this; + } + + @Override + public RetryPolicy getRetryPolicy() { + return wrapped.getRetryPolicy(); + } + + @Override + public Statement setFetchSize(int fetchSize) { + wrapped.setFetchSize(fetchSize); + return this; + } + + @Override + public int getFetchSize() { + return wrapped.getFetchSize(); + } + + @Override + public Statement setDefaultTimestamp(long defaultTimestamp) { + wrapped.setDefaultTimestamp(defaultTimestamp); + return this; + } + + @Override + public long getDefaultTimestamp() { + return wrapped.getDefaultTimestamp(); + } + + @Override + public Statement setReadTimeoutMillis(int readTimeoutMillis) { + wrapped.setReadTimeoutMillis(readTimeoutMillis); + return this; + } + + @Override + public int getReadTimeoutMillis() { + return wrapped.getReadTimeoutMillis(); + } + + @Override + public Statement setPagingState(PagingState pagingState, CodecRegistry codecRegistry) { + wrapped.setPagingState(pagingState, codecRegistry); + return this; + } + + @Override + public Statement setPagingState(PagingState pagingState) { + wrapped.setPagingState(pagingState); + return this; + } + + @Override + public Statement setPagingStateUnsafe(byte[] pagingState) { + wrapped.setPagingStateUnsafe(pagingState); + return this; + } + + @Override + public ByteBuffer getPagingState() { + return wrapped.getPagingState(); + } + + @Override + public Statement setIdempotent(boolean idempotent) { + wrapped.setIdempotent(idempotent); + return this; + } + + @Override + public Boolean isIdempotent() { + return wrapped.isIdempotent(); + } + + @Override + public boolean isIdempotentWithDefault(QueryOptions queryOptions) { + return wrapped.isIdempotentWithDefault(queryOptions); + } + + @Override + public Map getOutgoingPayload() { + return wrapped.getOutgoingPayload(); + } + + @Override + public Statement setOutgoingPayload(Map payload) { + wrapped.setOutgoingPayload(payload); + return this; + } + + @Override + public int 
requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return wrapped.requestSizeInBytes(protocolVersion, codecRegistry); + } + + @Override + public Host getHost() { + return wrapped.getHost(); + } + + @Override + public Statement setHost(Host host) { + wrapped.setHost(host); + return this; + } + + @Override + public int getNowInSeconds() { + return wrapped.getNowInSeconds(); + } + + @Override + public Statement setNowInSeconds(int nowInSeconds) { + wrapped.setNowInSeconds(nowInSeconds); + return this; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java index 325cecffb45..f914adcfc5c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,124 +22,120 @@ /** * Manages a set of integer identifiers. - *

- * Clients can borrow an id with {@link #next()}, and return it to the set with {@link #release(int)}.
- * It is guaranteed that a given id can't be borrowed by two clients at the same time.
- * This class is thread-safe and non-blocking.
- * <p>
- * Implementation notes: we use an atomic long array where each bit represents an id. It is set to 1 if
- * the id is available, 0 otherwise. When looking for an id, we find a long that has remaining 1's and
- * pick the rightmost one.
- * To minimize the average time to find that long, we search the array in a round-robin fashion.
+ *
+ * <p>Clients can borrow an id with {@link #next()}, and return it to the set with {@link
+ * #release(int)}. It is guaranteed that a given id can't be borrowed by two clients at the same
+ * time. This class is thread-safe and non-blocking.
+ *
+ * <p>
Implementation notes: we use an atomic long array where each bit represents an id. It is set + * to 1 if the id is available, 0 otherwise. When looking for an id, we find a long that has + * remaining 1's and pick the rightmost one. To minimize the average time to find that long, we + * search the array in a round-robin fashion. */ class StreamIdGenerator { - static final int MAX_STREAM_PER_CONNECTION_V2 = 128; - static final int MAX_STREAM_PER_CONNECTION_V3 = 32768; - private static final long MAX_UNSIGNED_LONG = -1L; - - static StreamIdGenerator newInstance(ProtocolVersion version) { - return new StreamIdGenerator(streamIdSizeFor(version)); + static final int MAX_STREAM_PER_CONNECTION_V2 = 128; + static final int MAX_STREAM_PER_CONNECTION_V3 = 32768; + private static final long MAX_UNSIGNED_LONG = -1L; + + static StreamIdGenerator newInstance(ProtocolVersion version) { + return new StreamIdGenerator(streamIdSizeFor(version)); + } + + private static int streamIdSizeFor(ProtocolVersion version) { + switch (version) { + case V1: + case V2: + return 1; + case V3: + case V4: + case V5: + case V6: + return 2; + default: + throw version.unsupported(); } + } - private static int streamIdSizeFor(ProtocolVersion version) { - switch (version) { - case V1: - case V2: - return 1; - case V3: - case V4: - case V5: - return 2; - default: - throw version.unsupported(); - } - } + private final AtomicLongArray bits; + private final int maxIds; + private final AtomicInteger offset; - private final AtomicLongArray bits; - private final int maxIds; - private final AtomicInteger offset; + // If a query timeout, we'll stop waiting for it. However in that case, we + // can't release/reuse the ID because we don't know if the response is lost + // or will just come back to use sometimes in the future. In that case, we + // just "mark" the fact that we have one less available ID and marked counts + // how many marks we've put. + private final AtomicInteger marked = new AtomicInteger(0); - // If a query timeout, we'll stop waiting for it. However in that case, we - // can't release/reuse the ID because we don't know if the response is lost - // or will just come back to use sometimes in the future. In that case, we - // just "mark" the fact that we have one less available ID and marked counts - // how many marks we've put. - private final AtomicInteger marked = new AtomicInteger(0); + private StreamIdGenerator(int streamIdSizeInBytes) { + // Stream IDs are signed and we only handle positive values + // (negative stream IDs are for server side initiated streams). + maxIds = 1 << (streamIdSizeInBytes * 8 - 1); - private StreamIdGenerator(int streamIdSizeInBytes) { - // Stream IDs are signed and we only handle positive values - // (negative stream IDs are for server side initiated streams). - maxIds = 1 << (streamIdSizeInBytes * 8 - 1); + // This is true for 1 byte = 128 streams, and therefore for any higher value + assert maxIds % 64 == 0; - // This is true for 1 byte = 128 streams, and therefore for any higher value - assert maxIds % 64 == 0; + // We use one bit in our array of longs to represent each stream ID. + bits = new AtomicLongArray(maxIds / 64); - // We use one bit in our array of longs to represent each stream ID. 
- bits = new AtomicLongArray(maxIds / 64); + // Initialize all bits to 1 + for (int i = 0; i < bits.length(); i++) bits.set(i, MAX_UNSIGNED_LONG); - // Initialize all bits to 1 - for (int i = 0; i < bits.length(); i++) - bits.set(i, MAX_UNSIGNED_LONG); + offset = new AtomicInteger(bits.length() - 1); + } - offset = new AtomicInteger(bits.length() - 1); - } + public int next() { + int previousOffset, myOffset; + do { + previousOffset = offset.get(); + myOffset = (previousOffset + 1) % bits.length(); + } while (!offset.compareAndSet(previousOffset, myOffset)); - public int next() { - int previousOffset, myOffset; - do { - previousOffset = offset.get(); - myOffset = (previousOffset + 1) % bits.length(); - } while (!offset.compareAndSet(previousOffset, myOffset)); - - for (int i = 0; i < bits.length(); i++) { - int j = (i + myOffset) % bits.length(); - - int id = atomicGetAndSetFirstAvailable(j); - if (id >= 0) - return id + (64 * j); - } - return -1; - } + for (int i = 0; i < bits.length(); i++) { + int j = (i + myOffset) % bits.length(); - public void release(int streamId) { - atomicClear(streamId / 64, streamId % 64); + int id = atomicGetAndSetFirstAvailable(j); + if (id >= 0) return id + (64 * j); } - - public void mark(int streamId) { - marked.incrementAndGet(); + return -1; + } + + public void release(int streamId) { + atomicClear(streamId / 64, streamId % 64); + } + + public void mark(int streamId) { + marked.incrementAndGet(); + } + + public void unmark(int streamId) { + marked.decrementAndGet(); + } + + public int maxAvailableStreams() { + return maxIds - marked.get(); + } + + // Returns >= 0 if found and set an id, -1 if no bits are available. + private int atomicGetAndSetFirstAvailable(int idx) { + while (true) { + long l = bits.get(idx); + if (l == 0) return -1; + + // Find the position of the right-most 1-bit + int id = Long.numberOfTrailingZeros(l); + if (bits.compareAndSet(idx, l, l ^ mask(id))) return id; } + } - public void unmark(int streamId) { - marked.decrementAndGet(); + private void atomicClear(int idx, int toClear) { + while (true) { + long l = bits.get(idx); + if (bits.compareAndSet(idx, l, l | mask(toClear))) return; } + } - public int maxAvailableStreams() { - return maxIds - marked.get(); - } - - // Returns >= 0 if found and set an id, -1 if no bits are available. - private int atomicGetAndSetFirstAvailable(int idx) { - while (true) { - long l = bits.get(idx); - if (l == 0) - return -1; - - // Find the position of the right-most 1-bit - int id = Long.numberOfTrailingZeros(l); - if (bits.compareAndSet(idx, l, l ^ mask(id))) - return id; - } - } - - private void atomicClear(int idx, int toClear) { - while (true) { - long l = bits.get(idx); - if (bits.compareAndSet(idx, l, l | mask(toClear))) - return; - } - } - - private static long mask(int id) { - return 1L << id; - } + private static long mask(int id) { + return 1L << id; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java b/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java index c9ed45ae9e4..8d78d81a677 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SystemProperties.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,41 +22,49 @@ /** * Allows overriding internal settings via system properties. - *

- * This is generally reserved for tests or "expert" usage.
+ *
+ * <p>
This is generally reserved for tests or "expert" usage. */ class SystemProperties { - private static final Logger logger = LoggerFactory.getLogger(SystemProperties.class); + private static final Logger logger = LoggerFactory.getLogger(SystemProperties.class); - static int getInt(String key, int defaultValue) { - String stringValue = System.getProperty(key); - if (stringValue == null) { - logger.debug("{} is undefined, using default value {}", key, defaultValue); - return defaultValue; - } - try { - int value = Integer.parseInt(stringValue); - logger.info("{} is defined, using value {}", key, value); - return value; - } catch (NumberFormatException e) { - logger.warn("{} is defined but could not parse value {}, using default value {}", key, stringValue, defaultValue); - return defaultValue; - } + static int getInt(String key, int defaultValue) { + String stringValue = System.getProperty(key); + if (stringValue == null) { + logger.debug("{} is undefined, using default value {}", key, defaultValue); + return defaultValue; } + try { + int value = Integer.parseInt(stringValue); + logger.info("{} is defined, using value {}", key, value); + return value; + } catch (NumberFormatException e) { + logger.warn( + "{} is defined but could not parse value {}, using default value {}", + key, + stringValue, + defaultValue); + return defaultValue; + } + } - static boolean getBoolean(String key, boolean defaultValue) { - String stringValue = System.getProperty(key); - if (stringValue == null) { - logger.debug("{} is undefined, using default value {}", key, defaultValue); - return defaultValue; - } - try { - boolean value = Boolean.parseBoolean(stringValue); - logger.info("{} is defined, using value {}", key, value); - return value; - } catch (NumberFormatException e) { - logger.warn("{} is defined but could not parse value {}, using default value {}", key, stringValue, defaultValue); - return defaultValue; - } + static boolean getBoolean(String key, boolean defaultValue) { + String stringValue = System.getProperty(key); + if (stringValue == null) { + logger.debug("{} is undefined, using default value {}", key, defaultValue); + return defaultValue; + } + try { + boolean value = Boolean.parseBoolean(stringValue); + logger.info("{} is defined, using value {}", key, value); + return value; + } catch (NumberFormatException e) { + logger.warn( + "{} is defined but could not parse value {}, using default value {}", + key, + stringValue, + defaultValue); + return defaultValue; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index c8d133b0833..c88f8183d44 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,442 +18,608 @@ package com.datastax.driver.core; import com.datastax.driver.core.utils.MoreObjects; +import com.google.common.collect.ImmutableSortedSet; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - -/** - * Describes a Table. - */ +/** Describes a Table. */ public class TableMetadata extends AbstractTableMetadata { - private static final Logger logger = LoggerFactory.getLogger(TableMetadata.class); - - private static final String CF_ID_V2 = "cf_id"; - private static final String CF_ID_V3 = "id"; - - private static final String KEY_VALIDATOR = "key_validator"; - private static final String COMPARATOR = "comparator"; - private static final String VALIDATOR = "default_validator"; - - private static final String KEY_ALIASES = "key_aliases"; - private static final String COLUMN_ALIASES = "column_aliases"; - private static final String VALUE_ALIAS = "value_alias"; - - private static final String DEFAULT_KEY_ALIAS = "key"; - private static final String DEFAULT_COLUMN_ALIAS = "column"; - private static final String DEFAULT_VALUE_ALIAS = "value"; - - private static final String FLAGS = "flags"; - private static final String DENSE = "dense"; - private static final String SUPER = "super"; - private static final String COMPOUND = "compound"; - - private static final String EMPTY_TYPE = "empty"; - - private final Map indexes; - - private final Map views; - - private TableMetadata(KeyspaceMetadata keyspace, - String name, - UUID id, - List partitionKey, - List clusteringColumns, - Map columns, - Map indexes, - TableOptionsMetadata options, - List clusteringOrder, - VersionNumber cassandraVersion) { - super(keyspace, name, id, partitionKey, clusteringColumns, columns, options, clusteringOrder, cassandraVersion); - this.indexes = indexes; - this.views = new HashMap(); - } - - static TableMetadata build(KeyspaceMetadata ksm, Row row, Map rawCols, List indexRows, String nameColumn, VersionNumber cassandraVersion, Cluster cluster) { - - String name = row.getString(nameColumn); - - UUID id = null; - - if (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1) - id = row.getUUID(CF_ID_V2); - else if (cassandraVersion.getMajor() > 2) - id = row.getUUID(CF_ID_V3); - - DataTypeClassNameParser.ParseResult comparator = null; - DataTypeClassNameParser.ParseResult keyValidator = null; - List columnAliases = null; - - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - - if (cassandraVersion.getMajor() <= 2) { - comparator = DataTypeClassNameParser.parseWithComposite(row.getString(COMPARATOR), protocolVersion, codecRegistry); - keyValidator = DataTypeClassNameParser.parseWithComposite(row.getString(KEY_VALIDATOR), protocolVersion, codecRegistry); - columnAliases = cassandraVersion.getMajor() >= 2 || row.getString(COLUMN_ALIASES) == null - ? 
Collections.emptyList() - : SimpleJSONParser.parseStringList(row.getString(COLUMN_ALIASES)); - } - - int partitionKeySize = findPartitionKeySize(rawCols.values(), keyValidator); - int clusteringSize; - - boolean isDense; - boolean isCompact; - if (cassandraVersion.getMajor() > 2) { - Set flags = row.getSet(FLAGS, String.class); - isDense = flags.contains(DENSE); - boolean isSuper = flags.contains(SUPER); - boolean isCompound = flags.contains(COMPOUND); - isCompact = isSuper || isDense || !isCompound; - boolean isStaticCompact = !isSuper && !isDense && !isCompound; - if (isStaticCompact) { - rawCols = pruneStaticCompactTableColumns(rawCols); - } - if (isDense) { - rawCols = pruneDenseTableColumnsV3(rawCols); - } - clusteringSize = findClusteringSize(comparator, rawCols.values(), columnAliases, cassandraVersion); + private static final Logger logger = LoggerFactory.getLogger(TableMetadata.class); + + private static final String CF_ID_V2 = "cf_id"; + private static final String CF_ID_V3 = "id"; + + private static final String KEY_VALIDATOR = "key_validator"; + private static final String COMPARATOR = "comparator"; + private static final String VALIDATOR = "default_validator"; + + private static final String KEY_ALIASES = "key_aliases"; + private static final String COLUMN_ALIASES = "column_aliases"; + private static final String VALUE_ALIAS = "value_alias"; + + private static final String DEFAULT_KEY_ALIAS = "key"; + private static final String DEFAULT_COLUMN_ALIAS = "column"; + private static final String DEFAULT_VALUE_ALIAS = "value"; + + private static final String FLAGS = "flags"; + private static final String DENSE = "dense"; + private static final String SUPER = "super"; + private static final String COMPOUND = "compound"; + + private static final String EMPTY_TYPE = "empty"; + + private final Map indexes; + + private final Map views; + + private TableMetadata( + KeyspaceMetadata keyspace, + String name, + UUID id, + List partitionKey, + List clusteringColumns, + Map columns, + Map indexes, + TableOptionsMetadata options, + List clusteringOrder, + VersionNumber cassandraVersion) { + super( + keyspace, + name, + id, + partitionKey, + clusteringColumns, + columns, + options, + clusteringOrder, + cassandraVersion); + this.indexes = indexes; + this.views = new HashMap(); + } + + static TableMetadata build( + KeyspaceMetadata ksm, + Row row, + Map rawCols, + List indexRows, + String nameColumn, + VersionNumber cassandraVersion, + Cluster cluster) { + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + + String name = row.getString(nameColumn); + if (ksm.isVirtual()) { + + // This is always going to be >V3 so key validator can be null + int partitionKeySize = findPartitionKeySize(rawCols.values(), null); + // This is always going to be >V3 so comparator and columnAliases can be null. 
They are not + // used + int clusteringSize = findClusteringSize(null, rawCols.values(), null, cassandraVersion); + LinkedHashMap columns = new LinkedHashMap(); + List partitionKey = + new ArrayList( + Collections.nCopies(partitionKeySize, null)); + List clusteringColumns = + new ArrayList(Collections.nCopies(clusteringSize, null)); + List clusteringOrder = + new ArrayList( + Collections.nCopies(clusteringSize, null)); + Set otherColumns = new TreeSet(columnMetadataComparator); + TableMetadata tm = + new TableMetadata( + ksm, + name, + new UUID(0L, 0L), + partitionKey, + clusteringColumns, + columns, + Collections.emptyMap(), + null, + clusteringOrder, + cassandraVersion); + + for (ColumnMetadata.Raw rawCol : rawCols.values()) { + DataType dataType; + if (cassandraVersion.getMajor() >= 3) { + dataType = + DataTypeCqlNameParser.parse( + rawCol.dataType, cluster, ksm.getName(), ksm.userTypes, null, false, false); } else { - assert comparator != null; - clusteringSize = findClusteringSize(comparator, rawCols.values(), columnAliases, cassandraVersion); - isDense = clusteringSize != comparator.types.size() - 1; - if (isDense) { - rawCols = pruneDenseTableColumnsV2(rawCols); - } - isCompact = isDense || !comparator.isComposite; - } - - List partitionKey = new ArrayList(Collections.nCopies(partitionKeySize, null)); - List clusteringColumns = new ArrayList(Collections.nCopies(clusteringSize, null)); - List clusteringOrder = new ArrayList(Collections.nCopies(clusteringSize, null)); - - // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. - LinkedHashMap columns = new LinkedHashMap(); - LinkedHashMap indexes = new LinkedHashMap(); - - TableOptionsMetadata options = null; - try { - options = new TableOptionsMetadata(row, isCompact, cassandraVersion); - } catch (RuntimeException e) { - // See ControlConnection#refreshSchema for why we'd rather not probably this further. Since table options is one thing - // that tends to change often in Cassandra, it's worth special casing this. - logger.error(String.format("Error parsing schema options for table %s.%s: " - + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\").getOptions() will return null", - ksm.getName(), name, ksm.getName(), name), e); - } - - TableMetadata tm = new TableMetadata(ksm, name, id, partitionKey, clusteringColumns, columns, indexes, options, clusteringOrder, cassandraVersion); - - // We use this temporary set just so non PK columns are added in lexicographical order, which is the one of a - // 'SELECT * FROM ...' - Set otherColumns = new TreeSet(columnMetadataComparator); - - if (cassandraVersion.getMajor() < 2) { - - assert comparator != null; - assert keyValidator != null; - assert columnAliases != null; - - // In C* 1.2, only the REGULAR columns are in the columns schema table, so we need to add the names from - // the aliases (and make sure we handle default aliases). - List keyAliases = row.getString(KEY_ALIASES) == null - ? Collections.emptyList() - : SimpleJSONParser.parseStringList(row.getString(KEY_ALIASES)); - for (int i = 0; i < partitionKey.size(); i++) { - String alias = keyAliases.size() > i ? keyAliases.get(i) : (i == 0 ? DEFAULT_KEY_ALIAS : DEFAULT_KEY_ALIAS + (i + 1)); - partitionKey.set(i, ColumnMetadata.forAlias(tm, alias, keyValidator.types.get(i))); - } - - for (int i = 0; i < clusteringSize; i++) { - String alias = columnAliases.size() > i ? 
columnAliases.get(i) : DEFAULT_COLUMN_ALIAS + (i + 1); - clusteringColumns.set(i, ColumnMetadata.forAlias(tm, alias, comparator.types.get(i))); - clusteringOrder.set(i, comparator.reversed.get(i) ? ClusteringOrder.DESC : ClusteringOrder.ASC); - } - - // if we're dense, chances are that we have a single regular "value" column with an alias - if (isDense) { - String alias = row.isNull(VALUE_ALIAS) ? DEFAULT_VALUE_ALIAS : row.getString(VALUE_ALIAS); - // ...unless the table does not have any regular column, only primary key columns (JAVA-873) - if (!alias.isEmpty()) { - DataType type = DataTypeClassNameParser.parseOne(row.getString(VALIDATOR), protocolVersion, codecRegistry); - otherColumns.add(ColumnMetadata.forAlias(tm, alias, type)); - } - } + dataType = + DataTypeClassNameParser.parseOne(rawCol.dataType, protocolVersion, codecRegistry); } - - for (ColumnMetadata.Raw rawCol : rawCols.values()) { - DataType dataType; - if (cassandraVersion.getMajor() >= 3) { - dataType = DataTypeCqlNameParser.parse(rawCol.dataType, cluster, ksm.getName(), ksm.userTypes, null, false, false); - } else { - dataType = DataTypeClassNameParser.parseOne(rawCol.dataType, protocolVersion, codecRegistry); - } - ColumnMetadata col = ColumnMetadata.fromRaw(tm, rawCol, dataType); - switch (rawCol.kind) { - case PARTITION_KEY: - partitionKey.set(rawCol.position, col); - break; - case CLUSTERING_COLUMN: - clusteringColumns.set(rawCol.position, col); - clusteringOrder.set(rawCol.position, rawCol.isReversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - otherColumns.add(col); - break; - } - - // legacy secondary indexes (C* < 3.0) - IndexMetadata index = IndexMetadata.fromLegacy(col, rawCol); - if (index != null) - indexes.put(index.getName(), index); + ColumnMetadata cm = ColumnMetadata.fromRaw(tm, rawCol, dataType); + + switch (rawCol.kind) { + case PARTITION_KEY: + partitionKey.set(rawCol.position, cm); + break; + case CLUSTERING_COLUMN: + clusteringColumns.set(rawCol.position, cm); + clusteringOrder.set( + rawCol.position, rawCol.isReversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + otherColumns.add(cm); + break; } + } - for (ColumnMetadata c : partitionKey) - columns.put(c.getName(), c); - for (ColumnMetadata c : clusteringColumns) - columns.put(c.getName(), c); - for (ColumnMetadata c : otherColumns) - columns.put(c.getName(), c); - - // create secondary indexes (C* >= 3.0) - if (indexRows != null) - for (Row indexRow : indexRows) { - IndexMetadata index = IndexMetadata.fromRow(tm, indexRow); - indexes.put(index.getName(), index); - } - - return tm; - } - - /** - * Upon migration from thrift to CQL, we internally create a pair of surrogate clustering/regular columns - * for compact static tables. These columns shouldn't be exposed to the user but are currently returned by C*. - * We also need to remove the static keyword for all other columns in the table. 
- */ - private static Map pruneStaticCompactTableColumns(Map rawCols) { - Collection cols = rawCols.values(); - Iterator it = cols.iterator(); - while (it.hasNext()) { - ColumnMetadata.Raw col = it.next(); - if (col.kind == ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN) { - // remove "column1 text" clustering column - it.remove(); - } else if (col.kind == ColumnMetadata.Raw.Kind.REGULAR) { - // remove "value blob" regular column - it.remove(); - } else if (col.kind == ColumnMetadata.Raw.Kind.STATIC) { - // remove "static" keyword - col.kind = ColumnMetadata.Raw.Kind.REGULAR; - } - } - return rawCols; + // Order for virtual table columns should mirror that of normal tables. + for (ColumnMetadata c : partitionKey) columns.put(c.getName(), c); + for (ColumnMetadata c : clusteringColumns) columns.put(c.getName(), c); + for (ColumnMetadata c : otherColumns) columns.put(c.getName(), c); + return tm; } - /** - * Upon migration from thrift to CQL, we internally create a surrogate column "value" of type - * EmptyType for dense tables. This column shouldn't be exposed to the user but is currently returned by C*. - */ - private static Map pruneDenseTableColumnsV3(Map rawCols) { - Collection cols = rawCols.values(); - Iterator it = cols.iterator(); - while (it.hasNext()) { - ColumnMetadata.Raw col = it.next(); - if (col.kind == ColumnMetadata.Raw.Kind.REGULAR - && col.dataType.equals(EMPTY_TYPE)) { - // remove "value empty" regular column - it.remove(); - } - } - return rawCols; + UUID id = null; + + if (cassandraVersion.getMajor() == 2 && cassandraVersion.getMinor() >= 1) + id = row.getUUID(CF_ID_V2); + else if (cassandraVersion.getMajor() > 2) id = row.getUUID(CF_ID_V3); + + DataTypeClassNameParser.ParseResult comparator = null; + DataTypeClassNameParser.ParseResult keyValidator = null; + List columnAliases = null; + + if (cassandraVersion.getMajor() <= 2) { + comparator = + DataTypeClassNameParser.parseWithComposite( + row.getString(COMPARATOR), protocolVersion, codecRegistry); + keyValidator = + DataTypeClassNameParser.parseWithComposite( + row.getString(KEY_VALIDATOR), protocolVersion, codecRegistry); + columnAliases = + cassandraVersion.getMajor() >= 2 || row.getString(COLUMN_ALIASES) == null + ? 
Collections.emptyList() + : SimpleJSONParser.parseStringList(row.getString(COLUMN_ALIASES)); } - private static Map pruneDenseTableColumnsV2(Map rawCols) { - Collection cols = rawCols.values(); - Iterator it = cols.iterator(); - while (it.hasNext()) { - ColumnMetadata.Raw col = it.next(); - if (col.kind == ColumnMetadata.Raw.Kind.COMPACT_VALUE && col.name.isEmpty()) { - // remove "" blob regular COMPACT_VALUE column - it.remove(); - } - } - return rawCols; + int partitionKeySize = findPartitionKeySize(rawCols.values(), keyValidator); + int clusteringSize; + + boolean isDense; + boolean isCompact; + if (cassandraVersion.getMajor() > 2) { + Set flags = row.getSet(FLAGS, String.class); + isDense = flags.contains(DENSE); + boolean isSuper = flags.contains(SUPER); + boolean isCompound = flags.contains(COMPOUND); + isCompact = isSuper || isDense || !isCompound; + boolean isStaticCompact = !isSuper && !isDense && !isCompound; + if (isStaticCompact) { + rawCols = pruneStaticCompactTableColumns(rawCols); + } + if (isDense) { + rawCols = pruneDenseTableColumnsV3(rawCols); + } + clusteringSize = + findClusteringSize(comparator, rawCols.values(), columnAliases, cassandraVersion); + } else { + assert comparator != null; + clusteringSize = + findClusteringSize(comparator, rawCols.values(), columnAliases, cassandraVersion); + isDense = clusteringSize != comparator.types.size() - 1; + if (isDense) { + rawCols = pruneDenseTableColumnsV2(rawCols); + } + isCompact = isDense || !comparator.isComposite; } - private static int findPartitionKeySize(Collection cols, DataTypeClassNameParser.ParseResult keyValidator) { - // C* 1.2, 2.0, 2.1 and 2.2 - if (keyValidator != null) - return keyValidator.types.size(); - // C* 3.0 onwards - int maxId = -1; - for (ColumnMetadata.Raw col : cols) - if (col.kind == ColumnMetadata.Raw.Kind.PARTITION_KEY) - maxId = Math.max(maxId, col.position); - return maxId + 1; + List partitionKey = + new ArrayList(Collections.nCopies(partitionKeySize, null)); + List clusteringColumns = + new ArrayList(Collections.nCopies(clusteringSize, null)); + List clusteringOrder = + new ArrayList(Collections.nCopies(clusteringSize, null)); + + // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. + LinkedHashMap columns = new LinkedHashMap(); + LinkedHashMap indexes = new LinkedHashMap(); + + TableOptionsMetadata options = null; + try { + options = new TableOptionsMetadata(row, isCompact, cassandraVersion); + } catch (RuntimeException e) { + // See ControlConnection#refreshSchema for why we'd rather not probably this further. Since + // table options is one thing + // that tends to change often in Cassandra, it's worth special casing this. + logger.error( + String.format( + "Error parsing schema options for table %s.%s: " + + "Cluster.getMetadata().getKeyspace(\"%s\").getTable(\"%s\").getOptions() will return null", + ksm.getName(), name, ksm.getName(), name), + e); } - private static int findClusteringSize(DataTypeClassNameParser.ParseResult comparator, - Collection cols, - List columnAliases, - VersionNumber cassandraVersion) { - // In 2.0 onwards, this is relatively easy, we just find the biggest 'position' amongst the clustering columns. - // For 1.2 however, this is slightly more subtle: we need to infer it based on whether the comparator is composite or not, and whether we have - // regular columns or not. 
- if (cassandraVersion.getMajor() >= 2) { - int maxId = -1; - for (ColumnMetadata.Raw col : cols) - if (col.kind == ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN) - maxId = Math.max(maxId, col.position); - return maxId + 1; - } else { - int size = comparator.types.size(); - if (comparator.isComposite) - return !comparator.collections.isEmpty() || (columnAliases.size() == size - 1 && comparator.types.get(size - 1).equals(DataType.text())) ? size - 1 : size; - else - // We know cols only has the REGULAR ones for 1.2 - return !columnAliases.isEmpty() || cols.isEmpty() ? size : 0; + TableMetadata tm = + new TableMetadata( + ksm, + name, + id, + partitionKey, + clusteringColumns, + columns, + indexes, + options, + clusteringOrder, + cassandraVersion); + + // We use this temporary set just so non PK columns are added in lexicographical order, which is + // the one of a + // 'SELECT * FROM ...' + Set otherColumns = new TreeSet(columnMetadataComparator); + + if (cassandraVersion.getMajor() < 2) { + + assert comparator != null; + assert keyValidator != null; + assert columnAliases != null; + + // In C* 1.2, only the REGULAR columns are in the columns schema table, so we need to add the + // names from + // the aliases (and make sure we handle default aliases). + List keyAliases = + row.getString(KEY_ALIASES) == null + ? Collections.emptyList() + : SimpleJSONParser.parseStringList(row.getString(KEY_ALIASES)); + for (int i = 0; i < partitionKey.size(); i++) { + String alias = + keyAliases.size() > i + ? keyAliases.get(i) + : (i == 0 ? DEFAULT_KEY_ALIAS : DEFAULT_KEY_ALIAS + (i + 1)); + partitionKey.set(i, ColumnMetadata.forAlias(tm, alias, keyValidator.types.get(i))); + } + + for (int i = 0; i < clusteringSize; i++) { + String alias = + columnAliases.size() > i ? columnAliases.get(i) : DEFAULT_COLUMN_ALIAS + (i + 1); + clusteringColumns.set(i, ColumnMetadata.forAlias(tm, alias, comparator.types.get(i))); + clusteringOrder.set( + i, comparator.reversed.get(i) ? ClusteringOrder.DESC : ClusteringOrder.ASC); + } + + // if we're dense, chances are that we have a single regular "value" column with an alias + if (isDense) { + String alias = row.isNull(VALUE_ALIAS) ? DEFAULT_VALUE_ALIAS : row.getString(VALUE_ALIAS); + // ...unless the table does not have any regular column, only primary key columns (JAVA-873) + if (!alias.isEmpty()) { + DataType type = + DataTypeClassNameParser.parseOne( + row.getString(VALIDATOR), protocolVersion, codecRegistry); + otherColumns.add(ColumnMetadata.forAlias(tm, alias, type)); } + } } - /** - * Returns metadata on a index of this table. - * - * @param name the name of the index to retrieve ({@code name} will be - * interpreted as a case-insensitive identifier unless enclosed in double-quotes, - * see {@link Metadata#quote}). - * @return the metadata for the {@code name} index if it exists, or - * {@code null} otherwise. 
- */ - public IndexMetadata getIndex(String name) { - return indexes.get(Metadata.handleId(name)); + for (ColumnMetadata.Raw rawCol : rawCols.values()) { + DataType dataType; + if (cassandraVersion.getMajor() >= 3) { + dataType = + DataTypeCqlNameParser.parse( + rawCol.dataType, cluster, ksm.getName(), ksm.userTypes, null, false, false); + } else { + dataType = + DataTypeClassNameParser.parseOne(rawCol.dataType, protocolVersion, codecRegistry); + } + ColumnMetadata col = ColumnMetadata.fromRaw(tm, rawCol, dataType); + switch (rawCol.kind) { + case PARTITION_KEY: + partitionKey.set(rawCol.position, col); + break; + case CLUSTERING_COLUMN: + clusteringColumns.set(rawCol.position, col); + clusteringOrder.set( + rawCol.position, rawCol.isReversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + otherColumns.add(col); + break; + } + + // legacy secondary indexes (C* < 3.0) + IndexMetadata index = IndexMetadata.fromLegacy(col, rawCol); + if (index != null) indexes.put(index.getName(), index); } - /** - * Returns all indexes based on this table. - * - * @return all indexes based on this table. - */ - public Collection getIndexes() { - return Collections.unmodifiableCollection(indexes.values()); + for (ColumnMetadata c : partitionKey) columns.put(c.getName(), c); + for (ColumnMetadata c : clusteringColumns) columns.put(c.getName(), c); + for (ColumnMetadata c : otherColumns) columns.put(c.getName(), c); + + // create secondary indexes (C* >= 3.0) + if (indexRows != null) + for (Row indexRow : indexRows) { + IndexMetadata index = IndexMetadata.fromRow(tm, indexRow); + indexes.put(index.getName(), index); + } + + return tm; + } + + /** + * Upon migration from thrift to CQL, we internally create a pair of surrogate clustering/regular + * columns for compact static tables. These columns shouldn't be exposed to the user but are + * currently returned by C*. We also need to remove the static keyword for all other columns in + * the table. + */ + private static Map pruneStaticCompactTableColumns( + Map rawCols) { + Collection cols = rawCols.values(); + Iterator it = cols.iterator(); + while (it.hasNext()) { + ColumnMetadata.Raw col = it.next(); + if (col.kind == ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN) { + // remove "column1 text" clustering column + it.remove(); + } else if (col.kind == ColumnMetadata.Raw.Kind.REGULAR) { + // remove "value blob" regular column + it.remove(); + } else if (col.kind == ColumnMetadata.Raw.Kind.STATIC) { + // remove "static" keyword + col.kind = ColumnMetadata.Raw.Kind.REGULAR; + } } - - /** - * Returns metadata on a view of this table. - * - * @param name the name of the view to retrieve ({@code name} will be - * interpreted as a case-insensitive identifier unless enclosed in double-quotes, - * see {@link Metadata#quote}). - * @return the metadata for the {@code name} view if it exists, or - * {@code null} otherwise. - */ - public MaterializedViewMetadata getView(String name) { - return views.get(Metadata.handleId(name)); + return rawCols; + } + + /** + * Upon migration from thrift to CQL, we internally create a surrogate column "value" of type + * EmptyType for dense tables. This column shouldn't be exposed to the user but is currently + * returned by C*. 
+ */ + private static Map pruneDenseTableColumnsV3( + Map rawCols) { + Collection cols = rawCols.values(); + Iterator it = cols.iterator(); + while (it.hasNext()) { + ColumnMetadata.Raw col = it.next(); + if (col.kind == ColumnMetadata.Raw.Kind.REGULAR && col.dataType.equals(EMPTY_TYPE)) { + // remove "value empty" regular column + it.remove(); + } } - - /** - * Returns all views based on this table. - * - * @return all views based on this table. - */ - public Collection getViews() { - return Collections.unmodifiableCollection(views.values()); + return rawCols; + } + + private static Map pruneDenseTableColumnsV2( + Map rawCols) { + Collection cols = rawCols.values(); + Iterator it = cols.iterator(); + while (it.hasNext()) { + ColumnMetadata.Raw col = it.next(); + if (col.kind == ColumnMetadata.Raw.Kind.COMPACT_VALUE && col.name.isEmpty()) { + // remove "" blob regular COMPACT_VALUE column + it.remove(); + } } - - void add(MaterializedViewMetadata view) { - views.put(view.getName(), view); + return rawCols; + } + + private static int findPartitionKeySize( + Collection cols, DataTypeClassNameParser.ParseResult keyValidator) { + // C* 1.2, 2.0, 2.1 and 2.2 + if (keyValidator != null) return keyValidator.types.size(); + // C* 3.0 onwards + int maxId = -1; + for (ColumnMetadata.Raw col : cols) + if (col.kind == ColumnMetadata.Raw.Kind.PARTITION_KEY) maxId = Math.max(maxId, col.position); + return maxId + 1; + } + + private static int findClusteringSize( + DataTypeClassNameParser.ParseResult comparator, + Collection cols, + List columnAliases, + VersionNumber cassandraVersion) { + // In 2.0 onwards, this is relatively easy, we just find the biggest 'position' amongst the + // clustering columns. + // For 1.2 however, this is slightly more subtle: we need to infer it based on whether the + // comparator is composite or not, and whether we have + // regular columns or not. + if (cassandraVersion.getMajor() >= 2) { + int maxId = -1; + for (ColumnMetadata.Raw col : cols) + if (col.kind == ColumnMetadata.Raw.Kind.CLUSTERING_COLUMN) + maxId = Math.max(maxId, col.position); + return maxId + 1; + } else { + int size = comparator.types.size(); + if (comparator.isComposite) + return !comparator.collections.isEmpty() + || (columnAliases.size() == size - 1 + && comparator.types.get(size - 1).equals(DataType.text())) + ? size - 1 + : size; + else + // We know cols only has the REGULAR ones for 1.2 + return !columnAliases.isEmpty() || cols.isEmpty() ? size : 0; } - - /** - * Returns a {@code String} containing CQL queries representing this - * table and the index on it. - *

- * In other words, this method returns the queries that would allow you to
- * recreate the schema of this table, along with the indexes and views defined on
- * this table, if any.
- * <p>
- * Note that the returned String is formatted to be human readable (for - * some definition of human readable at least). - * - * @return the CQL queries representing this table schema as a {code - * String}. - */ - @Override - public String exportAsString() { - StringBuilder sb = new StringBuilder(); - - sb.append(super.exportAsString()); - - for (IndexMetadata index : indexes.values()) { - sb.append('\n').append(index.asCQLQuery()); - } - - for (MaterializedViewMetadata view : views.values()) { - sb.append('\n').append(view.asCQLQuery()); + } + + /** + * Returns metadata on a index of this table. + * + * @param name the name of the index to retrieve ({@code name} will be interpreted as a + * case-insensitive identifier unless enclosed in double-quotes, see {@link Metadata#quote}). + * @return the metadata for the {@code name} index if it exists, or {@code null} otherwise. + */ + public IndexMetadata getIndex(String name) { + return indexes.get(Metadata.handleId(name)); + } + + /** + * Returns all indexes based on this table. + * + * @return all indexes based on this table. + */ + public Collection getIndexes() { + return Collections.unmodifiableCollection(indexes.values()); + } + + /** + * Returns metadata on a view of this table. + * + * @param name the name of the view to retrieve ({@code name} will be interpreted as a + * case-insensitive identifier unless enclosed in double-quotes, see {@link Metadata#quote}). + * @return the metadata for the {@code name} view if it exists, or {@code null} otherwise. + */ + public MaterializedViewMetadata getView(String name) { + return views.get(Metadata.handleId(name)); + } + + /** + * Returns all views based on this table. + * + * @return all views based on this table. + */ + public Collection getViews() { + return Collections.unmodifiableCollection(views.values()); + } + + void add(MaterializedViewMetadata view) { + views.put(view.getName(), view); + } + + /** + * Returns a {@code String} containing CQL queries representing this table and the index on it. + * + *

+ * <p>In other words, this method returns the queries that would allow you to recreate the schema
+ * of this table, along with the indexes and views defined on this table, if any.
+ *
+ * <p>
Note that the returned String is formatted to be human readable (for some definition of + * human readable at least). + * + * @return the CQL queries representing this table schema as a {code String}. + */ + @Override + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(super.exportAsString()); + + if (!indexes.isEmpty()) { + sb.append('\n'); + + Iterator indexIt = indexes.values().iterator(); + while (indexIt.hasNext()) { + IndexMetadata index = indexIt.next(); + sb.append('\n').append(index.asCQLQuery()); + if (indexIt.hasNext()) { + sb.append('\n'); } - - return sb.toString(); + } } - @Override - protected String asCQLQuery(boolean formatted) { - StringBuilder sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(Metadata.quoteIfNecessary(keyspace.getName())).append('.').append(Metadata.quoteIfNecessary(name)).append(" ("); - newLine(sb, formatted); - for (ColumnMetadata cm : columns.values()) - newLine(sb.append(spaces(4, formatted)).append(cm).append(',').append(spaces(1, !formatted)), formatted); - - // PK - sb.append(spaces(4, formatted)).append("PRIMARY KEY ("); - if (partitionKey.size() == 1) { - sb.append(Metadata.quoteIfNecessary(partitionKey.get(0).getName())); - } else { - sb.append('('); - boolean first = true; - for (ColumnMetadata cm : partitionKey) { - if (first) - first = false; - else - sb.append(", "); - sb.append(Metadata.quoteIfNecessary(cm.getName())); - } - sb.append(')'); + if (!views.isEmpty()) { + sb.append('\n'); + + Iterator viewsIt = + ImmutableSortedSet.orderedBy(AbstractTableMetadata.byNameComparator) + .addAll(views.values()) + .build() + .iterator(); + while (viewsIt.hasNext()) { + AbstractTableMetadata view = viewsIt.next(); + sb.append('\n').append(view.exportAsString()); + if (viewsIt.hasNext()) { + sb.append('\n'); } - for (ColumnMetadata cm : clusteringColumns) - sb.append(", ").append(Metadata.quoteIfNecessary(cm.getName())); - sb.append(')'); - newLine(sb, formatted); - // end PK - - sb.append(")"); - appendOptions(sb, formatted); - return sb.toString(); + } } - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (!(other instanceof TableMetadata)) - return false; - - TableMetadata that = (TableMetadata) other; - - return MoreObjects.equal(this.name, that.name) && - MoreObjects.equal(this.id, that.id) && - MoreObjects.equal(this.partitionKey, that.partitionKey) && - MoreObjects.equal(this.clusteringColumns, that.clusteringColumns) && - MoreObjects.equal(this.columns, that.columns) && - MoreObjects.equal(this.options, that.options) && - MoreObjects.equal(this.clusteringOrder, that.clusteringOrder) && - MoreObjects.equal(this.indexes, that.indexes) && - MoreObjects.equal(this.views, that.views); - } + return sb.toString(); + } - @Override - public int hashCode() { - return MoreObjects.hashCode(name, id, partitionKey, clusteringColumns, columns, options, clusteringOrder, indexes, views); + @Override + protected String asCQLQuery(boolean formatted) { + StringBuilder sb = new StringBuilder(); + if (isVirtual()) { + sb.append("/* VIRTUAL "); + } else { + sb.append("CREATE "); + } + sb.append("TABLE ") + .append(Metadata.quoteIfNecessary(keyspace.getName())) + .append('.') + .append(Metadata.quoteIfNecessary(name)) + .append(" ("); + if (formatted) { + spaceOrNewLine(sb, true); + } + for (ColumnMetadata cm : columns.values()) { + sb.append(cm).append(','); + spaceOrNewLine(sb, formatted); + } + // PK + sb.append("PRIMARY KEY ("); + if (partitionKey.size() == 1) { + 
sb.append(Metadata.quoteIfNecessary(partitionKey.get(0).getName())); + } else { + sb.append('('); + boolean first = true; + for (ColumnMetadata cm : partitionKey) { + if (first) first = false; + else sb.append(", "); + sb.append(Metadata.quoteIfNecessary(cm.getName())); + } + sb.append(')'); + } + for (ColumnMetadata cm : clusteringColumns) + sb.append(", ").append(Metadata.quoteIfNecessary(cm.getName())); + sb.append(')'); + newLine(sb, formatted); + // end PK + sb.append(") "); + appendOptions(sb, formatted); + if (isVirtual()) { + sb.append(" */"); } + return sb.toString(); + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof TableMetadata)) return false; + + TableMetadata that = (TableMetadata) other; + + return MoreObjects.equal(this.name, that.name) + && MoreObjects.equal(this.id, that.id) + && MoreObjects.equal(this.partitionKey, that.partitionKey) + && MoreObjects.equal(this.clusteringColumns, that.clusteringColumns) + && MoreObjects.equal(this.columns, that.columns) + && MoreObjects.equal(this.options, that.options) + && MoreObjects.equal(this.clusteringOrder, that.clusteringOrder) + && MoreObjects.equal(this.indexes, that.indexes) + && MoreObjects.equal(this.views, that.views); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + name, + id, + partitionKey, + clusteringColumns, + columns, + options, + clusteringOrder, + indexes, + views); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableOptionsMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableOptionsMetadata.java index e6553957954..15c172ef37f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableOptionsMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableOptionsMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,413 +19,475 @@ import com.datastax.driver.core.utils.MoreObjects; import com.google.common.collect.ImmutableMap; - import java.nio.ByteBuffer; import java.util.Map; public class TableOptionsMetadata { - private static final String COMMENT = "comment"; - private static final String READ_REPAIR = "read_repair_chance"; - private static final String DCLOCAL_READ_REPAIR = "dclocal_read_repair_chance"; - private static final String LOCAL_READ_REPAIR = "local_read_repair_chance"; - private static final String REPLICATE_ON_WRITE = "replicate_on_write"; - private static final String GC_GRACE = "gc_grace_seconds"; - private static final String BF_FP_CHANCE = "bloom_filter_fp_chance"; - private static final String CACHING = "caching"; - private static final String COMPACTION = "compaction"; - private static final String COMPACTION_CLASS = "compaction_strategy_class"; - private static final String COMPACTION_OPTIONS = "compaction_strategy_options"; - private static final String POPULATE_CACHE_ON_FLUSH = "populate_io_cache_on_flush"; - private static final String COMPRESSION = "compression"; - private static final String COMPRESSION_PARAMS = "compression_parameters"; - private static final String MEMTABLE_FLUSH_PERIOD_MS = "memtable_flush_period_in_ms"; - private static final String DEFAULT_TTL = "default_time_to_live"; - private static final String SPECULATIVE_RETRY = "speculative_retry"; - private static final String INDEX_INTERVAL = "index_interval"; - private static final String MIN_INDEX_INTERVAL = "min_index_interval"; - private static final String MAX_INDEX_INTERVAL = "max_index_interval"; - private static final String CRC_CHECK_CHANCE = "crc_check_chance"; - private static final String EXTENSIONS = "extensions"; - private static final String CDC = "cdc"; - - private static final boolean DEFAULT_REPLICATE_ON_WRITE = true; - private static final double DEFAULT_BF_FP_CHANCE = 0.01; - private static final boolean DEFAULT_POPULATE_CACHE_ON_FLUSH = false; - private static final int DEFAULT_MEMTABLE_FLUSH_PERIOD = 0; - private static final int DEFAULT_DEFAULT_TTL = 0; - private static final String DEFAULT_SPECULATIVE_RETRY = "NONE"; - private static final int DEFAULT_INDEX_INTERVAL = 128; - private static final int DEFAULT_MIN_INDEX_INTERVAL = 128; - private static final int DEFAULT_MAX_INDEX_INTERVAL = 2048; - private static final double DEFAULT_CRC_CHECK_CHANCE = 1.0; - private static final boolean DEFAULT_CDC = false; - - private final boolean isCompactStorage; - - private final String comment; - private final double readRepair; - private final double localReadRepair; - private final boolean replicateOnWrite; - private final int gcGrace; - private final double bfFpChance; - private final Map caching; - private final boolean populateCacheOnFlush; - private final int memtableFlushPeriodMs; - private final int defaultTTL; - private final String speculativeRetry; - private final Integer indexInterval; - private final Integer minIndexInterval; - private final Integer maxIndexInterval; - private final Map compaction; - private final Map compression; - private final Double crcCheckChance; - private final Map extensions; - private final boolean cdc; - - TableOptionsMetadata(Row row, boolean isCompactStorage, VersionNumber 
version) { - - boolean is120 = version.getMajor() < 2; - boolean is200 = version.getMajor() == 2 && version.getMinor() == 0; - boolean is210 = version.getMajor() == 2 && version.getMinor() >= 1; - boolean is400OrHigher = version.getMajor() > 3; - boolean is380OrHigher = is400OrHigher || version.getMajor() == 3 && version.getMinor() >= 8; - boolean is300OrHigher = version.getMajor() > 2; - boolean is210OrHigher = is210 || is300OrHigher; - - this.isCompactStorage = isCompactStorage; - this.comment = isNullOrAbsent(row, COMMENT) ? "" : row.getString(COMMENT); - this.readRepair = row.getDouble(READ_REPAIR); - - if (is300OrHigher) - this.localReadRepair = row.getDouble(DCLOCAL_READ_REPAIR); - else - this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR); - - this.replicateOnWrite = is210OrHigher || isNullOrAbsent(row, REPLICATE_ON_WRITE) ? DEFAULT_REPLICATE_ON_WRITE : row.getBool(REPLICATE_ON_WRITE); - this.gcGrace = row.getInt(GC_GRACE); - this.bfFpChance = isNullOrAbsent(row, BF_FP_CHANCE) ? DEFAULT_BF_FP_CHANCE : row.getDouble(BF_FP_CHANCE); - - this.populateCacheOnFlush = isNullOrAbsent(row, POPULATE_CACHE_ON_FLUSH) ? DEFAULT_POPULATE_CACHE_ON_FLUSH : row.getBool(POPULATE_CACHE_ON_FLUSH); - this.memtableFlushPeriodMs = is120 || isNullOrAbsent(row, MEMTABLE_FLUSH_PERIOD_MS) ? DEFAULT_MEMTABLE_FLUSH_PERIOD : row.getInt(MEMTABLE_FLUSH_PERIOD_MS); - this.defaultTTL = is120 || isNullOrAbsent(row, DEFAULT_TTL) ? DEFAULT_DEFAULT_TTL : row.getInt(DEFAULT_TTL); - this.speculativeRetry = is120 || isNullOrAbsent(row, SPECULATIVE_RETRY) ? DEFAULT_SPECULATIVE_RETRY : row.getString(SPECULATIVE_RETRY); - - if (is200) - this.indexInterval = isNullOrAbsent(row, INDEX_INTERVAL) ? DEFAULT_INDEX_INTERVAL : row.getInt(INDEX_INTERVAL); - else - this.indexInterval = null; - - if (is210OrHigher) { - this.minIndexInterval = isNullOrAbsent(row, MIN_INDEX_INTERVAL) - ? DEFAULT_MIN_INDEX_INTERVAL - : row.getInt(MIN_INDEX_INTERVAL); - this.maxIndexInterval = isNullOrAbsent(row, MAX_INDEX_INTERVAL) - ? DEFAULT_MAX_INDEX_INTERVAL - : row.getInt(MAX_INDEX_INTERVAL); - } else { - this.minIndexInterval = null; - this.maxIndexInterval = null; - } - - if (is300OrHigher) { - this.caching = ImmutableMap.copyOf(row.getMap(CACHING, String.class, String.class)); - } else if (is210) { - this.caching = ImmutableMap.copyOf(SimpleJSONParser.parseStringMap(row.getString(CACHING))); - } else { - this.caching = ImmutableMap.of("keys", row.getString(CACHING)); - } - - if (is300OrHigher) - this.compaction = ImmutableMap.copyOf(row.getMap(COMPACTION, String.class, String.class)); - else { - this.compaction = ImmutableMap.builder() - .put("class", row.getString(COMPACTION_CLASS)) - .putAll(SimpleJSONParser.parseStringMap(row.getString(COMPACTION_OPTIONS))) - .build(); - } - - if (is300OrHigher) - this.compression = ImmutableMap.copyOf(row.getMap(COMPRESSION, String.class, String.class)); - else - this.compression = ImmutableMap.copyOf(SimpleJSONParser.parseStringMap(row.getString(COMPRESSION_PARAMS))); - - if (is300OrHigher) - this.crcCheckChance = isNullOrAbsent(row, CRC_CHECK_CHANCE) - ? DEFAULT_CRC_CHECK_CHANCE - : row.getDouble(CRC_CHECK_CHANCE); - else - this.crcCheckChance = null; - - if (is300OrHigher) - this.extensions = ImmutableMap.copyOf(row.getMap(EXTENSIONS, String.class, ByteBuffer.class)); - else - this.extensions = ImmutableMap.of(); - - if (is380OrHigher) - this.cdc = isNullOrAbsent(row, CDC) - ? 
DEFAULT_CDC - : row.getBool(CDC); - else - this.cdc = DEFAULT_CDC; - } - - private static boolean isNullOrAbsent(Row row, String name) { - return row.getColumnDefinitions().getIndexOf(name) < 0 - || row.isNull(name); - } - - /** - * Returns whether the table uses the {@code COMPACT STORAGE} option. - * - * @return whether the table uses the {@code COMPACT STORAGE} option. - */ - public boolean isCompactStorage() { - return isCompactStorage; - } - - /** - * Returns the commentary set for this table. - * - * @return the commentary set for this table, or {@code null} if noe has been set. - */ - public String getComment() { - return comment; - } - - /** - * Returns the chance with which a read repair is triggered for this table. - * - * @return the read repair chance set for table (in [0.0, 1.0]). - */ - public double getReadRepairChance() { - return readRepair; - } - - /** - * Returns the cluster local read repair chance set for this table. - * - * @return the local read repair chance set for table (in [0.0, 1.0]). - */ - public double getLocalReadRepairChance() { - return localReadRepair; - } - - /** - * Returns whether replicateOnWrite is set for this table. - *

- * This is only meaningful for tables holding counters. - * - * @return whether replicateOnWrite is set for this table. - */ - public boolean getReplicateOnWrite() { - return replicateOnWrite; - } - - /** - * Returns the tombstone garbage collection grace time in seconds for this table. - * - * @return the tombstone garbage collection grace time in seconds for this table. - */ - public int getGcGraceInSeconds() { - return gcGrace; - } - - /** - * Returns the false positive chance for the Bloom filter of this table. - * - * @return the Bloom filter false positive chance for this table (in [0.0, 1.0]). - */ - public double getBloomFilterFalsePositiveChance() { - return bfFpChance; - } - - /** - * Returns the caching options for this table. - * - * @return an immutable map containing the caching options for this table. - */ - public Map getCaching() { - return caching; - } - - /** - * Whether the populate I/O cache on flush is set on this table. - * - * @return whether the populate I/O cache on flush is set on this table. - */ - public boolean getPopulateIOCacheOnFlush() { - return populateCacheOnFlush; + private static final String COMMENT = "comment"; + private static final String READ_REPAIR_CHANCE = "read_repair_chance"; + private static final String DCLOCAL_READ_REPAIR_CHANCE = "dclocal_read_repair_chance"; + private static final String READ_REPAIR = "read_repair"; + private static final String LOCAL_READ_REPAIR_CHANCE = "local_read_repair_chance"; + private static final String REPLICATE_ON_WRITE = "replicate_on_write"; + private static final String GC_GRACE = "gc_grace_seconds"; + private static final String BF_FP_CHANCE = "bloom_filter_fp_chance"; + private static final String CACHING = "caching"; + private static final String COMPACTION = "compaction"; + private static final String COMPACTION_CLASS = "compaction_strategy_class"; + private static final String COMPACTION_OPTIONS = "compaction_strategy_options"; + private static final String POPULATE_CACHE_ON_FLUSH = "populate_io_cache_on_flush"; + private static final String COMPRESSION = "compression"; + private static final String COMPRESSION_PARAMS = "compression_parameters"; + private static final String MEMTABLE_FLUSH_PERIOD_MS = "memtable_flush_period_in_ms"; + private static final String DEFAULT_TTL = "default_time_to_live"; + private static final String SPECULATIVE_RETRY = "speculative_retry"; + private static final String INDEX_INTERVAL = "index_interval"; + private static final String MIN_INDEX_INTERVAL = "min_index_interval"; + private static final String MAX_INDEX_INTERVAL = "max_index_interval"; + private static final String CRC_CHECK_CHANCE = "crc_check_chance"; + private static final String EXTENSIONS = "extensions"; + private static final String CDC = "cdc"; + private static final String ADDITIONAL_WRITE_POLICY = "additional_write_policy"; + + private static final boolean DEFAULT_REPLICATE_ON_WRITE = true; + private static final double DEFAULT_BF_FP_CHANCE = 0.01; + private static final boolean DEFAULT_POPULATE_CACHE_ON_FLUSH = false; + private static final int DEFAULT_MEMTABLE_FLUSH_PERIOD = 0; + private static final int DEFAULT_DEFAULT_TTL = 0; + private static final String DEFAULT_SPECULATIVE_RETRY = "NONE"; + private static final int DEFAULT_INDEX_INTERVAL = 128; + private static final int DEFAULT_MIN_INDEX_INTERVAL = 128; + private static final int DEFAULT_MAX_INDEX_INTERVAL = 2048; + private static final double DEFAULT_CRC_CHECK_CHANCE = 1.0; + private static final boolean DEFAULT_CDC = false; + private static 
final String DEFAULT_READ_REPAIR = "BLOCKING"; + private static final String DEFAULT_ADDITIONAL_WRITE_POLICY = "99p"; + + private final boolean isCompactStorage; + + private final String comment; + private final double readRepairChance; + private final double localReadRepairChance; + private final String readRepair; + private final boolean replicateOnWrite; + private final int gcGrace; + private final double bfFpChance; + private final Map caching; + private final boolean populateCacheOnFlush; + private final int memtableFlushPeriodMs; + private final int defaultTTL; + private final String speculativeRetry; + private final Integer indexInterval; + private final Integer minIndexInterval; + private final Integer maxIndexInterval; + private final Map compaction; + private final Map compression; + private final Double crcCheckChance; + private final Map extensions; + private final boolean cdc; + private final String additionalWritePolicy; + + TableOptionsMetadata(Row row, boolean isCompactStorage, VersionNumber version) { + + boolean is120 = version.getMajor() < 2; + boolean is200 = version.getMajor() == 2 && version.getMinor() == 0; + boolean is210 = version.getMajor() == 2 && version.getMinor() >= 1; + boolean is400OrHigher = version.getMajor() > 3; + boolean is380OrHigher = is400OrHigher || version.getMajor() == 3 && version.getMinor() >= 8; + boolean is300OrHigher = version.getMajor() > 2; + boolean is210OrHigher = is210 || is300OrHigher; + + this.isCompactStorage = isCompactStorage; + this.comment = isNullOrAbsent(row, COMMENT) ? "" : row.getString(COMMENT); + this.readRepairChance = row.getDouble(READ_REPAIR_CHANCE); + + if (is400OrHigher) this.readRepair = row.getString(READ_REPAIR); + else this.readRepair = DEFAULT_READ_REPAIR; + + if (is300OrHigher) this.localReadRepairChance = row.getDouble(DCLOCAL_READ_REPAIR_CHANCE); + else this.localReadRepairChance = row.getDouble(LOCAL_READ_REPAIR_CHANCE); + + this.replicateOnWrite = + is210OrHigher || isNullOrAbsent(row, REPLICATE_ON_WRITE) + ? DEFAULT_REPLICATE_ON_WRITE + : row.getBool(REPLICATE_ON_WRITE); + this.gcGrace = row.getInt(GC_GRACE); + this.bfFpChance = + isNullOrAbsent(row, BF_FP_CHANCE) ? DEFAULT_BF_FP_CHANCE : row.getDouble(BF_FP_CHANCE); + + this.populateCacheOnFlush = + isNullOrAbsent(row, POPULATE_CACHE_ON_FLUSH) + ? DEFAULT_POPULATE_CACHE_ON_FLUSH + : row.getBool(POPULATE_CACHE_ON_FLUSH); + this.memtableFlushPeriodMs = + is120 || isNullOrAbsent(row, MEMTABLE_FLUSH_PERIOD_MS) + ? DEFAULT_MEMTABLE_FLUSH_PERIOD + : row.getInt(MEMTABLE_FLUSH_PERIOD_MS); + this.defaultTTL = + is120 || isNullOrAbsent(row, DEFAULT_TTL) ? DEFAULT_DEFAULT_TTL : row.getInt(DEFAULT_TTL); + this.speculativeRetry = + is120 || isNullOrAbsent(row, SPECULATIVE_RETRY) + ? DEFAULT_SPECULATIVE_RETRY + : row.getString(SPECULATIVE_RETRY); + + if (is200) + this.indexInterval = + isNullOrAbsent(row, INDEX_INTERVAL) ? DEFAULT_INDEX_INTERVAL : row.getInt(INDEX_INTERVAL); + else this.indexInterval = null; + + if (is210OrHigher) { + this.minIndexInterval = + isNullOrAbsent(row, MIN_INDEX_INTERVAL) + ? DEFAULT_MIN_INDEX_INTERVAL + : row.getInt(MIN_INDEX_INTERVAL); + this.maxIndexInterval = + isNullOrAbsent(row, MAX_INDEX_INTERVAL) + ? DEFAULT_MAX_INDEX_INTERVAL + : row.getInt(MAX_INDEX_INTERVAL); + } else { + this.minIndexInterval = null; + this.maxIndexInterval = null; } - /* - * Returns the memtable flush period (in milliseconds) option for this table. - *

- * Note: this option is not available in Cassandra 1.2 and will return 0 (no periodic - * flush) when connected to 1.2 nodes. - * - * @return the memtable flush period option for this table or 0 if no - * periodic flush is configured. - */ - public int getMemtableFlushPeriodInMs() { - return memtableFlushPeriodMs; + if (is300OrHigher) { + this.caching = ImmutableMap.copyOf(row.getMap(CACHING, String.class, String.class)); + } else if (is210) { + this.caching = ImmutableMap.copyOf(SimpleJSONParser.parseStringMap(row.getString(CACHING))); + } else { + this.caching = ImmutableMap.of("keys", row.getString(CACHING)); } - /** - * Returns the default TTL for this table. - *

- * Note: this option is not available in Cassandra 1.2 and will return 0 (no default - * TTL) when connected to 1.2 nodes. - * - * @return the default TTL for this table or 0 if no default TTL is - * configured. - */ - public int getDefaultTimeToLive() { - return defaultTTL; + if (is300OrHigher) + this.compaction = ImmutableMap.copyOf(row.getMap(COMPACTION, String.class, String.class)); + else { + this.compaction = + ImmutableMap.builder() + .put("class", row.getString(COMPACTION_CLASS)) + .putAll(SimpleJSONParser.parseStringMap(row.getString(COMPACTION_OPTIONS))) + .build(); } - /** - * Returns the speculative retry option for this table. - *

- * Note: this option is not available in Cassandra 1.2 and will return "NONE" (no - * speculative retry) when connected to 1.2 nodes. - * - * @return the speculative retry option this table. - */ - public String getSpeculativeRetry() { - return speculativeRetry; - } - - /** - * Returns the index interval option for this table. - *

- * Note: this option is not available in Cassandra 1.2 (more precisely, it is not - * configurable per-table) and will return 128 (the default index interval) when - * connected to 1.2 nodes. It is deprecated in Cassandra 2.1 and above, and will - * therefore return {@code null} for 2.1 nodes. - * - * @return the index interval option for this table. - */ - public Integer getIndexInterval() { - return indexInterval; - } - - /** - * Returns the minimum index interval option for this table. - *

- * Note: this option is available in Cassandra 2.1 and above, and will return - * {@code null} for earlier versions. - * - * @return the minimum index interval option for this table. - */ - public Integer getMinIndexInterval() { - return minIndexInterval; - } - - /** - * Returns the maximum index interval option for this table. - *

- * Note: this option is available in Cassandra 2.1 and above, and will return - * {@code null} for earlier versions. - * - * @return the maximum index interval option for this table. - */ - public Integer getMaxIndexInterval() { - return maxIndexInterval; - } - - /** - * When compression is enabled, this option defines the probability - * with which checksums for compressed blocks are checked during reads. - * The default value for this options is 1.0 (always check). - *

- * Note that this option is available in Cassandra 3.0.0 and above, when it - * became a "top-level" table option, whereas previously it was a suboption - * of the {@link #getCompression() compression} option. - *

- * For Cassandra versions prior to 3.0.0, this method always returns {@code null}. - * - * @return the probability with which checksums for compressed blocks are checked during reads - */ - public Double getCrcCheckChance() { - return crcCheckChance; - } - - /** - * Returns the compaction options for this table. - * - * @return an immutable map containing the compaction options for this table. - */ - public Map getCompaction() { - return compaction; - } - - /** - * Returns the compression options for this table. - * - * @return an immutable map containing the compression options for this table. - */ - public Map getCompression() { - return compression; - } - - /** - * Returns the extension options for this table. - *

- * For Cassandra versions prior to 3.0.0, this method always returns an empty map. - * - * @return an immutable map containing the extension options for this table. - */ - public Map getExtensions() { - return extensions; - } - - /** - * Returns whether or not change data capture is enabled for this table. - *

- * For Cassandra versions prior to 3.8.0, this method always returns false. - * - * @return whether or not change data capture is enabled for this table. - */ - public boolean isCDC() { - return cdc; - } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (!(other instanceof TableOptionsMetadata)) - return false; - - TableOptionsMetadata that = (TableOptionsMetadata) other; - return this.isCompactStorage == that.isCompactStorage && - MoreObjects.equal(this.comment, that.comment) && - this.readRepair == that.readRepair && - this.localReadRepair == that.localReadRepair && - this.replicateOnWrite == that.replicateOnWrite && - this.gcGrace == that.gcGrace && - this.bfFpChance == that.bfFpChance && - MoreObjects.equal(this.caching, that.caching) && - this.populateCacheOnFlush == that.populateCacheOnFlush && - this.memtableFlushPeriodMs == that.memtableFlushPeriodMs && - this.defaultTTL == that.defaultTTL && - this.cdc == that.cdc && - MoreObjects.equal(this.speculativeRetry, that.speculativeRetry) && - MoreObjects.equal(this.indexInterval, that.indexInterval) && - MoreObjects.equal(this.minIndexInterval, that.minIndexInterval) && - MoreObjects.equal(this.maxIndexInterval, that.maxIndexInterval) && - MoreObjects.equal(this.compaction, that.compaction) && - MoreObjects.equal(this.compression, that.compression) && - MoreObjects.equal(this.crcCheckChance, that.crcCheckChance) && - MoreObjects.equal(this.extensions, that.extensions); - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(isCompactStorage, comment, readRepair, localReadRepair, replicateOnWrite, gcGrace, - bfFpChance, caching, populateCacheOnFlush, memtableFlushPeriodMs, defaultTTL, speculativeRetry, - indexInterval, minIndexInterval, maxIndexInterval, compaction, compression, crcCheckChance, extensions, - cdc); - } + if (is300OrHigher) + this.compression = ImmutableMap.copyOf(row.getMap(COMPRESSION, String.class, String.class)); + else + this.compression = + ImmutableMap.copyOf(SimpleJSONParser.parseStringMap(row.getString(COMPRESSION_PARAMS))); + + if (is300OrHigher) + this.crcCheckChance = + isNullOrAbsent(row, CRC_CHECK_CHANCE) + ? DEFAULT_CRC_CHECK_CHANCE + : row.getDouble(CRC_CHECK_CHANCE); + else this.crcCheckChance = null; + + if (is300OrHigher) + this.extensions = ImmutableMap.copyOf(row.getMap(EXTENSIONS, String.class, ByteBuffer.class)); + else this.extensions = ImmutableMap.of(); + + if (is380OrHigher) this.cdc = isNullOrAbsent(row, CDC) ? DEFAULT_CDC : row.getBool(CDC); + else this.cdc = DEFAULT_CDC; + + if (is400OrHigher) this.additionalWritePolicy = row.getString(ADDITIONAL_WRITE_POLICY); + else this.additionalWritePolicy = DEFAULT_ADDITIONAL_WRITE_POLICY; + } + + private static boolean isNullOrAbsent(Row row, String name) { + return row.getColumnDefinitions().getIndexOf(name) < 0 || row.isNull(name); + } + + /** + * Returns whether the table uses the {@code COMPACT STORAGE} option. + * + * @return whether the table uses the {@code COMPACT STORAGE} option. + */ + public boolean isCompactStorage() { + return isCompactStorage; + } + + /** + * Returns the commentary set for this table. + * + * @return the commentary set for this table, or {@code null} if noe has been set. + */ + public String getComment() { + return comment; + } + + /** + * Returns the chance with which a read repair is triggered for this table. + * + * @return the read repair chance set for table (in [0.0, 1.0]). 
+ */ + public double getReadRepairChance() { + return readRepairChance; + } + + /** + * Returns the read_repair option for this table. NOTE: this is a Cassandra® 4.0 and newer + * option (described here: + * http://cassandra.apache.org/doc/latest/operating/read_repair.html). Possible values are + * {@code BLOCKING} or {@code NONE}, with the default being {@code BLOCKING}. + * + * @return the read repair option (either {@code BLOCKING} or {@code NONE}). + */ + public String getReadRepair() { + return readRepair; + } + + /** + * Returns the cluster local read repair chance set for this table. + * + * @return the local read repair chance set for table (in [0.0, 1.0]). + */ + public double getLocalReadRepairChance() { + return localReadRepairChance; + } + + /** + * Returns whether replicateOnWrite is set for this table. + * + *

This is only meaningful for tables holding counters. + * + * @return whether replicateOnWrite is set for this table. + */ + public boolean getReplicateOnWrite() { + return replicateOnWrite; + } + + /** + * Returns the tombstone garbage collection grace time in seconds for this table. + * + * @return the tombstone garbage collection grace time in seconds for this table. + */ + public int getGcGraceInSeconds() { + return gcGrace; + } + + /** + * Returns the false positive chance for the Bloom filter of this table. + * + * @return the Bloom filter false positive chance for this table (in [0.0, 1.0]). + */ + public double getBloomFilterFalsePositiveChance() { + return bfFpChance; + } + + /** + * Returns the caching options for this table. + * + * @return an immutable map containing the caching options for this table. + */ + public Map getCaching() { + return caching; + } + + /** + * Whether the populate I/O cache on flush is set on this table. + * + * @return whether the populate I/O cache on flush is set on this table. + */ + public boolean getPopulateIOCacheOnFlush() { + return populateCacheOnFlush; + } + + /* + * Returns the memtable flush period (in milliseconds) option for this table. + *

+ * Note: this option is not available in Cassandra 1.2 and will return 0 (no periodic + * flush) when connected to 1.2 nodes. + * + * @return the memtable flush period option for this table or 0 if no + * periodic flush is configured. + */ + public int getMemtableFlushPeriodInMs() { + return memtableFlushPeriodMs; + } + + /** + * Returns the default TTL for this table. + * + *

Note: this option is not available in Cassandra 1.2 and will return 0 (no default TTL) when + * connected to 1.2 nodes. + * + * @return the default TTL for this table or 0 if no default TTL is configured. + */ + public int getDefaultTimeToLive() { + return defaultTTL; + } + + /** + * Returns the speculative retry option for this table. + * + *

Note: this option is not available in Cassandra 1.2 and will return "NONE" (no speculative + * retry) when connected to 1.2 nodes. + * + * @return the speculative retry option for this table. + */ + public String getSpeculativeRetry() { + return speculativeRetry; + } + + /** + * Returns the index interval option for this table. + * + *

Note: this option is not available in Cassandra 1.2 (more precisely, it is not configurable + * per-table) and will return 128 (the default index interval) when connected to 1.2 nodes. It is + * deprecated in Cassandra 2.1 and above, and will therefore return {@code null} for 2.1 nodes. + * + * @return the index interval option for this table. + */ + public Integer getIndexInterval() { + return indexInterval; + } + + /** + * Returns the minimum index interval option for this table. + * + *

Note: this option is available in Cassandra 2.1 and above, and will return {@code null} for + * earlier versions. + * + * @return the minimum index interval option for this table. + */ + public Integer getMinIndexInterval() { + return minIndexInterval; + } + + /** + * Returns the maximum index interval option for this table. + * + *

Note: this option is available in Cassandra 2.1 and above, and will return {@code null} for + * earlier versions. + * + * @return the maximum index interval option for this table. + */ + public Integer getMaxIndexInterval() { + return maxIndexInterval; + } + + /** + * When compression is enabled, this option defines the probability with which checksums for + * compressed blocks are checked during reads. The default value for this option is 1.0 (always + * check). + * + *

Note that this option is available in Cassandra 3.0.0 and above, when it became a + * "top-level" table option, whereas previously it was a suboption of the {@link #getCompression() + * compression} option. + * + *

For Cassandra versions prior to 3.0.0, this method always returns {@code null}. + * + * @return the probability with which checksums for compressed blocks are checked during reads + */ + public Double getCrcCheckChance() { + return crcCheckChance; + } + + /** + * Returns the compaction options for this table. + * + * @return an immutable map containing the compaction options for this table. + */ + public Map getCompaction() { + return compaction; + } + + /** + * Returns the compression options for this table. + * + * @return an immutable map containing the compression options for this table. + */ + public Map getCompression() { + return compression; + } + + /** + * Returns the extension options for this table. + * + *

For Cassandra versions prior to 3.0.0, this method always returns an empty map. + * + * @return an immutable map containing the extension options for this table. + */ + public Map getExtensions() { + return extensions; + } + + /** + * Returns whether or not change data capture is enabled for this table. + * + *

For Cassandra versions prior to 3.8.0, this method always returns false. + * + * @return whether or not change data capture is enabled for this table. + */ + public boolean isCDC() { + return cdc; + } + + /** + * The threshold at which a cheap quorum write will be upgraded to include transient replicas. + * + *

This option is only available in Cassandra® 4.0 and above. Default value is {@code 99p}. + * + * @return The additional write policy for this table (ex. '99p'). + */ + public String getAdditionalWritePolicy() { + return additionalWritePolicy; + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof TableOptionsMetadata)) return false; + + TableOptionsMetadata that = (TableOptionsMetadata) other; + return this.isCompactStorage == that.isCompactStorage + && MoreObjects.equal(this.comment, that.comment) + && this.readRepairChance == that.readRepairChance + && this.localReadRepairChance == that.localReadRepairChance + && MoreObjects.equal(this.readRepair, that.readRepair) + && this.replicateOnWrite == that.replicateOnWrite + && this.gcGrace == that.gcGrace + && this.bfFpChance == that.bfFpChance + && MoreObjects.equal(this.caching, that.caching) + && this.populateCacheOnFlush == that.populateCacheOnFlush + && this.memtableFlushPeriodMs == that.memtableFlushPeriodMs + && this.defaultTTL == that.defaultTTL + && this.cdc == that.cdc + && MoreObjects.equal(this.speculativeRetry, that.speculativeRetry) + && MoreObjects.equal(this.indexInterval, that.indexInterval) + && MoreObjects.equal(this.minIndexInterval, that.minIndexInterval) + && MoreObjects.equal(this.maxIndexInterval, that.maxIndexInterval) + && MoreObjects.equal(this.compaction, that.compaction) + && MoreObjects.equal(this.compression, that.compression) + && MoreObjects.equal(this.crcCheckChance, that.crcCheckChance) + && MoreObjects.equal(this.additionalWritePolicy, that.additionalWritePolicy) + && MoreObjects.equal(this.extensions, that.extensions); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode( + isCompactStorage, + comment, + readRepairChance, + localReadRepairChance, + readRepair, + replicateOnWrite, + gcGrace, + bfFpChance, + caching, + populateCacheOnFlush, + memtableFlushPeriodMs, + defaultTTL, + speculativeRetry, + indexInterval, + minIndexInterval, + maxIndexInterval, + compaction, + compression, + crcCheckChance, + extensions, + cdc, + additionalWritePolicy); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java index cdfbe160ba9..bcadb3007a2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
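As an aside to the `TableOptionsMetadata` changes above (illustration only, not part of the patch): the parsed options, including the new `read_repair` and `additional_write_policy` getters, are exposed through the driver's schema metadata. A minimal sketch, assuming a reachable contact point and placeholder keyspace/table names:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.TableMetadata;
import com.datastax.driver.core.TableOptionsMetadata;

public class TableOptionsExample {
  public static void main(String[] args) {
    // "127.0.0.1", "my_keyspace" and "my_table" are placeholders for this sketch.
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
      cluster.init(); // fetches schema metadata without opening a session
      TableMetadata table =
          cluster.getMetadata().getKeyspace("my_keyspace").getTable("my_table");
      TableOptionsMetadata options = table.getOptions();

      System.out.println("comment                 = " + options.getComment());
      System.out.println("gc_grace_seconds        = " + options.getGcGraceInSeconds());
      System.out.println("compaction              = " + options.getCompaction());
      // New in this change; defaults ("BLOCKING", "99p") are returned for pre-4.0 servers.
      System.out.println("read_repair             = " + options.getReadRepair());
      System.out.println("additional_write_policy = " + options.getAdditionalWritePolicy());
    }
  }
}
```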
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,56 +17,60 @@ */ package com.datastax.driver.core; - import java.util.concurrent.TimeUnit; /** - * A timestamp generator that guarantees monotonically increasing timestamps on a per-thread basis, and logs warnings - * when timestamps drift in the future. - *

- * Beware that there is a risk of timestamp collision with this generator when accessed - * by more than one thread at a time; only use it when threads are not in direct competition - * for timestamp ties (i.e., they are executing independent statements). + * A timestamp generator that guarantees monotonically increasing timestamps on a per-thread basis, + * and logs warnings when timestamps drift in the future. + * + *

Beware that there is a risk of timestamp collision with this generator when accessed by more + * than one thread at a time; only use it when threads are not in direct competition for timestamp + * ties (i.e., they are executing independent statements). * * @see AbstractMonotonicTimestampGenerator */ public class ThreadLocalMonotonicTimestampGenerator extends LoggingMonotonicTimestampGenerator { - // We're deliberately avoiding an anonymous subclass with initialValue(), because this can introduce - // classloader leaks in managed environments like Tomcat - private final ThreadLocal lastRef = new ThreadLocal(); + // We're deliberately avoiding an anonymous subclass with initialValue(), because this can + // introduce + // classloader leaks in managed environments like Tomcat + private final ThreadLocal lastRef = new ThreadLocal(); - /** - * Creates a new instance with a warning threshold and warning interval of one second. - * - * @see #ThreadLocalMonotonicTimestampGenerator(long, TimeUnit, long, TimeUnit) - */ - public ThreadLocalMonotonicTimestampGenerator() { - this(1, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); - } + /** + * Creates a new instance with a warning threshold and warning interval of one second. + * + * @see #ThreadLocalMonotonicTimestampGenerator(long, TimeUnit, long, TimeUnit) + */ + public ThreadLocalMonotonicTimestampGenerator() { + this(1, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); + } - /** - * Creates a new instance. - * - * @param warningThreshold how far in the future timestamps are allowed to drift before a warning is logged. - * @param warningThresholdUnit the unit for {@code warningThreshold}. - * @param warningInterval how often the warning will be logged if timestamps keep drifting above the threshold. - * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. - */ - public ThreadLocalMonotonicTimestampGenerator(long warningThreshold, TimeUnit warningThresholdUnit, - long warningInterval, TimeUnit warningIntervalUnit) { - super(warningThreshold, warningThresholdUnit, warningInterval, warningIntervalUnit); - } + /** + * Creates a new instance. + * + * @param warningThreshold how far in the future timestamps are allowed to drift before a warning + * is logged. + * @param warningThresholdUnit the unit for {@code warningThreshold}. + * @param warningInterval how often the warning will be logged if timestamps keep drifting above + * the threshold. + * @param warningIntervalUnit the unit for {@code warningIntervalUnit}. + */ + public ThreadLocalMonotonicTimestampGenerator( + long warningThreshold, + TimeUnit warningThresholdUnit, + long warningInterval, + TimeUnit warningIntervalUnit) { + super(warningThreshold, warningThresholdUnit, warningInterval, warningIntervalUnit); + } - @Override - public long next() { - Long last = this.lastRef.get(); - if (last == null) - last = 0L; + @Override + public long next() { + Long last = this.lastRef.get(); + if (last == null) last = 0L; - long next = computeNext(last); + long next = computeNext(last); - this.lastRef.set(next); - return next; - } + this.lastRef.set(next); + return next; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ThreadingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ThreadingOptions.java index f3f97032002..07c99a874bc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ThreadingOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ThreadingOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
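An illustrative aside on `ThreadLocalMonotonicTimestampGenerator` above (not part of the patch): the generator is plugged in at `Cluster` build time, and the four-argument constructor controls how far timestamps may drift into the future before a warning is logged, and how often that warning repeats. The contact point below is a placeholder.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ThreadLocalMonotonicTimestampGenerator;
import java.util.concurrent.TimeUnit;

public class TimestampGeneratorConfig {
  public static void main(String[] args) {
    // Warn when timestamps drift more than 5 seconds into the future,
    // logging the warning at most once every 10 seconds.
    ThreadLocalMonotonicTimestampGenerator generator =
        new ThreadLocalMonotonicTimestampGenerator(5, TimeUnit.SECONDS, 10, TimeUnit.SECONDS);

    try (Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withTimestampGenerator(generator)
            .build()) {
      // Each driver thread now produces its own strictly increasing sequence.
      System.out.println("next timestamp (microseconds): " + generator.next());
    }
  }
}
```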
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,127 +19,153 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.util.concurrent.DefaultThreadFactory; - -import java.util.concurrent.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; /** * A set of hooks that allow clients to customize the driver's internal executors. - *

- * The methods in this class are invoked when the cluster initializes. To customize the behavior, extend the class and - * override the appropriate methods. - *

- * This is mainly intended to allow customization and instrumentation of driver threads. Each method must return a - * newly-allocated executor; don't use a shared executor, as this could introduce unintended consequences like deadlocks - * (we're working to simplify the driver's architecture and reduce the number of executors in a future release). The - * default implementations use unbounded queues, which is appropriate when the driver is properly configured; the only - * reason you would want to use bounded queues is to limit memory consumption in case of a bug or bad configuration. In - * that case, make sure to use a {@link RejectedExecutionHandler} that throws, such as - * {@link java.util.concurrent.ThreadPoolExecutor.AbortPolicy}; a blocking handler could introduce deadlocks. - *

- * Netty uses a separate pool for I/O operations, that can be configured via {@link NettyOptions}. + * + *

The methods in this class are invoked when the cluster initializes. To customize the behavior, + * extend the class and override the appropriate methods. + * + *
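To make the extension point just described concrete, here is a minimal sketch (not part of the patch) of a subclass overriding one hook; the fixed pool size of 4 is arbitrary, and the wiring assumes the usual `Cluster.Builder#withThreadingOptions` method.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ThreadingOptions;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedSizeThreadingOptions extends ThreadingOptions {

  @Override
  public ExecutorService createExecutor(String clusterName) {
    // A newly allocated (never shared) executor with a fixed pool of 4 threads
    // instead of the default "number of available cores".
    return new ThreadPoolExecutor(
        4, 4, 30, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(),
        createThreadFactory(clusterName, "worker"));
  }

  public static void main(String[] args) {
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withThreadingOptions(new FixedSizeThreadingOptions())
            .build();
    cluster.close();
  }
}
```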

This is mainly intended to allow customization and instrumentation of driver threads. Each + * method must return a newly-allocated executor; don't use a shared executor, as this could + * introduce unintended consequences like deadlocks (we're working to simplify the driver's + * architecture and reduce the number of executors in a future release). The default implementations + * use unbounded queues, which is appropriate when the driver is properly configured; the only + * reason you would want to use bounded queues is to limit memory consumption in case of a bug or + * bad configuration. In that case, make sure to use a {@link RejectedExecutionHandler} that throws, + * such as {@link java.util.concurrent.ThreadPoolExecutor.AbortPolicy}; a blocking handler could + * introduce deadlocks. + * + *

Netty uses a separate pool for I/O operations, that can be configured via {@link + * NettyOptions}. */ public class ThreadingOptions { - // Kept for backward compatibility, but this should be customized via this class now - private static final int NON_BLOCKING_EXECUTOR_SIZE = SystemProperties.getInt( - "com.datastax.driver.NON_BLOCKING_EXECUTOR_SIZE", Runtime.getRuntime().availableProcessors()); - private static final int DEFAULT_THREAD_KEEP_ALIVE_SECONDS = 30; + // Kept for backward compatibility, but this should be customized via this class now + private static final int NON_BLOCKING_EXECUTOR_SIZE = + SystemProperties.getInt( + "com.datastax.driver.NON_BLOCKING_EXECUTOR_SIZE", + Runtime.getRuntime().availableProcessors()); + private static final int DEFAULT_THREAD_KEEP_ALIVE_SECONDS = 30; - /** - * Builds a thread factory for the threads created by a given executor. - *

- * This is used by the default implementations in this class, and also internally to create the Netty I/O pool. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @param executorName a name that identifies the executor. - * @return the thread factory. - */ - public ThreadFactory createThreadFactory(String clusterName, String executorName) { - return new ThreadFactoryBuilder() - .setNameFormat(clusterName + "-" + executorName + "-%d") - // Back with Netty's thread factory in order to create FastThreadLocalThread instances. This allows - // an optimization around ThreadLocals (we could use DefaultThreadFactory directly but it creates - // slightly different thread names, so keep we keep a ThreadFactoryBuilder wrapper for backward - // compatibility). - .setThreadFactory(new DefaultThreadFactory("ignored name")) - .build(); - } + /** + * Builds a thread factory for the threads created by a given executor. + * + *

This is used by the default implementations in this class, and also internally to create the + * Netty I/O pool. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @param executorName a name that identifies the executor. + * @return the thread factory. + */ + public ThreadFactory createThreadFactory(String clusterName, String executorName) { + return new ThreadFactoryBuilder() + .setNameFormat(clusterName + "-" + executorName + "-%d") + // Back with Netty's thread factory in order to create FastThreadLocalThread instances. This + // allows + // an optimization around ThreadLocals (we could use DefaultThreadFactory directly but it + // creates + // slightly different thread names, so keep we keep a ThreadFactoryBuilder wrapper for + // backward + // compatibility). + .setThreadFactory(new DefaultThreadFactory("ignored name")) + .build(); + } - /** - * Builds the main internal executor, used for tasks such as scheduling speculative executions, triggering - * registered {@link SchemaChangeListener}s, reacting to node state changes, and metadata updates. - *

- * The default implementation sets the pool size to the number of available cores. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @return the executor. - */ - public ExecutorService createExecutor(String clusterName) { - ThreadPoolExecutor executor = new ThreadPoolExecutor( - NON_BLOCKING_EXECUTOR_SIZE, NON_BLOCKING_EXECUTOR_SIZE, - DEFAULT_THREAD_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, - new LinkedBlockingQueue(), - createThreadFactory(clusterName, "worker")); - executor.allowCoreThreadTimeOut(true); - return executor; - } + /** + * Builds the main internal executor, used for tasks such as scheduling speculative executions, + * triggering registered {@link SchemaChangeListener}s, reacting to node state changes, and + * metadata updates. + * + *

The default implementation sets the pool size to the number of available cores. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @return the executor. + */ + public ExecutorService createExecutor(String clusterName) { + ThreadPoolExecutor executor = + new ThreadPoolExecutor( + NON_BLOCKING_EXECUTOR_SIZE, + NON_BLOCKING_EXECUTOR_SIZE, + DEFAULT_THREAD_KEEP_ALIVE_SECONDS, + TimeUnit.SECONDS, + new LinkedBlockingQueue(), + createThreadFactory(clusterName, "worker")); + executor.allowCoreThreadTimeOut(true); + return executor; + } - /** - * Builds the executor used to block on new connections before they are added to a pool. - *

- * The default implementation uses 2 threads. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @return the executor. - */ - public ExecutorService createBlockingExecutor(String clusterName) { - ThreadPoolExecutor executor = new ThreadPoolExecutor( - 2, 2, - DEFAULT_THREAD_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, - new LinkedBlockingQueue(), - createThreadFactory(clusterName, "blocking-task-worker")); - executor.allowCoreThreadTimeOut(true); - return executor; - } + /** + * Builds the executor used to block on new connections before they are added to a pool. + * + *

The default implementation uses 2 threads. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @return the executor. + */ + public ExecutorService createBlockingExecutor(String clusterName) { + ThreadPoolExecutor executor = + new ThreadPoolExecutor( + 2, + 2, + DEFAULT_THREAD_KEEP_ALIVE_SECONDS, + TimeUnit.SECONDS, + new LinkedBlockingQueue(), + createThreadFactory(clusterName, "blocking-task-worker")); + executor.allowCoreThreadTimeOut(true); + return executor; + } - /** - * Builds the executor when reconnection attempts will be scheduled. - *

- * The default implementation uses 2 threads. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @return the executor. - */ - public ScheduledExecutorService createReconnectionExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(2, createThreadFactory(clusterName, "reconnection")); - } + /** + * Builds the executor when reconnection attempts will be scheduled. + * + *

The default implementation uses 2 threads. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @return the executor. + */ + public ScheduledExecutorService createReconnectionExecutor(String clusterName) { + return new ScheduledThreadPoolExecutor(2, createThreadFactory(clusterName, "reconnection")); + } - /** - * Builds the executor to handle host state notifications from Cassandra. - *

- * This executor must have exactly one thread so that notifications are processed in order. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @return the executor. - */ - public ScheduledExecutorService createScheduledTasksExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(1, createThreadFactory(clusterName, "scheduled-task-worker")); - } + /** + * Builds the executor to handle host state notifications from Cassandra. + * + *

This executor must have exactly one thread so that notifications are processed in + * order. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @return the executor. + */ + public ScheduledExecutorService createScheduledTasksExecutor(String clusterName) { + return new ScheduledThreadPoolExecutor( + 1, createThreadFactory(clusterName, "scheduled-task-worker")); + } - /** - * Builds the executor for an internal maintenance task used to clean up closed connections. - *

- * A single scheduled task runs on this executor, so there is no reason to use more than one thread. - * - * @param clusterName the name of the cluster, as specified by - * {@link com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. - * @return the executor. - */ - public ScheduledExecutorService createReaperExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(1, createThreadFactory(clusterName, "connection-reaper")); - } + /** + * Builds the executor for an internal maintenance task used to clean up closed connections. + * + *

A single scheduled task runs on this executor, so there is no reason to use more than one + * thread. + * + * @param clusterName the name of the cluster, as specified by {@link + * com.datastax.driver.core.Cluster.Builder#withClusterName(String)}. + * @return the executor. + */ + public ScheduledExecutorService createReaperExecutor(String clusterName) { + return new ScheduledThreadPoolExecutor( + 1, createThreadFactory(clusterName, "connection-reaper")); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java index 361f9fff580..309db0cb88a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,24 +19,23 @@ /** * Generates client-side, microsecond-precision query timestamps. - *

- * Given that Cassandra uses those timestamps to resolve conflicts, implementations should generate - * monotonically increasing timestamps for successive invocations of {@link #next()}. + * + *

Given that Cassandra uses those timestamps to resolve conflicts, implementations should + * generate monotonically increasing timestamps for successive invocations of {@link #next()}. */ public interface TimestampGenerator { - /** - * Returns the next timestamp. - *

- * Implementors should enforce increasing monotonicity of timestamps, that is, - * a timestamp returned should always be strictly greater that any previously returned - * timestamp. - *

- * Implementors should strive to achieve microsecond precision in the best possible way, - * which is usually largely dependent on the underlying operating system's capabilities. - * - * @return the next timestamp (in microseconds). If it equals {@link Long#MIN_VALUE}, it won't be - * sent by the driver, letting Cassandra generate a server-side timestamp. - */ - long next(); + /** + * Returns the next timestamp. + * + *

Implementors should enforce increasing monotonicity of timestamps, that is, a timestamp + * returned should always be strictly greater than any previously returned timestamp. + * + *
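As a sketch of the contract described above (not part of the patch): a trivial `TimestampGenerator` that keeps timestamps strictly increasing across all threads with one atomic counter, trading some contention for global monotonicity.

```java
import com.datastax.driver.core.TimestampGenerator;
import java.util.concurrent.atomic.AtomicLong;

/** Strictly increasing, microsecond timestamps shared by all threads. */
public class SimpleMonotonicTimestampGenerator implements TimestampGenerator {

  private final AtomicLong last = new AtomicLong();

  @Override
  public long next() {
    while (true) {
      long previous = last.get();
      long candidate = System.currentTimeMillis() * 1000; // microsecond unit, millisecond precision
      long next = Math.max(candidate, previous + 1);       // never repeat or go backwards
      if (last.compareAndSet(previous, next)) {
        return next;
      }
    }
  }
}
```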

Implementors should strive to achieve microsecond precision in the best possible way, which + * is usually largely dependent on the underlying operating system's capabilities. + * + * @return the next timestamp (in microseconds). If it equals {@link Long#MIN_VALUE}, it won't be + * sent by the driver, letting Cassandra generate a server-side timestamp. + */ + long next(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Token.java b/driver-core/src/main/java/com/datastax/driver/core/Token.java index c7014d57e01..79bd75c5ab5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Token.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Token.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,685 +21,656 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.primitives.UnsignedBytes; - import java.math.BigInteger; import java.nio.ByteBuffer; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.List; -/** - * A token on the Cassandra ring. - */ +/** A token on the Cassandra ring. */ public abstract class Token implements Comparable { - /** - * Returns the data type of this token's value. - * - * @return the datatype. - */ - public abstract DataType getType(); - - /** - * Returns the raw value of this token. - * - * @return the value. - */ - public abstract Object getValue(); + /** + * Returns the data type of this token's value. + * + * @return the datatype. + */ + public abstract DataType getType(); + + /** + * Returns the raw value of this token. + * + * @return the value. + */ + public abstract Object getValue(); + + /** + * Returns the serialized form of the current token, using the appropriate codec depending on the + * partitioner in use and the CQL datatype for the token. + * + * @param protocolVersion the protocol version in use. 
+ * @return the serialized form of the current token + */ + public abstract ByteBuffer serialize(ProtocolVersion protocolVersion); + + static Token.Factory getFactory(String partitionerName) { + if (partitionerName.endsWith("Murmur3Partitioner")) return M3PToken.FACTORY; + else if (partitionerName.endsWith("RandomPartitioner")) return RPToken.FACTORY; + else if (partitionerName.endsWith("OrderedPartitioner")) return OPPToken.FACTORY; + else return null; + } + + abstract static class Factory { + abstract Token fromString(String tokenStr); + + abstract DataType getTokenType(); + + abstract Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion); /** - * Returns the serialized form of the current token, - * using the appropriate codec depending on the - * partitioner in use and the CQL datatype for - * the token. - * - * @param protocolVersion the protocol version in use. - * @return the serialized form of the current token + * The minimum token is a special value that no key ever hashes to, it's used both as lower and + * upper bound. */ - public abstract ByteBuffer serialize(ProtocolVersion protocolVersion); - - static Token.Factory getFactory(String partitionerName) { - if (partitionerName.endsWith("Murmur3Partitioner")) - return M3PToken.FACTORY; - else if (partitionerName.endsWith("RandomPartitioner")) - return RPToken.FACTORY; - else if (partitionerName.endsWith("OrderedPartitioner")) - return OPPToken.FACTORY; - else - return null; + abstract Token minToken(); + + abstract Token hash(ByteBuffer partitionKey); + + abstract List split(Token startToken, Token endToken, int numberOfSplits); + + // Base implementation for split + protected List split( + BigInteger start, + BigInteger range, + BigInteger ringEnd, + BigInteger ringLength, + int numberOfSplits) { + BigInteger[] tmp = range.divideAndRemainder(BigInteger.valueOf(numberOfSplits)); + BigInteger divider = tmp[0]; + int remainder = tmp[1].intValue(); + + List results = Lists.newArrayListWithExpectedSize(numberOfSplits - 1); + BigInteger current = start; + BigInteger dividerPlusOne = + (remainder == 0) + ? null // won't be used + : divider.add(BigInteger.ONE); + + for (int i = 1; i < numberOfSplits; i++) { + current = current.add(remainder-- > 0 ? 
dividerPlusOne : divider); + if (ringEnd != null && current.compareTo(ringEnd) > 0) + current = current.subtract(ringLength); + results.add(current); + } + return results; } + } + + // Murmur3Partitioner tokens + static class M3PToken extends Token { + private final long value; + + public static final Factory FACTORY = new M3PTokenFactory(); + + private static class M3PTokenFactory extends Factory { + + private static final BigInteger RING_END = BigInteger.valueOf(Long.MAX_VALUE); + private static final BigInteger RING_LENGTH = + RING_END.subtract(BigInteger.valueOf(Long.MIN_VALUE)); + static final M3PToken MIN_TOKEN = new M3PToken(Long.MIN_VALUE); + static final M3PToken MAX_TOKEN = new M3PToken(Long.MAX_VALUE); + + private long getblock(ByteBuffer key, int offset, int index) { + int i_8 = index << 3; + int blockOffset = offset + i_8; + return ((long) key.get(blockOffset + 0) & 0xff) + + (((long) key.get(blockOffset + 1) & 0xff) << 8) + + (((long) key.get(blockOffset + 2) & 0xff) << 16) + + (((long) key.get(blockOffset + 3) & 0xff) << 24) + + (((long) key.get(blockOffset + 4) & 0xff) << 32) + + (((long) key.get(blockOffset + 5) & 0xff) << 40) + + (((long) key.get(blockOffset + 6) & 0xff) << 48) + + (((long) key.get(blockOffset + 7) & 0xff) << 56); + } + + private long rotl64(long v, int n) { + return ((v << n) | (v >>> (64 - n))); + } + + private long fmix(long k) { + k ^= k >>> 33; + k *= 0xff51afd7ed558ccdL; + k ^= k >>> 33; + k *= 0xc4ceb9fe1a85ec53L; + k ^= k >>> 33; + return k; + } + + // This is an adapted version of the MurmurHash.hash3_x64_128 from Cassandra used + // for M3P. Compared to that methods, there's a few inlining of arguments and we + // only return the first 64-bits of the result since that's all M3P uses. + @SuppressWarnings("fallthrough") + private long murmur(ByteBuffer data) { + int offset = data.position(); + int length = data.remaining(); + + int nblocks = length >> 4; // Process as 128-bit blocks. + + long h1 = 0; + long h2 = 0; + + long c1 = 0x87c37b91114253d5L; + long c2 = 0x4cf5ad432745937fL; + + // ---------- + // body + + for (int i = 0; i < nblocks; i++) { + long k1 = getblock(data, offset, i * 2 + 0); + long k2 = getblock(data, offset, i * 2 + 1); + + k1 *= c1; + k1 = rotl64(k1, 31); + k1 *= c2; + h1 ^= k1; + h1 = rotl64(h1, 27); + h1 += h2; + h1 = h1 * 5 + 0x52dce729; + k2 *= c2; + k2 = rotl64(k2, 33); + k2 *= c1; + h2 ^= k2; + h2 = rotl64(h2, 31); + h2 += h1; + h2 = h2 * 5 + 0x38495ab5; + } - static abstract class Factory { - abstract Token fromString(String tokenStr); + // ---------- + // tail + + // Advance offset to the unprocessed tail of the data. 
+ offset += nblocks * 16; + + long k1 = 0; + long k2 = 0; + + switch (length & 15) { + case 15: + k2 ^= ((long) data.get(offset + 14)) << 48; + case 14: + k2 ^= ((long) data.get(offset + 13)) << 40; + case 13: + k2 ^= ((long) data.get(offset + 12)) << 32; + case 12: + k2 ^= ((long) data.get(offset + 11)) << 24; + case 11: + k2 ^= ((long) data.get(offset + 10)) << 16; + case 10: + k2 ^= ((long) data.get(offset + 9)) << 8; + case 9: + k2 ^= ((long) data.get(offset + 8)) << 0; + k2 *= c2; + k2 = rotl64(k2, 33); + k2 *= c1; + h2 ^= k2; + + case 8: + k1 ^= ((long) data.get(offset + 7)) << 56; + case 7: + k1 ^= ((long) data.get(offset + 6)) << 48; + case 6: + k1 ^= ((long) data.get(offset + 5)) << 40; + case 5: + k1 ^= ((long) data.get(offset + 4)) << 32; + case 4: + k1 ^= ((long) data.get(offset + 3)) << 24; + case 3: + k1 ^= ((long) data.get(offset + 2)) << 16; + case 2: + k1 ^= ((long) data.get(offset + 1)) << 8; + case 1: + k1 ^= ((long) data.get(offset)); + k1 *= c1; + k1 = rotl64(k1, 31); + k1 *= c2; + h1 ^= k1; + } - abstract DataType getTokenType(); + // ---------- + // finalization - abstract Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion); + h1 ^= length; + h2 ^= length; - /** - * The minimum token is a special value that no key ever hashes to, it's used both as lower and upper bound. - */ - abstract Token minToken(); + h1 += h2; + h2 += h1; - abstract Token hash(ByteBuffer partitionKey); + h1 = fmix(h1); + h2 = fmix(h2); - abstract List split(Token startToken, Token endToken, int numberOfSplits); + h1 += h2; + h2 += h1; - // Base implementation for split - protected List split(BigInteger start, BigInteger range, - BigInteger ringEnd, BigInteger ringLength, - int numberOfSplits) { - BigInteger[] tmp = range.divideAndRemainder(BigInteger.valueOf(numberOfSplits)); - BigInteger divider = tmp[0]; - int remainder = tmp[1].intValue(); + return h1; + } - List results = Lists.newArrayListWithExpectedSize(numberOfSplits - 1); - BigInteger current = start; - BigInteger dividerPlusOne = (remainder == 0) ? null // won't be used - : divider.add(BigInteger.ONE); + @Override + M3PToken fromString(String tokenStr) { + return new M3PToken(Long.parseLong(tokenStr)); + } - for (int i = 1; i < numberOfSplits; i++) { - current = current.add(remainder-- > 0 ? 
dividerPlusOne : divider); - if (ringEnd != null && current.compareTo(ringEnd) > 0) - current = current.subtract(ringLength); - results.add(current); - } - return results; - } - } + @Override + DataType getTokenType() { + return DataType.bigint(); + } - // Murmur3Partitioner tokens - static class M3PToken extends Token { - private final long value; - - public static final Factory FACTORY = new M3PTokenFactory(); - - private static class M3PTokenFactory extends Factory { - - private static final BigInteger RING_END = BigInteger.valueOf(Long.MAX_VALUE); - private static final BigInteger RING_LENGTH = RING_END.subtract(BigInteger.valueOf(Long.MIN_VALUE)); - static final M3PToken MIN_TOKEN = new M3PToken(Long.MIN_VALUE); - static final M3PToken MAX_TOKEN = new M3PToken(Long.MAX_VALUE); - - private long getblock(ByteBuffer key, int offset, int index) { - int i_8 = index << 3; - int blockOffset = offset + i_8; - return ((long) key.get(blockOffset + 0) & 0xff) + (((long) key.get(blockOffset + 1) & 0xff) << 8) + - (((long) key.get(blockOffset + 2) & 0xff) << 16) + (((long) key.get(blockOffset + 3) & 0xff) << 24) + - (((long) key.get(blockOffset + 4) & 0xff) << 32) + (((long) key.get(blockOffset + 5) & 0xff) << 40) + - (((long) key.get(blockOffset + 6) & 0xff) << 48) + (((long) key.get(blockOffset + 7) & 0xff) << 56); - } - - private long rotl64(long v, int n) { - return ((v << n) | (v >>> (64 - n))); - } - - private long fmix(long k) { - k ^= k >>> 33; - k *= 0xff51afd7ed558ccdL; - k ^= k >>> 33; - k *= 0xc4ceb9fe1a85ec53L; - k ^= k >>> 33; - return k; - } - - // This is an adapted version of the MurmurHash.hash3_x64_128 from Cassandra used - // for M3P. Compared to that methods, there's a few inlining of arguments and we - // only return the first 64-bits of the result since that's all M3P uses. - @SuppressWarnings("fallthrough") - private long murmur(ByteBuffer data) { - int offset = data.position(); - int length = data.remaining(); - - int nblocks = length >> 4; // Process as 128-bit blocks. - - long h1 = 0; - long h2 = 0; - - long c1 = 0x87c37b91114253d5L; - long c2 = 0x4cf5ad432745937fL; - - //---------- - // body - - for (int i = 0; i < nblocks; i++) { - long k1 = getblock(data, offset, i * 2 + 0); - long k2 = getblock(data, offset, i * 2 + 1); - - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - h1 = rotl64(h1, 27); - h1 += h2; - h1 = h1 * 5 + 0x52dce729; - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - h2 = rotl64(h2, 31); - h2 += h1; - h2 = h2 * 5 + 0x38495ab5; - } - - //---------- - // tail - - // Advance offset to the unprocessed tail of the data. 
- offset += nblocks * 16; - - long k1 = 0; - long k2 = 0; - - switch (length & 15) { - case 15: - k2 ^= ((long) data.get(offset + 14)) << 48; - case 14: - k2 ^= ((long) data.get(offset + 13)) << 40; - case 13: - k2 ^= ((long) data.get(offset + 12)) << 32; - case 12: - k2 ^= ((long) data.get(offset + 11)) << 24; - case 11: - k2 ^= ((long) data.get(offset + 10)) << 16; - case 10: - k2 ^= ((long) data.get(offset + 9)) << 8; - case 9: - k2 ^= ((long) data.get(offset + 8)) << 0; - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - - case 8: - k1 ^= ((long) data.get(offset + 7)) << 56; - case 7: - k1 ^= ((long) data.get(offset + 6)) << 48; - case 6: - k1 ^= ((long) data.get(offset + 5)) << 40; - case 5: - k1 ^= ((long) data.get(offset + 4)) << 32; - case 4: - k1 ^= ((long) data.get(offset + 3)) << 24; - case 3: - k1 ^= ((long) data.get(offset + 2)) << 16; - case 2: - k1 ^= ((long) data.get(offset + 1)) << 8; - case 1: - k1 ^= ((long) data.get(offset)); - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - } - - //---------- - // finalization - - h1 ^= length; - h2 ^= length; - - h1 += h2; - h2 += h1; - - h1 = fmix(h1); - h2 = fmix(h2); - - h1 += h2; - h2 += h1; - - return h1; - } - - @Override - M3PToken fromString(String tokenStr) { - return new M3PToken(Long.parseLong(tokenStr)); - } - - @Override - DataType getTokenType() { - return DataType.bigint(); - } - - @Override - Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { - return new M3PToken(TypeCodec.bigint().deserialize(buffer, protocolVersion)); - } - - @Override - Token minToken() { - return MIN_TOKEN; - } - - @Override - M3PToken hash(ByteBuffer partitionKey) { - long v = murmur(partitionKey); - return new M3PToken(v == Long.MIN_VALUE ? Long.MAX_VALUE : v); - } - - @Override - List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) && startToken.equals(MIN_TOKEN)) - endToken = MAX_TOKEN; - - BigInteger start = BigInteger.valueOf(((M3PToken) startToken).value); - BigInteger end = BigInteger.valueOf(((M3PToken) endToken).value); - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) - range = range.add(RING_LENGTH); - - List values = super.split(start, range, - RING_END, RING_LENGTH, - numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) - tokens.add(new M3PToken(value.longValue())); - return tokens; - } - } + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new M3PToken(TypeCodec.bigint().deserialize(buffer, protocolVersion)); + } - private M3PToken(long value) { - this.value = value; - } + @Override + Token minToken() { + return MIN_TOKEN; + } - @Override - public DataType getType() { - return FACTORY.getTokenType(); - } + @Override + M3PToken hash(ByteBuffer partitionKey) { + long v = murmur(partitionKey); + return new M3PToken(v == Long.MIN_VALUE ? 
Long.MAX_VALUE : v); + } - @Override - public Object getValue() { - return value; - } + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + // edge case: ]min, min] means the whole ring + if (startToken.equals(endToken) && startToken.equals(MIN_TOKEN)) endToken = MAX_TOKEN; - @Override - public ByteBuffer serialize(ProtocolVersion protocolVersion) { - return TypeCodec.bigint().serialize(value, protocolVersion); - } + BigInteger start = BigInteger.valueOf(((M3PToken) startToken).value); + BigInteger end = BigInteger.valueOf(((M3PToken) endToken).value); - @Override - public int compareTo(Token other) { - assert other instanceof M3PToken; - long otherValue = ((M3PToken) other).value; - return value < otherValue ? -1 : (value == otherValue) ? 0 : 1; - } + BigInteger range = end.subtract(start); + if (range.compareTo(BigInteger.ZERO) < 0) range = range.add(RING_LENGTH); + + List values = super.split(start, range, RING_END, RING_LENGTH, numberOfSplits); + List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) tokens.add(new M3PToken(value.longValue())); + return tokens; + } + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || this.getClass() != obj.getClass()) - return false; + private M3PToken(long value) { + this.value = value; + } - return value == ((M3PToken) obj).value; - } + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } - @Override - public int hashCode() { - return (int) (value ^ (value >>> 32)); - } + @Override + public Object getValue() { + return value; + } - @Override - public String toString() { - return Long.toString(value); - } + @Override + public ByteBuffer serialize(ProtocolVersion protocolVersion) { + return TypeCodec.bigint().serialize(value, protocolVersion); } - // OPPartitioner tokens - static class OPPToken extends Token { - - private final ByteBuffer value; - - public static final Factory FACTORY = new OPPTokenFactory(); - - private static class OPPTokenFactory extends Factory { - private static final BigInteger TWO = BigInteger.valueOf(2); - private static final Token MIN_TOKEN = new OPPToken(ByteBuffer.allocate(0)); - - @Override - public OPPToken fromString(String tokenStr) { - // This method must be able to parse the contents of system.peers.tokens, which do not have the "0x" prefix. - // On the other hand, OPPToken#toString has the "0x" because it should be usable in a CQL query, and it's - // nice to have fromString and toString symetrical. - // So handle both cases: - if (!tokenStr.startsWith("0x")) { - String prefix = (tokenStr.length() % 2 == 0) ? "0x" : "0x0"; - tokenStr = prefix + tokenStr; - } - ByteBuffer value = Bytes.fromHexString(tokenStr); - return new OPPToken(value); - } - - @Override - DataType getTokenType() { - return DataType.blob(); - } - - @Override - Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { - return new OPPToken(buffer); - } - - @Override - Token minToken() { - return MIN_TOKEN; - } - - @Override - OPPToken hash(ByteBuffer partitionKey) { - return new OPPToken(partitionKey); - } - - @Override - List split(Token startToken, Token endToken, int numberOfSplits) { - int tokenOrder = startToken.compareTo(endToken); - - // ]min,min] means the whole ring. 
However, since there is no "max token" with this partitioner, we can't come up - // with a magic end value that would cover the whole ring - if (tokenOrder == 0 && startToken.equals(MIN_TOKEN)) - throw new IllegalArgumentException("Cannot split whole ring with ordered partitioner"); - - OPPToken oppStartToken = (OPPToken) startToken; - OPPToken oppEndToken = (OPPToken) endToken; - - int significantBytes; - BigInteger start, end, range, ringEnd, ringLength; - BigInteger bigNumberOfSplits = BigInteger.valueOf(numberOfSplits); - if (tokenOrder < 0) { - // Since tokens are compared lexicographically, convert to integers using the largest length - // (ex: given 0x0A and 0x0BCD, switch to 0x0A00 and 0x0BCD) - significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); - - // If the number of splits does not fit in the difference between the two integers, use more bytes - // (ex: cannot fit 4 splits between 0x01 and 0x03, so switch to 0x0100 and 0x0300) - // At most 4 additional bytes will be needed, since numberOfSplits is an integer. - int addedBytes = 0; - while (true) { - start = toBigInteger(oppStartToken.value, significantBytes); - end = toBigInteger(oppEndToken.value, significantBytes); - range = end.subtract(start); - if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) - break; - significantBytes += 1; - addedBytes += 1; - } - ringEnd = ringLength = null; // won't be used - } else { - // Same logic except that we wrap around the ring - significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); - int addedBytes = 0; - while (true) { - start = toBigInteger(oppStartToken.value, significantBytes); - end = toBigInteger(oppEndToken.value, significantBytes); - ringLength = TWO.pow(significantBytes * 8); - ringEnd = ringLength.subtract(BigInteger.ONE); - range = end.subtract(start).add(ringLength); - if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) - break; - significantBytes += 1; - addedBytes += 1; - } - } - - List values = super.split(start, range, - ringEnd, ringLength, - numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) - tokens.add(new OPPToken(toBytes(value, significantBytes))); - return tokens; - } - - // Convert a token's byte array to a number in order to perform computations. - // This depends on the number of "significant bytes" that we use to normalize all tokens to the same size. - // For example if the token is 0x01 but significantBytes is 2, the result is 8 (0x0100). - private BigInteger toBigInteger(ByteBuffer bb, int significantBytes) { - byte[] bytes = Bytes.getArray(bb); - byte[] target; - if (significantBytes != bytes.length) { - target = new byte[significantBytes]; - System.arraycopy(bytes, 0, target, 0, bytes.length); - } else - target = bytes; - return new BigInteger(1, target); - } - - // Convert a numeric representation back to a byte array. - // Again, the number of significant bytes matters: if the input value is 1 but significantBytes is 2, the - // expected result is 0x0001 (a simple conversion would produce 0x01). 
- protected ByteBuffer toBytes(BigInteger value, int significantBytes) { - byte[] rawBytes = value.toByteArray(); - byte[] result; - if (rawBytes.length == significantBytes) - result = rawBytes; - else { - result = new byte[significantBytes]; - int start, length; - if (rawBytes[0] == 0) { // that's a sign byte, ignore (it can cause rawBytes.length == significantBytes + 1) - start = 1; - length = rawBytes.length - 1; - } else { - start = 0; - length = rawBytes.length; - } - System.arraycopy(rawBytes, start, result, significantBytes - length, length); - } - return ByteBuffer.wrap(result); - } - } + @Override + public int compareTo(Token other) { + assert other instanceof M3PToken; + long otherValue = ((M3PToken) other).value; + return value < otherValue ? -1 : (value == otherValue) ? 0 : 1; + } - @VisibleForTesting - OPPToken(ByteBuffer value) { - this.value = stripTrailingZeroBytes(value); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || this.getClass() != obj.getClass()) return false; - /** - * @return A new ByteBuffer from the input Buffer with any trailing 0-bytes stripped off. - */ - private static ByteBuffer stripTrailingZeroBytes(ByteBuffer b) { - byte result[] = Bytes.getArray(b); - int zeroIndex = result.length; - for (int i = result.length - 1; i > 0; i--) { - if (result[i] == 0) { - zeroIndex = i; - } else { - break; - } - } - return ByteBuffer.wrap(result, 0, zeroIndex); - } + return value == ((M3PToken) obj).value; + } - @Override - public DataType getType() { - return FACTORY.getTokenType(); - } + @Override + public int hashCode() { + return (int) (value ^ (value >>> 32)); + } - @Override - public Object getValue() { - return value; + @Override + public String toString() { + return Long.toString(value); + } + } + + // OPPartitioner tokens + static class OPPToken extends Token { + + private final ByteBuffer value; + + public static final Factory FACTORY = new OPPTokenFactory(); + + private static class OPPTokenFactory extends Factory { + private static final BigInteger TWO = BigInteger.valueOf(2); + private static final Token MIN_TOKEN = new OPPToken(ByteBuffer.allocate(0)); + + @Override + public OPPToken fromString(String tokenStr) { + // This method must be able to parse the contents of system.peers.tokens, which do not have + // the "0x" prefix. + // On the other hand, OPPToken#toString has the "0x" because it should be usable in a CQL + // query, and it's + // nice to have fromString and toString symetrical. + // So handle both cases: + if (!tokenStr.startsWith("0x")) { + String prefix = (tokenStr.length() % 2 == 0) ? "0x" : "0x0"; + tokenStr = prefix + tokenStr; } - - @Override - public ByteBuffer serialize(ProtocolVersion protocolVersion) { - return TypeCodec.blob().serialize(value, protocolVersion); + ByteBuffer value = Bytes.fromHexString(tokenStr); + return new OPPToken(value); + } + + @Override + DataType getTokenType() { + return DataType.blob(); + } + + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new OPPToken(buffer); + } + + @Override + Token minToken() { + return MIN_TOKEN; + } + + @Override + OPPToken hash(ByteBuffer partitionKey) { + return new OPPToken(partitionKey); + } + + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + int tokenOrder = startToken.compareTo(endToken); + + // ]min,min] means the whole ring. 
However, since there is no "max token" with this + // partitioner, we can't come up + // with a magic end value that would cover the whole ring + if (tokenOrder == 0 && startToken.equals(MIN_TOKEN)) + throw new IllegalArgumentException("Cannot split whole ring with ordered partitioner"); + + OPPToken oppStartToken = (OPPToken) startToken; + OPPToken oppEndToken = (OPPToken) endToken; + + int significantBytes; + BigInteger start, end, range, ringEnd, ringLength; + BigInteger bigNumberOfSplits = BigInteger.valueOf(numberOfSplits); + if (tokenOrder < 0) { + // Since tokens are compared lexicographically, convert to integers using the largest + // length + // (ex: given 0x0A and 0x0BCD, switch to 0x0A00 and 0x0BCD) + significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); + + // If the number of splits does not fit in the difference between the two integers, use + // more bytes + // (ex: cannot fit 4 splits between 0x01 and 0x03, so switch to 0x0100 and 0x0300) + // At most 4 additional bytes will be needed, since numberOfSplits is an integer. + int addedBytes = 0; + while (true) { + start = toBigInteger(oppStartToken.value, significantBytes); + end = toBigInteger(oppEndToken.value, significantBytes); + range = end.subtract(start); + if (addedBytes == 4 || start.equals(end) || range.compareTo(bigNumberOfSplits) >= 0) + break; + significantBytes += 1; + addedBytes += 1; + } + ringEnd = ringLength = null; // won't be used + } else { + // Same logic except that we wrap around the ring + significantBytes = Math.max(oppStartToken.value.capacity(), oppEndToken.value.capacity()); + int addedBytes = 0; + while (true) { + start = toBigInteger(oppStartToken.value, significantBytes); + end = toBigInteger(oppEndToken.value, significantBytes); + ringLength = TWO.pow(significantBytes * 8); + ringEnd = ringLength.subtract(BigInteger.ONE); + range = end.subtract(start).add(ringLength); + if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) break; + significantBytes += 1; + addedBytes += 1; + } } - @Override - public int compareTo(Token other) { - assert other instanceof OPPToken; - return UnsignedBytes.lexicographicalComparator().compare( - Bytes.getArray(value), - Bytes.getArray(((OPPToken) other).value)); + List values = super.split(start, range, ringEnd, ringLength, numberOfSplits); + List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) tokens.add(new OPPToken(toBytes(value, significantBytes))); + return tokens; + } + + // Convert a token's byte array to a number in order to perform computations. + // This depends on the number of "significant bytes" that we use to normalize all tokens to + // the same size. + // For example if the token is 0x01 but significantBytes is 2, the result is 8 (0x0100). + private BigInteger toBigInteger(ByteBuffer bb, int significantBytes) { + byte[] bytes = Bytes.getArray(bb); + byte[] target; + if (significantBytes != bytes.length) { + target = new byte[significantBytes]; + System.arraycopy(bytes, 0, target, 0, bytes.length); + } else target = bytes; + return new BigInteger(1, target); + } + + // Convert a numeric representation back to a byte array. + // Again, the number of significant bytes matters: if the input value is 1 but + // significantBytes is 2, the + // expected result is 0x0001 (a simple conversion would produce 0x01). 
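To make the "significant bytes" comments above concrete, here is a small standalone sketch (an editor's illustration, not part of this patch) of the widening and left-padding they describe. Note that 0x01 widened to two significant bytes compares as 0x0100, i.e. 256.

```java
import java.math.BigInteger;

public class SignificantBytesSketch {
  public static void main(String[] args) {
    // Forward direction: normalizing token 0x01 to 2 significant bytes appends zeros on the
    // right, so it compares like 0x0100, i.e. 256.
    byte[] widened = new byte[] {0x01, 0x00};
    System.out.println(new BigInteger(1, widened)); // 256

    // Reverse direction: BigInteger.ONE.toByteArray() is just {0x01}; left-padding it to the
    // significant width yields the 2-byte token 0x0001 rather than a bare 0x01.
    byte[] raw = BigInteger.ONE.toByteArray();
    byte[] padded = new byte[2];
    System.arraycopy(raw, 0, padded, 2 - raw.length, raw.length);
    System.out.printf("0x%02x%02x%n", padded[0], padded[1]); // 0x0001
  }
}
```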
+ protected ByteBuffer toBytes(BigInteger value, int significantBytes) { + byte[] rawBytes = value.toByteArray(); + byte[] result; + if (rawBytes.length == significantBytes) result = rawBytes; + else { + result = new byte[significantBytes]; + int start, length; + if (rawBytes[0] == 0) { // that's a sign byte, ignore (it can cause rawBytes.length == + // significantBytes + 1) + start = 1; + length = rawBytes.length - 1; + } else { + start = 0; + length = rawBytes.length; + } + System.arraycopy(rawBytes, start, result, significantBytes - length, length); } + return ByteBuffer.wrap(result); + } + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || this.getClass() != obj.getClass()) - return false; + @VisibleForTesting + OPPToken(ByteBuffer value) { + this.value = value; + } - return value.equals(((OPPToken) obj).value); - } + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } - @Override - public int hashCode() { - return value.hashCode(); - } + @Override + public Object getValue() { + return value; + } - @Override - public String toString() { - return Bytes.toHexString(value); - } + @Override + public ByteBuffer serialize(ProtocolVersion protocolVersion) { + return TypeCodec.blob().serialize(value, protocolVersion); } - // RandomPartitioner tokens - static class RPToken extends Token { - - private final BigInteger value; - - public static final Factory FACTORY = new RPTokenFactory(); - - private static class RPTokenFactory extends Factory { - - private static final BigInteger MIN_VALUE = BigInteger.ONE.negate(); - private static final BigInteger MAX_VALUE = BigInteger.valueOf(2).pow(127); - private static final BigInteger RING_LENGTH = MAX_VALUE.add(BigInteger.ONE); - private static final Token MIN_TOKEN = new RPToken(MIN_VALUE); - private static final Token MAX_TOKEN = new RPToken(MAX_VALUE); - - private final MessageDigest prototype; - private final boolean supportsClone; - - private RPTokenFactory() { - prototype = createMessageDigest(); - boolean supportsClone; - try { - prototype.clone(); - supportsClone = true; - } catch (CloneNotSupportedException e) { - supportsClone = false; - } - this.supportsClone = supportsClone; - } - - private static MessageDigest createMessageDigest() { - try { - return MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); - } - } - - private MessageDigest newMessageDigest() { - if (supportsClone) { - try { - return (MessageDigest) prototype.clone(); - } catch (CloneNotSupportedException ignored) { - } - } - return createMessageDigest(); - } - - private BigInteger md5(ByteBuffer data) { - MessageDigest digest = newMessageDigest(); - digest.update(data.duplicate()); - return new BigInteger(digest.digest()).abs(); - } - - @Override - RPToken fromString(String tokenStr) { - return new RPToken(new BigInteger(tokenStr)); - } - - @Override - DataType getTokenType() { - return DataType.varint(); - } - - @Override - Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { - return new RPToken(TypeCodec.varint().deserialize(buffer, protocolVersion)); - } - - @Override - Token minToken() { - return MIN_TOKEN; - } - - @Override - RPToken hash(ByteBuffer partitionKey) { - return new RPToken(md5(partitionKey)); - } - - @Override - List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) 
&& startToken.equals(MIN_TOKEN)) - endToken = MAX_TOKEN; - - BigInteger start = ((RPToken) startToken).value; - BigInteger end = ((RPToken) endToken).value; - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) - range = range.add(RING_LENGTH); - - List values = super.split(start, range, - MAX_VALUE, RING_LENGTH, - numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) - tokens.add(new RPToken(value)); - return tokens; - } - } + @Override + public int compareTo(Token other) { + assert other instanceof OPPToken; + return UnsignedBytes.lexicographicalComparator() + .compare(Bytes.getArray(value), Bytes.getArray(((OPPToken) other).value)); + } - private RPToken(BigInteger value) { - this.value = value; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || this.getClass() != obj.getClass()) return false; - @Override - public DataType getType() { - return FACTORY.getTokenType(); - } + return value.equals(((OPPToken) obj).value); + } - @Override - public Object getValue() { - return value; - } + @Override + public int hashCode() { + return value.hashCode(); + } - @Override - public ByteBuffer serialize(ProtocolVersion protocolVersion) { - return TypeCodec.varint().serialize(value, protocolVersion); - } + @Override + public String toString() { + return Bytes.toHexString(value); + } + } - @Override - public int compareTo(Token other) { - assert other instanceof RPToken; - return value.compareTo(((RPToken) other).value); - } + // RandomPartitioner tokens + static class RPToken extends Token { - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || this.getClass() != obj.getClass()) - return false; + private final BigInteger value; - return value.equals(((RPToken) obj).value); - } + public static final Factory FACTORY = new RPTokenFactory(); - @Override - public int hashCode() { - return value.hashCode(); - } + private static class RPTokenFactory extends Factory { + + private static final BigInteger MIN_VALUE = BigInteger.ONE.negate(); + private static final BigInteger MAX_VALUE = BigInteger.valueOf(2).pow(127); + private static final BigInteger RING_LENGTH = MAX_VALUE.add(BigInteger.ONE); + private static final Token MIN_TOKEN = new RPToken(MIN_VALUE); + private static final Token MAX_TOKEN = new RPToken(MAX_VALUE); - @Override - public String toString() { - return value.toString(); + private final MessageDigest prototype; + private final boolean supportsClone; + + private RPTokenFactory() { + prototype = createMessageDigest(); + boolean supportsClone; + try { + prototype.clone(); + supportsClone = true; + } catch (CloneNotSupportedException e) { + supportsClone = false; + } + this.supportsClone = supportsClone; + } + + private static MessageDigest createMessageDigest() { + try { + return MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); } + } + + private MessageDigest newMessageDigest() { + if (supportsClone) { + try { + return (MessageDigest) prototype.clone(); + } catch (CloneNotSupportedException ignored) { + } + } + return createMessageDigest(); + } + + private BigInteger md5(ByteBuffer data) { + MessageDigest digest = newMessageDigest(); + digest.update(data.duplicate()); + return new BigInteger(digest.digest()).abs(); + } + + @Override + RPToken fromString(String tokenStr) { + return new 
RPToken(new BigInteger(tokenStr)); + } + + @Override + DataType getTokenType() { + return DataType.varint(); + } + + @Override + Token deserialize(ByteBuffer buffer, ProtocolVersion protocolVersion) { + return new RPToken(TypeCodec.varint().deserialize(buffer, protocolVersion)); + } + + @Override + Token minToken() { + return MIN_TOKEN; + } + + @Override + RPToken hash(ByteBuffer partitionKey) { + return new RPToken(md5(partitionKey)); + } + + @Override + List split(Token startToken, Token endToken, int numberOfSplits) { + // edge case: ]min, min] means the whole ring + if (startToken.equals(endToken) && startToken.equals(MIN_TOKEN)) endToken = MAX_TOKEN; + + BigInteger start = ((RPToken) startToken).value; + BigInteger end = ((RPToken) endToken).value; + + BigInteger range = end.subtract(start); + if (range.compareTo(BigInteger.ZERO) < 0) range = range.add(RING_LENGTH); + + List values = super.split(start, range, MAX_VALUE, RING_LENGTH, numberOfSplits); + List tokens = Lists.newArrayListWithExpectedSize(values.size()); + for (BigInteger value : values) tokens.add(new RPToken(value)); + return tokens; + } + } + + private RPToken(BigInteger value) { + this.value = value; + } + + @Override + public DataType getType() { + return FACTORY.getTokenType(); + } + + @Override + public Object getValue() { + return value; + } + + @Override + public ByteBuffer serialize(ProtocolVersion protocolVersion) { + return TypeCodec.varint().serialize(value, protocolVersion); + } + + @Override + public int compareTo(Token other) { + assert other instanceof RPToken; + return value.compareTo(((RPToken) other).value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || this.getClass() != obj.getClass()) return false; + + return value.equals(((RPToken) obj).value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return value.toString(); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java b/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java index 777f1fdbb6d..40b41ed0a51 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TokenRange.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,319 +21,319 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; - import java.util.ArrayList; import java.util.List; /** * A range of tokens on the Cassandra ring. - *

- * A range is start-exclusive and end-inclusive. It is empty when start and end are the same token, except if that is the minimum - * token, in which case the range covers the whole ring (this is consistent with the behavior of CQL range queries). - *

- * Note that CQL does not handle wrapping. To query all partitions in a range, see {@link #unwrap()}. + * + *

A range is start-exclusive and end-inclusive. It is empty when start and end are the same + * token, except if that is the minimum token, in which case the range covers the whole ring (this + * is consistent with the behavior of CQL range queries). + * + *

Note that CQL does not handle wrapping. To query all partitions in a range, see {@link + * #unwrap()}. */ public final class TokenRange implements Comparable { - private final Token start; - private final Token end; - @VisibleForTesting - final Token.Factory factory; - - TokenRange(Token start, Token end, Token.Factory factory) { - this.start = start; - this.end = end; - this.factory = factory; - } - - /** - * Return the start of the range. - * - * @return the start of the range (exclusive). - */ - public Token getStart() { - return start; - } - - /** - * Return the end of the range. - * - * @return the end of the range (inclusive). - */ - public Token getEnd() { - return end; - } - - /** - * Splits this range into a number of smaller ranges of equal "size" (referring to the number of tokens, not the actual amount of data). - *

- * Splitting an empty range is not permitted. But note that, in edge cases, splitting a range might produce one or more empty ranges. - * - * @param numberOfSplits the number of splits to create. - * @return the splits. - * @throws IllegalArgumentException if the range is empty or if numberOfSplits < 1. - */ - public List splitEvenly(int numberOfSplits) { - if (numberOfSplits < 1) - throw new IllegalArgumentException(String.format("numberOfSplits (%d) must be greater than 0.", numberOfSplits)); - if (isEmpty()) - throw new IllegalArgumentException("Can't split empty range " + this); - - List tokenRanges = new ArrayList(); - List splitPoints = factory.split(start, end, numberOfSplits); - Token splitStart = start; - for (Token splitEnd : splitPoints) { - tokenRanges.add(new TokenRange(splitStart, splitEnd, factory)); - splitStart = splitEnd; - } - tokenRanges.add(new TokenRange(splitStart, end, factory)); - return tokenRanges; - } - - /** - * Returns whether this range is empty. - *

- * A range is empty when start and end are the same token, except if that is the minimum token, - * in which case the range covers the whole ring (this is consistent with the behavior of CQL - * range queries). - * - * @return whether the range is empty. - */ - public boolean isEmpty() { - return start.equals(end) && !start.equals(factory.minToken()); - } - - /** - * Returns whether this range wraps around the end of the ring. - * - * @return whether this range wraps around. - */ - public boolean isWrappedAround() { - return start.compareTo(end) > 0 && !end.equals(factory.minToken()); - } - - /** - * Splits this range into a list of two non-wrapping ranges. This will return the range itself if it is - * non-wrapping, or two ranges otherwise. - *

- * For example: - *

- *   • {@code ]1,10]} unwraps to itself;
- *   • {@code ]10,1]} unwraps to {@code ]10,min_token]} and {@code ]min_token,1]}.
- *

- * This is useful for CQL range queries, which do not handle wrapping: - *

-     * {@code
-     * List<Row> rows = new ArrayList<Row>();
-     * for (TokenRange subRange : range.unwrap()) {
-     *     ResultSet rs = session.execute("SELECT * FROM mytable WHERE token(pk) > ? and token(pk) <= ?",
-     *                                    subRange.getStart(), subRange.getEnd());
-     *     rows.addAll(rs.all());
-     * }
-     * }
- * - * @return the list of non-wrapping ranges. - */ - public List unwrap() { - if (isWrappedAround()) { - return ImmutableList.of( - new TokenRange(start, factory.minToken(), factory), - new TokenRange(factory.minToken(), end, factory)); - } else { - return ImmutableList.of(this); - } + private final Token start; + private final Token end; + @VisibleForTesting final Token.Factory factory; + + TokenRange(Token start, Token end, Token.Factory factory) { + this.start = start; + this.end = end; + this.factory = factory; + } + + /** + * Return the start of the range. + * + * @return the start of the range (exclusive). + */ + public Token getStart() { + return start; + } + + /** + * Return the end of the range. + * + * @return the end of the range (inclusive). + */ + public Token getEnd() { + return end; + } + + /** + * Splits this range into a number of smaller ranges of equal "size" (referring to the number of + * tokens, not the actual amount of data). + * + *

Splitting an empty range is not permitted. But note that, in edge cases, splitting a range + * might produce one or more empty ranges. + * + * @param numberOfSplits the number of splits to create. + * @return the splits. + * @throws IllegalArgumentException if the range is empty or if numberOfSplits < 1. + */ + public List splitEvenly(int numberOfSplits) { + if (numberOfSplits < 1) + throw new IllegalArgumentException( + String.format("numberOfSplits (%d) must be greater than 0.", numberOfSplits)); + if (isEmpty()) throw new IllegalArgumentException("Can't split empty range " + this); + + List tokenRanges = new ArrayList(); + List splitPoints = factory.split(start, end, numberOfSplits); + Token splitStart = start; + for (Token splitEnd : splitPoints) { + tokenRanges.add(new TokenRange(splitStart, splitEnd, factory)); + splitStart = splitEnd; } - - /** - * Returns whether this range intersects another one. - *

- * For example: - *

- *   • {@code ]3,5]} intersects {@code ]1,4]}, {@code ]4,5]}...
- *   • {@code ]3,5]} does not intersect {@code ]1,2]}, {@code ]2,3]}, {@code ]5,7]}...
- * - * @param that the other range. - * @return whether they intersect. - */ - public boolean intersects(TokenRange that) { - // Empty ranges never intersect any other range - if (this.isEmpty() || that.isEmpty()) - return false; - - return this.contains(that.start, true) - || this.contains(that.end, false) - || that.contains(this.start, true) - || that.contains(this.end, false); - } - - /** - * Computes the intersection of this range with another one. - *

- * If either of these ranges overlap the the ring, they are unwrapped and the unwrapped - * tokens are compared with one another. - *

- * This call will fail if the two ranges do not intersect, you must check by calling - * {@link #intersects(TokenRange)} beforehand. - * - * @param that the other range. - * @return the range(s) resulting from the intersection. - * @throws IllegalArgumentException if the ranges do not intersect. - */ - public List intersectWith(TokenRange that) { - if (!this.intersects(that)) - throw new IllegalArgumentException("The two ranges do not intersect, use intersects() before calling this method"); - - List intersected = Lists.newArrayList(); - - // Compare the unwrapped ranges to one another. - List unwrappedForThis = this.unwrap(); - List unwrappedForThat = that.unwrap(); - for (TokenRange t1 : unwrappedForThis) { - for (TokenRange t2 : unwrappedForThat) { - if (t1.intersects(t2)) { - intersected.add(new TokenRange( - (t1.contains(t2.start, true)) ? t2.start : t1.start, - (t1.contains(t2.end, false)) ? t2.end : t1.end, - factory)); - } - } - } - - // If two intersecting ranges were produced, merge them if they are adjacent. - // This could happen in the case that two wrapped ranges intersected. - if (intersected.size() == 2) { - TokenRange t1 = intersected.get(0); - TokenRange t2 = intersected.get(1); - if (t1.end.equals(t2.start) || t2.end.equals(t1.start)) { - return ImmutableList.of(t1.mergeWith(t2)); - } - } - - return intersected; - } - - /** - * Checks whether this range contains a given token. - * - * @param token the token to check for. - * @return whether this range contains the token, i.e. {@code range.start < token <= range.end}. - */ - public boolean contains(Token token) { - return contains(token, false); + tokenRanges.add(new TokenRange(splitStart, end, factory)); + return tokenRanges; + } + + /** + * Returns whether this range is empty. + * + *

A range is empty when start and end are the same token, except if that is the minimum token, + * in which case the range covers the whole ring (this is consistent with the behavior of CQL + * range queries). + * + * @return whether the range is empty. + */ + public boolean isEmpty() { + return start.equals(end) && !start.equals(factory.minToken()); + } + + /** + * Returns whether this range wraps around the end of the ring. + * + * @return whether this range wraps around. + */ + public boolean isWrappedAround() { + return start.compareTo(end) > 0 && !end.equals(factory.minToken()); + } + + /** + * Splits this range into a list of two non-wrapping ranges. This will return the range itself if + * it is non-wrapping, or two ranges otherwise. + * + *

For example: + * + *

+ *   • {@code ]1,10]} unwraps to itself;
+ *   • {@code ]10,1]} unwraps to {@code ]10,min_token]} and {@code ]min_token,1]}.
+ * + *

This is useful for CQL range queries, which do not handle wrapping: + * + *

{@code
+   * List<Row> rows = new ArrayList<Row>();
+   * for (TokenRange subRange : range.unwrap()) {
+   *     ResultSet rs = session.execute("SELECT * FROM mytable WHERE token(pk) > ? and token(pk) <= ?",
+   *                                    subRange.getStart(), subRange.getEnd());
+   *     rows.addAll(rs.all());
+   * }
+   * }
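As an editor's aside, the javadoc snippet above generalizes to a full-ring scan by combining {@code Metadata.getTokenRanges()}, {@code splitEvenly()} and {@code unwrap()}. A hedged sketch (contact point, keyspace and table names are illustrative; error handling omitted):

```java
import com.datastax.driver.core.*;
import java.util.ArrayList;
import java.util.List;

public class TokenRangeScan {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("my_keyspace")) {
      List<Row> rows = new ArrayList<Row>();
      for (TokenRange range : cluster.getMetadata().getTokenRanges()) {
        // Split each primary range for finer-grained parallelism, then unwrap so that no
        // sub-range crosses the minimum token (CQL cannot express wrapping ranges).
        for (TokenRange split : range.splitEvenly(4)) {
          for (TokenRange subRange : split.unwrap()) {
            ResultSet rs =
                session.execute(
                    "SELECT * FROM my_table WHERE token(pk) > ? AND token(pk) <= ?",
                    subRange.getStart(), subRange.getEnd());
            rows.addAll(rs.all());
          }
        }
      }
      System.out.println("Fetched " + rows.size() + " rows");
    }
  }
}
```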
+ * + * @return the list of non-wrapping ranges. + */ + public List unwrap() { + if (isWrappedAround()) { + return ImmutableList.of( + new TokenRange(start, factory.minToken(), factory), + new TokenRange(factory.minToken(), end, factory)); + } else { + return ImmutableList.of(this); } - - // isStart handles the case where the token is the start of another range, for example: - // * ]1,2] contains 2, but it does not contain the start of ]2,3] - // * ]1,2] does not contain 1, but it contains the start of ]1,3] - @VisibleForTesting - boolean contains(Token token, boolean isStart) { - if (isEmpty()) { - return false; - } - Token minToken = factory.minToken(); - if (end.equals(minToken)) { - if (start.equals(minToken)) { // ]min, min] = full ring, contains everything - return true; - } else if (token.equals(minToken)) { - return !isStart; - } else { - return isStart ? token.compareTo(start) >= 0 : token.compareTo(start) > 0; - } - } else { - boolean isAfterStart = isStart ? token.compareTo(start) >= 0 : token.compareTo(start) > 0; - boolean isBeforeEnd = isStart ? token.compareTo(end) < 0 : token.compareTo(end) <= 0; - return isWrappedAround() - ? isAfterStart || isBeforeEnd // ####]----]#### - : isAfterStart && isBeforeEnd; // ----]####]---- + } + + /** + * Returns whether this range intersects another one. + * + *

For example: + * + *

+ *   • {@code ]3,5]} intersects {@code ]1,4]}, {@code ]4,5]}...
+ *   • {@code ]3,5]} does not intersect {@code ]1,2]}, {@code ]2,3]}, {@code ]5,7]}...
+ * + * @param that the other range. + * @return whether they intersect. + */ + public boolean intersects(TokenRange that) { + // Empty ranges never intersect any other range + if (this.isEmpty() || that.isEmpty()) return false; + + return this.contains(that.start, true) + || this.contains(that.end, false) + || that.contains(this.start, true) + || that.contains(this.end, false); + } + + /** + * Computes the intersection of this range with another one. + * + *

If either of these ranges overlaps the ring, they are unwrapped and the unwrapped tokens + * are compared with one another. + * + *

This call will fail if the two ranges do not intersect, you must check by calling {@link + * #intersects(TokenRange)} beforehand. + * + * @param that the other range. + * @return the range(s) resulting from the intersection. + * @throws IllegalArgumentException if the ranges do not intersect. + */ + public List intersectWith(TokenRange that) { + if (!this.intersects(that)) + throw new IllegalArgumentException( + "The two ranges do not intersect, use intersects() before calling this method"); + + List intersected = Lists.newArrayList(); + + // Compare the unwrapped ranges to one another. + List unwrappedForThis = this.unwrap(); + List unwrappedForThat = that.unwrap(); + for (TokenRange t1 : unwrappedForThis) { + for (TokenRange t2 : unwrappedForThat) { + if (t1.intersects(t2)) { + intersected.add( + new TokenRange( + (t1.contains(t2.start, true)) ? t2.start : t1.start, + (t1.contains(t2.end, false)) ? t2.end : t1.end, + factory)); } + } } - /** - * Merges this range with another one. - *

- * The two ranges should either intersect or be adjacent; in other words, the merged range - * should not include tokens that are in neither of the original ranges. - *

- * For example: - *

- *   • merging {@code ]3,5]} with {@code ]4,7]} produces {@code ]3,7]};
- *   • merging {@code ]3,5]} with {@code ]4,5]} produces {@code ]3,5]};
- *   • merging {@code ]3,5]} with {@code ]5,8]} produces {@code ]3,8]};
- *   • merging {@code ]3,5]} with {@code ]6,8]} fails.
- * - * @param that the other range. - * @return the resulting range. - * @throws IllegalArgumentException if the ranges neither intersect nor are adjacent. - */ - public TokenRange mergeWith(TokenRange that) { - if (this.equals(that)) - return this; - - if (!(this.intersects(that) || this.end.equals(that.start) || that.end.equals(this.start))) - throw new IllegalArgumentException(String.format( - "Can't merge %s with %s because they neither intersect nor are adjacent", - this, that)); - - if (this.isEmpty()) - return that; - - if (that.isEmpty()) - return this; - - // That's actually "starts in or is adjacent to the end of" - boolean thisStartsInThat = that.contains(this.start, true) || this.start.equals(that.end); - boolean thatStartsInThis = this.contains(that.start, true) || that.start.equals(this.end); - - // This takes care of all the cases that return the full ring, so that we don't have to worry about them below - if (thisStartsInThat && thatStartsInThis) - return fullRing(); - - // Starting at this.start, see how far we can go while staying in at least one of the ranges. - Token mergedEnd = (thatStartsInThis && !this.contains(that.end, false)) - ? that.end - : this.end; - - // Repeat in the other direction. - Token mergedStart = thisStartsInThat ? that.start : this.start; - - return new TokenRange(mergedStart, mergedEnd, factory); + // If two intersecting ranges were produced, merge them if they are adjacent. + // This could happen in the case that two wrapped ranges intersected. + if (intersected.size() == 2) { + TokenRange t1 = intersected.get(0); + TokenRange t2 = intersected.get(1); + if (t1.end.equals(t2.start) || t2.end.equals(t1.start)) { + return ImmutableList.of(t1.mergeWith(t2)); + } } - private TokenRange fullRing() { - return new TokenRange(factory.minToken(), factory.minToken(), factory); + return intersected; + } + + /** + * Checks whether this range contains a given token. + * + * @param token the token to check for. + * @return whether this range contains the token, i.e. {@code range.start < token <= range.end}. + */ + public boolean contains(Token token) { + return contains(token, false); + } + + // isStart handles the case where the token is the start of another range, for example: + // * ]1,2] contains 2, but it does not contain the start of ]2,3] + // * ]1,2] does not contain 1, but it contains the start of ]1,3] + @VisibleForTesting + boolean contains(Token token, boolean isStart) { + if (isEmpty()) { + return false; } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (other instanceof TokenRange) { - TokenRange that = (TokenRange) other; - return MoreObjects.equal(this.start, that.start) && - MoreObjects.equal(this.end, that.end); - } - return false; + Token minToken = factory.minToken(); + if (end.equals(minToken)) { + if (start.equals(minToken)) { // ]min, min] = full ring, contains everything + return true; + } else if (token.equals(minToken)) { + return !isStart; + } else { + return isStart ? token.compareTo(start) >= 0 : token.compareTo(start) > 0; + } + } else { + boolean isAfterStart = isStart ? token.compareTo(start) >= 0 : token.compareTo(start) > 0; + boolean isBeforeEnd = isStart ? token.compareTo(end) < 0 : token.compareTo(end) <= 0; + return isWrappedAround() + ? isAfterStart || isBeforeEnd // ####]----]#### + : isAfterStart && isBeforeEnd; // ----]####]---- } - - @Override - public int hashCode() { - return MoreObjects.hashCode(start, end); + } + + /** + * Merges this range with another one. + * + *

The two ranges should either intersect or be adjacent; in other words, the merged range + * should not include tokens that are in neither of the original ranges. + * + *

For example: + * + *

+ *   • merging {@code ]3,5]} with {@code ]4,7]} produces {@code ]3,7]};
+ *   • merging {@code ]3,5]} with {@code ]4,5]} produces {@code ]3,5]};
+ *   • merging {@code ]3,5]} with {@code ]5,8]} produces {@code ]3,8]};
+ *   • merging {@code ]3,5]} with {@code ]6,8]} fails.
+ * + * @param that the other range. + * @return the resulting range. + * @throws IllegalArgumentException if the ranges neither intersect nor are adjacent. + */ + public TokenRange mergeWith(TokenRange that) { + if (this.equals(that)) return this; + + if (!(this.intersects(that) || this.end.equals(that.start) || that.end.equals(this.start))) + throw new IllegalArgumentException( + String.format( + "Can't merge %s with %s because they neither intersect nor are adjacent", + this, that)); + + if (this.isEmpty()) return that; + + if (that.isEmpty()) return this; + + // That's actually "starts in or is adjacent to the end of" + boolean thisStartsInThat = that.contains(this.start, true) || this.start.equals(that.end); + boolean thatStartsInThis = this.contains(that.start, true) || that.start.equals(this.end); + + // This takes care of all the cases that return the full ring, so that we don't have to worry + // about them below + if (thisStartsInThat && thatStartsInThis) return fullRing(); + + // Starting at this.start, see how far we can go while staying in at least one of the ranges. + Token mergedEnd = (thatStartsInThis && !this.contains(that.end, false)) ? that.end : this.end; + + // Repeat in the other direction. + Token mergedStart = thisStartsInThat ? that.start : this.start; + + return new TokenRange(mergedStart, mergedEnd, factory); + } + + private TokenRange fullRing() { + return new TokenRange(factory.minToken(), factory.minToken(), factory); + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (other instanceof TokenRange) { + TokenRange that = (TokenRange) other; + return MoreObjects.equal(this.start, that.start) && MoreObjects.equal(this.end, that.end); } - - @Override - public String toString() { - return String.format("]%s, %s]", start, end); - } - - @Override - public int compareTo(TokenRange other) { - if (this.equals(other)) { - return 0; - } else { - int compareStart = this.start.compareTo(other.start); - return compareStart != 0 ? compareStart : this.end.compareTo(other.end); - } + return false; + } + + @Override + public int hashCode() { + return MoreObjects.hashCode(start, end); + } + + @Override + public String toString() { + return String.format("]%s, %s]", start, end); + } + + @Override + public int compareTo(TokenRange other) { + if (this.equals(other)) { + return 0; + } else { + int compareStart = this.start.compareTo(other.start); + return compareStart != 0 ? compareStart : this.end.compareTo(other.end); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TranslatedAddressEndPoint.java b/driver-core/src/main/java/com/datastax/driver/core/TranslatedAddressEndPoint.java new file mode 100644 index 00000000000..168376f53b5 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TranslatedAddressEndPoint.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.google.common.base.Objects; +import java.net.InetSocketAddress; + +/** + * An endpoint based on server-reported RPC addresses, that might require translation if they are + * accessed through a proxy. + */ +class TranslatedAddressEndPoint implements EndPoint { + + private final InetSocketAddress translatedAddress; + + TranslatedAddressEndPoint(InetSocketAddress translatedAddress) { + this.translatedAddress = translatedAddress; + } + + @Override + public InetSocketAddress resolve() { + return translatedAddress; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof TranslatedAddressEndPoint) { + TranslatedAddressEndPoint that = (TranslatedAddressEndPoint) other; + return Objects.equal(this.translatedAddress, that.translatedAddress); + } else { + return false; + } + } + + @Override + public int hashCode() { + return translatedAddress.hashCode(); + } + + @Override + public String toString() { + return translatedAddress.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/TupleType.java b/driver-core/src/main/java/com/datastax/driver/core/TupleType.java index ca9279e1959..deac9aa0624 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TupleType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TupleType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,181 +19,168 @@ import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.collect.ImmutableList; - import java.util.Arrays; import java.util.List; /** * A tuple type. - *

- * A tuple type is a essentially a list of types. + * + *

A tuple type is essentially a list of types. */ public class TupleType extends DataType { - private final List<DataType> types; - private final ProtocolVersion protocolVersion; - private volatile CodecRegistry codecRegistry; - - TupleType(List<DataType> types, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - super(DataType.Name.TUPLE); - this.types = ImmutableList.copyOf(types); - this.protocolVersion = protocolVersion; - this.codecRegistry = codecRegistry; - } - - /** - * Creates a "disconnected" tuple type (you should prefer - * {@link Metadata#newTupleType(DataType...) cluster.getMetadata().newTupleType(...)} - * whenever possible). - *

- * This method is only exposed for situations where you don't have a {@code Cluster} - * instance available. If you create a type with this method and use it with a - * {@code Cluster} later, you won't be able to set tuple fields with custom codecs - * registered against the cluster, or you might get errors if the protocol versions don't - * match. - * - * @param protocolVersion the protocol version to use. - * @param codecRegistry the codec registry to use. - * @param types the types for the tuple type. - * @return the newly created tuple type. - */ - public static TupleType of(ProtocolVersion protocolVersion, CodecRegistry codecRegistry, DataType... types) { - return new TupleType(Arrays.asList(types), protocolVersion, codecRegistry); - } - - /** - * The (immutable) list of types composing this tuple type. - * - * @return the (immutable) list of types composing this tuple type. - */ - public List getComponentTypes() { - return types; - } - - /** - * Returns a new empty value for this tuple type. - * - * @return an empty (with all component to {@code null}) value for this - * user type definition. - */ - public TupleValue newValue() { - return new TupleValue(this); - } - - /** - * Returns a new value for this tuple type that uses the provided values - * for the components. - *

- * The numbers of values passed to this method must correspond to the - * number of components in this tuple type. The {@code i}th parameter - * value will then be assigned to the {@code i}th component of the resulting - * tuple value. - * - * @param values the values to use for the component of the resulting - * tuple. - * @return a new tuple values based on the provided values. - * @throws IllegalArgumentException if the number of {@code values} - * provided does not correspond to the number of components in this tuple - * type. - * @throws InvalidTypeException if any of the provided value is not of - * the correct type for the component. - */ - public TupleValue newValue(Object... values) { - if (values.length != types.size()) - throw new IllegalArgumentException(String.format("Invalid number of values. Expecting %d but got %d", types.size(), values.length)); - - TupleValue t = newValue(); - for (int i = 0; i < values.length; i++) { - DataType dataType = types.get(i); - if (values[i] == null) - t.setValue(i, null); - else - t.setValue(i, codecRegistry.codecFor(dataType, values[i]).serialize(values[i], protocolVersion)); - } - return t; - } - - @Override - public boolean isFrozen() { - return true; - } - - /** - * Return the protocol version that has been used to deserialize - * this tuple type, or that will be used to serialize it. - * In most cases this should be the version - * currently in use by the cluster instance - * that this tuple type belongs to, as reported by - * {@link ProtocolOptions#getProtocolVersion()}. - * - * @return the protocol version that has been used to deserialize - * this tuple type, or that will be used to serialize it. - */ - ProtocolVersion getProtocolVersion() { - return protocolVersion; + private final List types; + private final ProtocolVersion protocolVersion; + private volatile CodecRegistry codecRegistry; + + TupleType(List types, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + super(DataType.Name.TUPLE); + this.types = ImmutableList.copyOf(types); + this.protocolVersion = protocolVersion; + this.codecRegistry = codecRegistry; + } + + /** + * Creates a "disconnected" tuple type (you should prefer {@link + * Metadata#newTupleType(DataType...) cluster.getMetadata().newTupleType(...)} whenever + * possible). + * + *

This method is only exposed for situations where you don't have a {@code Cluster} instance + * available. If you create a type with this method and use it with a {@code Cluster} later, you + * won't be able to set tuple fields with custom codecs registered against the cluster, or you + * might get errors if the protocol versions don't match. + * + * @param protocolVersion the protocol version to use. + * @param codecRegistry the codec registry to use. + * @param types the types for the tuple type. + * @return the newly created tuple type. + */ + public static TupleType of( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry, DataType... types) { + return new TupleType(Arrays.asList(types), protocolVersion, codecRegistry); + } + + /** + * The (immutable) list of types composing this tuple type. + * + * @return the (immutable) list of types composing this tuple type. + */ + public List getComponentTypes() { + return types; + } + + /** + * Returns a new empty value for this tuple type. + * + * @return an empty (with all component to {@code null}) value for this user type definition. + */ + public TupleValue newValue() { + return new TupleValue(this); + } + + /** + * Returns a new value for this tuple type that uses the provided values for the components. + * + *

The numbers of values passed to this method must correspond to the number of components in + * this tuple type. The {@code i}th parameter value will then be assigned to the {@code i}th + * component of the resulting tuple value. + * + * @param values the values to use for the component of the resulting tuple. + * @return a new tuple values based on the provided values. + * @throws IllegalArgumentException if the number of {@code values} provided does not correspond + * to the number of components in this tuple type. + * @throws InvalidTypeException if any of the provided value is not of the correct type for the + * component. + */ + public TupleValue newValue(Object... values) { + if (values.length != types.size()) + throw new IllegalArgumentException( + String.format( + "Invalid number of values. Expecting %d but got %d", types.size(), values.length)); + + TupleValue t = newValue(); + for (int i = 0; i < values.length; i++) { + DataType dataType = types.get(i); + if (values[i] == null) t.setValue(i, null); + else + t.setValue( + i, codecRegistry.codecFor(dataType, values[i]).serialize(values[i], protocolVersion)); } - - CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - void setCodecRegistry(CodecRegistry codecRegistry) { - this.codecRegistry = codecRegistry; - } - - - @Override - public int hashCode() { - return Arrays.hashCode(new Object[]{name, types}); - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof TupleType)) - return false; - - TupleType d = (TupleType) o; - return name == d.name && types.equals(d.types); - } - - /** - * Return {@code true} if this tuple type contains the given tuple type, - * and {@code false} otherwise. - *

- * A tuple type is said to contain another one - * if the latter has fewer components than the former, - * but all of them are of the same type. - * E.g. the type {@code tuple<int, text>} - * is contained by the type {@code tuple<int, text, double>}. - *

- * A contained type can be seen as a "partial" view - * of a containing type, where the missing components - * are supposed to be {@code null}. - * - * @param other the tuple type to compare against the current one - * @return {@code true} if this tuple type contains the given tuple type, - * and {@code false} otherwise. - */ - public boolean contains(TupleType other) { - if (this.equals(other)) - return true; - if (other.types.size() > this.types.size()) - return false; - return types.subList(0, other.types.size()).equals(other.types); - } - - @Override - public String toString() { - return "frozen<" + asFunctionParameterString() + ">"; - } - - @Override - public String asFunctionParameterString() { - StringBuilder sb = new StringBuilder(); - for (DataType type : types) { - sb.append(sb.length() == 0 ? "tuple<" : ", "); - sb.append(type.asFunctionParameterString()); - } - return sb.append(">").toString(); + return t; + } + + @Override + public boolean isFrozen() { + return true; + } + + /** + * Return the protocol version that has been used to deserialize this tuple type, or that will be + * used to serialize it. In most cases this should be the version currently in use by the cluster + * instance that this tuple type belongs to, as reported by {@link + * ProtocolOptions#getProtocolVersion()}. + * + * @return the protocol version that has been used to deserialize this tuple type, or that will be + * used to serialize it. + */ + ProtocolVersion getProtocolVersion() { + return protocolVersion; + } + + CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + void setCodecRegistry(CodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + } + + @Override + public int hashCode() { + return Arrays.hashCode(new Object[] {name, types}); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TupleType)) return false; + + TupleType d = (TupleType) o; + return name == d.name && types.equals(d.types); + } + + /** + * Return {@code true} if this tuple type contains the given tuple type, and {@code false} + * otherwise. + * + *

A tuple type is said to contain another one if the latter has fewer components than the + * former, but all of them are of the same type. E.g. the type {@code tuple<int, text>} is + * contained by the type {@code tuple<int, text, double>}. + * + *
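For illustration (not part of the diff), here is a minimal sketch of how `newValue(Object...)` and `contains(TupleType)` behave. It builds the tuple types with an explicit protocol version and the default codec registry purely so the snippet is self-contained; in application code the tuple type would normally come from the cluster metadata.

```java
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TupleType;
import com.datastax.driver.core.TupleValue;

public class TupleTypeExample {
  public static void main(String[] args) {
    TupleType intText =
        TupleType.of(
            ProtocolVersion.V4, CodecRegistry.DEFAULT_INSTANCE, DataType.cint(), DataType.text());
    TupleType intTextDouble =
        TupleType.of(
            ProtocolVersion.V4,
            CodecRegistry.DEFAULT_INSTANCE,
            DataType.cint(),
            DataType.text(),
            DataType.cdouble());

    // newValue(Object...): the i-th argument becomes the i-th component.
    TupleValue value = intText.newValue(42, "hello");
    System.out.println(value.getInt(0));    // 42
    System.out.println(value.getString(1)); // hello

    // contains(): the shorter type is a "partial" view of the longer one.
    System.out.println(intTextDouble.contains(intText)); // true
    System.out.println(intText.contains(intTextDouble)); // false
  }
}
```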

A contained type can be seen as a "partial" view of a containing type, where the missing + * components are supposed to be {@code null}. + * + * @param other the tuple type to compare against the current one + * @return {@code true} if this tuple type contains the given tuple type, and {@code false} + * otherwise. + */ + public boolean contains(TupleType other) { + if (this.equals(other)) return true; + if (other.types.size() > this.types.size()) return false; + return types.subList(0, other.types.size()).equals(other.types); + } + + @Override + public String toString() { + return "frozen<" + asFunctionParameterString() + ">"; + } + + @Override + public String asFunctionParameterString() { + StringBuilder sb = new StringBuilder(); + for (DataType type : types) { + sb.append(sb.length() == 0 ? "tuple<" : ", "); + sb.append(type.asFunctionParameterString()); } + return sb.append(">").toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java b/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java index 478b059bec6..57622546c72 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TupleValue.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,69 +17,65 @@ */ package com.datastax.driver.core; -/** - * A value for a Tuple. - */ +/** A value for a Tuple. */ public class TupleValue extends AbstractAddressableByIndexData { - private final TupleType type; + private final TupleType type; - /** - * Builds a new value for a tuple. - * - * @param type the {@link TupleType} instance defining this tuple's components. - */ - TupleValue(TupleType type) { - super(type.getProtocolVersion(), type.getComponentTypes().size()); - this.type = type; - } + /** + * Builds a new value for a tuple. + * + * @param type the {@link TupleType} instance defining this tuple's components. 
+ */ + TupleValue(TupleType type) { + super(type.getProtocolVersion(), type.getComponentTypes().size()); + this.type = type; + } - protected DataType getType(int i) { - return type.getComponentTypes().get(i); - } + protected DataType getType(int i) { + return type.getComponentTypes().get(i); + } - @Override - protected String getName(int i) { - // This is used for error messages - return "component " + i; - } + @Override + protected String getName(int i) { + // This is used for error messages + return "component " + i; + } - @Override - protected CodecRegistry getCodecRegistry() { - return type.getCodecRegistry(); - } + @Override + protected CodecRegistry getCodecRegistry() { + return type.getCodecRegistry(); + } - /** - * The tuple type this is a value of. - * - * @return The tuple type this is a value of. - */ - public TupleType getType() { - return type; - } + /** + * The tuple type this is a value of. + * + * @return The tuple type this is a value of. + */ + public TupleType getType() { + return type; + } - @Override - public boolean equals(Object o) { - if (!(o instanceof TupleValue)) - return false; + @Override + public boolean equals(Object o) { + if (!(o instanceof TupleValue)) return false; - TupleValue that = (TupleValue) o; - if (!type.equals(that.type)) - return false; + TupleValue that = (TupleValue) o; + if (!type.equals(that.type)) return false; - return super.equals(o); - } + return super.equals(o); + } - @Override - public int hashCode() { - return super.hashCode(); - } + @Override + public int hashCode() { + return super.hashCode(); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - TypeCodec codec = getCodecRegistry().codecFor(type); - sb.append(codec.format(this)); - return sb.toString(); - } + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + TypeCodec codec = getCodecRegistry().codecFor(type); + sb.append(codec.format(this)); + return sb.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java b/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java index b4f0a1bd1ff..e78e7eb7ad7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TypeCodec.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +17,21 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.DataType.CollectionType; +import static com.datastax.driver.core.DataType.Name; +import static com.datastax.driver.core.DataType.smallint; +import static com.datastax.driver.core.DataType.timeuuid; +import static com.datastax.driver.core.DataType.tinyint; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.utils.Bytes; import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; import com.google.common.reflect.TypeToken; - import java.io.DataInput; import java.io.IOException; -import java.lang.reflect.Type; import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; @@ -32,2621 +40,2676 @@ import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.text.ParseException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; import java.util.regex.Pattern; -import static com.datastax.driver.core.DataType.*; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; - /** - * A Codec that can serialize and deserialize to and from a given - * {@link #getCqlType() CQL type} and a given {@link #getJavaType() Java Type}. - *

+ * A Codec that can serialize and deserialize to and from a given {@link #getCqlType() CQL type} and + * a given {@link #getJavaType() Java Type}. + * + *

+ * *

Serializing and deserializing

- *

- * Two methods handle the serialization and deserialization of Java types into - * CQL types according to the native protocol specifications: + * + *

Two methods handle the serialization and deserialization of Java types into CQL types + * according to the native protocol specifications: + * *

    - *
  1. {@link #serialize(Object, ProtocolVersion)}: used to serialize from the codec's Java type to a - * {@link ByteBuffer} instance corresponding to the codec's CQL type;
  2. - *
  3. {@link #deserialize(ByteBuffer, ProtocolVersion)}: used to deserialize a {@link ByteBuffer} instance - * corresponding to the codec's CQL type to the codec's Java type.
  4. + *
  5. {@link #serialize(Object, ProtocolVersion)}: used to serialize from the codec's Java type + * to a {@link ByteBuffer} instance corresponding to the codec's CQL type; + *
  6. {@link #deserialize(ByteBuffer, ProtocolVersion)}: used to deserialize a {@link ByteBuffer} + * instance corresponding to the codec's CQL type to the codec's Java type. *
- *
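As an editorial aside, a round trip through the two methods listed above might look like the following sketch; the protocol version is hard-coded only to keep the example self-contained.

```java
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import java.nio.ByteBuffer;

public class SerializeDeserializeExample {
  public static void main(String[] args) {
    TypeCodec<Integer> codec = TypeCodec.cint();

    // serialize(Object, ProtocolVersion): Java value -> binary form.
    ByteBuffer bytes = codec.serialize(42, ProtocolVersion.V4);

    // deserialize(ByteBuffer, ProtocolVersion): binary form -> Java value.
    Integer roundTripped = codec.deserialize(bytes, ProtocolVersion.V4);

    System.out.println(roundTripped); // 42
  }
}
```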

+ * + *

+ * *

Formatting and parsing

- *

- * Two methods handle the formatting and parsing of Java types into - * CQL strings: + * + *

Two methods handle the formatting and parsing of Java types into CQL strings: + * *

    - *
  1. {@link #format(Object)}: formats the Java type handled by the codec as a CQL string;
  2. - *
  3. {@link #parse(String)}; parses a CQL string into the Java type handled by the codec.
  4. + *
  5. {@link #format(Object)}: formats the Java type handled by the codec as a CQL string; + *
  6. {@link #parse(String)}: parses a CQL string into the Java type handled by the codec. *
- *
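A corresponding sketch for the formatting and parsing pair, using the built-in varchar codec (the values are arbitrary):

```java
import com.datastax.driver.core.TypeCodec;

public class FormatParseExample {
  public static void main(String[] args) {
    TypeCodec<String> codec = TypeCodec.varchar();

    // format(Object): Java value -> CQL literal (quoted).
    String literal = codec.format("hello world"); // 'hello world'

    // parse(String): CQL literal -> Java value (unquoted).
    String parsed = codec.parse(literal);

    System.out.println(literal);
    System.out.println(parsed); // hello world
  }
}
```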

+ * + *

+ * *

Inspection

- *

- * Codecs also have the following inspection methods: - *

+ * + *

Codecs also have the following inspection methods: + * + *

+ * *

    - *
  1. {@link #accepts(DataType)}: returns true if the codec can deserialize the given CQL type;
  2. - *
  3. {@link #accepts(TypeToken)}: returns true if the codec can serialize the given Java type;
  4. - *
  5. {@link #accepts(Object)}; returns true if the codec can serialize the given object.
  6. + *
  7. {@link #accepts(DataType)}: returns true if the codec can deserialize the given CQL type; + *
  8. {@link #accepts(TypeToken)}: returns true if the codec can serialize the given Java type; + *
  9. {@link #accepts(Object)}: returns true if the codec can serialize the given object. *
- *
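The three inspection methods can be exercised directly; a small sketch with the varchar codec:

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.TypeCodec;
import com.google.common.reflect.TypeToken;

public class AcceptsExample {
  public static void main(String[] args) {
    TypeCodec<String> codec = TypeCodec.varchar();

    System.out.println(codec.accepts(DataType.varchar()));         // true: matching CQL type
    System.out.println(codec.accepts(DataType.cint()));            // false
    System.out.println(codec.accepts(TypeToken.of(String.class))); // true: matching Java type
    System.out.println(codec.accepts("some value"));               // true: serializable object
  }
}
```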

+ * + *

+ * *

Implementation notes

- *

+ * + *

+ * *

    - *
  1. TypeCodec implementations must be thread-safe.
  2. - *
  3. TypeCodec implementations must perform fast and never block.
  4. - *
  5. TypeCodec implementations must support all native protocol versions; it is not possible - * to use different codecs for the same types but under different protocol versions.
  6. - *
  7. TypeCodec implementations must comply with the native protocol specifications; failing - * to do so will result in unexpected results and could cause the driver to crash.
  8. - *
  9. TypeCodec implementations should be stateless and immutable.
  10. - *
  11. TypeCodec implementations should interpret {@code null} values and empty ByteBuffers - * (i.e. {@link ByteBuffer#remaining()} == 0) in a reasonable way; - * usually, {@code NULL} CQL values should map to {@code null} references, but exceptions exist; - * e.g. for varchar types, a {@code NULL} CQL value maps to a {@code null} reference, - * whereas an empty buffer maps to an empty String. For collection types, it is also admitted that - * {@code NULL} CQL values map to empty Java collections instead of {@code null} references. - * In any case, the codec's behavior in respect to {@code null} values and empty ByteBuffers - * should be clearly documented.
  12. - *
  13. TypeCodec implementations that wish to handle Java primitive types must be instantiated with - * the wrapper Java class instead, and implement the appropriate interface - * (e.g. {@link com.datastax.driver.core.TypeCodec.PrimitiveBooleanCodec} for primitive {@code boolean} types; - * there is one such interface for each Java primitive type).
  14. - *
  15. When deserializing, TypeCodec implementations should not consume {@link ByteBuffer} instances - * by performing relative read operations that modify their current position; - * codecs should instead prefer absolute read methods, or, if necessary, they should - * {@link ByteBuffer#duplicate() duplicate} their byte buffers prior to reading them.
  16. + *
  17. TypeCodec implementations must be thread-safe. + *
  18. TypeCodec implementations must perform fast and never block. + *
  19. TypeCodec implementations must support all native protocol versions; it is not + * possible to use different codecs for the same types but under different protocol versions. + *
  20. TypeCodec implementations must comply with the native protocol specifications; failing to + * do so will result in unexpected results and could cause the driver to crash. + *
  21. TypeCodec implementations should be stateless and immutable. + *
  22. TypeCodec implementations should interpret {@code null} values and empty + * ByteBuffers (i.e. {@link ByteBuffer#remaining()} == 0) in a + * reasonable way; usually, {@code NULL} CQL values should map to {@code null} + * references, but exceptions exist; e.g. for varchar types, a {@code NULL} CQL value maps to + * a {@code null} reference, whereas an empty buffer maps to an empty String. For collection + * types, it is also admitted that {@code NULL} CQL values map to empty Java collections + * instead of {@code null} references. In any case, the codec's behavior in respect to {@code + * null} values and empty ByteBuffers should be clearly documented. + *
  23. TypeCodec implementations that wish to handle Java primitive types must be + * instantiated with the wrapper Java class instead, and implement the appropriate interface + * (e.g. {@link com.datastax.driver.core.TypeCodec.PrimitiveBooleanCodec} for primitive {@code + * boolean} types; there is one such interface for each Java primitive type). + *
  24. When deserializing, TypeCodec implementations should not consume {@link ByteBuffer} + * instances by performing relative read operations that modify their current position; codecs + * should instead prefer absolute read methods, or, if necessary, they should {@link + * ByteBuffer#duplicate() duplicate} their byte buffers prior to reading them. *
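To make the notes above concrete, here is a hedged sketch of a codec that follows them: a hypothetical `UriCodec` (not part of the driver) mapping the CQL `text` type to `java.net.URI`. It is stateless and immutable, treats null and empty buffers as `null`, and never moves the input buffer's position. Such a codec would typically be registered with the cluster's `CodecRegistry` before use.

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.exceptions.InvalidTypeException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;

/** Illustrative only: maps the CQL {@code text} type to {@link URI}. */
public class UriCodec extends TypeCodec<URI> {

  private static final Charset UTF_8 = Charset.forName("UTF-8");

  public UriCodec() {
    super(DataType.text(), URI.class);
  }

  @Override
  public ByteBuffer serialize(URI value, ProtocolVersion protocolVersion) {
    // A null input maps to a NULL CQL value.
    return value == null ? null : ByteBuffer.wrap(value.toString().getBytes(UTF_8));
  }

  @Override
  public URI deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
    if (bytes == null || bytes.remaining() == 0) return null;
    // Read through a duplicate so the caller's buffer position is left untouched.
    byte[] array = new byte[bytes.remaining()];
    bytes.duplicate().get(array);
    return URI.create(new String(array, UTF_8));
  }

  @Override
  public URI parse(String value) {
    if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null;
    if (value.length() < 2 || value.charAt(0) != '\'' || value.charAt(value.length() - 1) != '\'')
      throw new InvalidTypeException("text literals must be enclosed in single quotes");
    return URI.create(value.substring(1, value.length() - 1).replace("''", "'"));
  }

  @Override
  public String format(URI value) {
    return value == null ? "NULL" : "'" + value.toString().replace("'", "''") + "'";
  }
}
```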
* * @param The codec's Java type */ public abstract class TypeCodec { - /** - * Return the default codec for the CQL type {@code boolean}. - * The returned codec maps the CQL type {@code boolean} into the Java type {@link Boolean}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code boolean}. - */ - public static PrimitiveBooleanCodec cboolean() { - return BooleanCodec.instance; - } + /** + * Return the default codec for the CQL type {@code boolean}. The returned codec maps the CQL type + * {@code boolean} into the Java type {@link Boolean}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code boolean}. + */ + public static PrimitiveBooleanCodec cboolean() { + return BooleanCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code tinyint}. The returned codec maps the CQL type + * {@code tinyint} into the Java type {@link Byte}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code tinyint}. + */ + public static PrimitiveByteCodec tinyInt() { + return TinyIntCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code smallint}. The returned codec maps the CQL + * type {@code smallint} into the Java type {@link Short}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code smallint}. + */ + public static PrimitiveShortCodec smallInt() { + return SmallIntCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code int}. The returned codec maps the CQL type + * {@code int} into the Java type {@link Integer}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code int}. + */ + public static PrimitiveIntCodec cint() { + return IntCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code bigint}. The returned codec maps the CQL type + * {@code bigint} into the Java type {@link Long}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code bigint}. + */ + public static PrimitiveLongCodec bigint() { + return BigintCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code counter}. The returned codec maps the CQL type + * {@code counter} into the Java type {@link Long}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code counter}. + */ + public static PrimitiveLongCodec counter() { + return CounterCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code float}. The returned codec maps the CQL type + * {@code float} into the Java type {@link Float}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code float}. + */ + public static PrimitiveFloatCodec cfloat() { + return FloatCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code double}. The returned codec maps the CQL type + * {@code double} into the Java type {@link Double}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code double}. + */ + public static PrimitiveDoubleCodec cdouble() { + return DoubleCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code varint}. The returned codec maps the CQL type + * {@code varint} into the Java type {@link BigInteger}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code varint}. 
+ */ + public static TypeCodec varint() { + return VarintCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code decimal}. The returned codec maps the CQL type + * {@code decimal} into the Java type {@link BigDecimal}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code decimal}. + */ + public static TypeCodec decimal() { + return DecimalCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code ascii}. The returned codec maps the CQL type + * {@code ascii} into the Java type {@link String}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code ascii}. + */ + public static TypeCodec ascii() { + return AsciiCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code varchar}. The returned codec maps the CQL type + * {@code varchar} into the Java type {@link String}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code varchar}. + */ + public static TypeCodec varchar() { + return VarcharCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code blob}. The returned codec maps the CQL type + * {@code blob} into the Java type {@link ByteBuffer}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code blob}. + */ + public static TypeCodec blob() { + return BlobCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code date}. The returned codec maps the CQL type + * {@code date} into the Java type {@link LocalDate}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code date}. + */ + public static TypeCodec date() { + return DateCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code time}. The returned codec maps the CQL type + * {@code time} into the Java type {@link Long}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code time}. + */ + public static PrimitiveLongCodec time() { + return TimeCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code timestamp}. The returned codec maps the CQL + * type {@code timestamp} into the Java type {@link Date}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code timestamp}. + */ + public static TypeCodec timestamp() { + return TimestampCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code uuid}. The returned codec maps the CQL type + * {@code uuid} into the Java type {@link UUID}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code uuid}. + */ + public static TypeCodec uuid() { + return UUIDCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code timeuuid}. The returned codec maps the CQL + * type {@code timeuuid} into the Java type {@link UUID}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code timeuuid}. + */ + public static TypeCodec timeUUID() { + return TimeUUIDCodec.instance; + } + + /** + * Return the default codec for the CQL type {@code inet}. The returned codec maps the CQL type + * {@code inet} into the Java type {@link InetAddress}. The returned instance is a singleton. + * + * @return the default codec for CQL type {@code inet}. 
+ */ + public static TypeCodec inet() { + return InetCodec.instance; + } + + /** + * Return a newly-created codec for the CQL type {@code list} whose element type is determined by + * the given element codec. The returned codec maps the CQL type {@code list} into the Java type + * {@link List}. This method does not cache returned instances and returns a newly-allocated + * object at each invocation. + * + * @param elementCodec the codec that will handle elements of this list. + * @return A newly-created codec for CQL type {@code list}. + */ + public static TypeCodec> list(TypeCodec elementCodec) { + return new ListCodec(elementCodec); + } + + /** + * Return a newly-created codec for the CQL type {@code set} whose element type is determined by + * the given element codec. The returned codec maps the CQL type {@code set} into the Java type + * {@link Set}. This method does not cache returned instances and returns a newly-allocated object + * at each invocation. + * + * @param elementCodec the codec that will handle elements of this set. + * @return A newly-created codec for CQL type {@code set}. + */ + public static TypeCodec> set(TypeCodec elementCodec) { + return new SetCodec(elementCodec); + } + + /** + * Return a newly-created codec for the CQL type {@code map} whose key type and value type are + * determined by the given codecs. The returned codec maps the CQL type {@code map} into the Java + * type {@link Map}. This method does not cache returned instances and returns a newly-allocated + * object at each invocation. + * + * @param keyCodec the codec that will handle keys of this map. + * @param valueCodec the codec that will handle values of this map. + * @return A newly-created codec for CQL type {@code map}. + */ + public static TypeCodec> map(TypeCodec keyCodec, TypeCodec valueCodec) { + return new MapCodec(keyCodec, valueCodec); + } + + /** + * Return a newly-created codec for the given user-defined CQL type. The returned codec maps the + * user-defined type into the Java type {@link UDTValue}. This method does not cache returned + * instances and returns a newly-allocated object at each invocation. + * + * @param type the user-defined type this codec should handle. + * @return A newly-created codec for the given user-defined CQL type. + */ + public static TypeCodec userType(UserType type) { + return new UDTCodec(type); + } + + /** + * Return a newly-created codec for the given CQL tuple type. The returned codec maps the tuple + * type into the Java type {@link TupleValue}. This method does not cache returned instances and + * returns a newly-allocated object at each invocation. + * + * @param type the tuple type this codec should handle. + * @return A newly-created codec for the given CQL tuple type. + */ + public static TypeCodec tuple(TupleType type) { + return new TupleCodec(type); + } + + /** + * Return a newly-created codec for the given CQL custom type. + * + *

The returned codec maps the custom type into the Java type {@link ByteBuffer}, thus + * providing a (very lightweight) support for Cassandra types that do not have a CQL equivalent. + * + *

Note that the returned codec assumes that CQL literals for the given custom type are + * expressed in binary form as well, e.g. {@code 0xcafebabe}. If this is not the case, the + * returned codec might be unable to {@link #parse(String) parse} and {@link #format(Object) + * format} literals for this type. This is notoriously true for types inheriting from {@code + * org.apache.cassandra.db.marshal.AbstractCompositeType}, whose CQL literals are actually + * expressed as quoted strings. + * + *
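As a quick illustration of the binary literal form mentioned above, the sketch below builds such a codec for a placeholder custom type name (`com.example.MyType` is hypothetical) and round-trips a hex literal:

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.utils.Bytes;
import java.nio.ByteBuffer;

public class CustomTypeCodecExample {
  public static void main(String[] args) {
    // Placeholder class name; a real custom type is defined server-side.
    DataType.CustomType type = (DataType.CustomType) DataType.custom("com.example.MyType");
    TypeCodec<ByteBuffer> codec = TypeCodec.custom(type);

    // CQL literals for the custom type are handled in binary form.
    ByteBuffer parsed = codec.parse("0xcafebabe");
    System.out.println(codec.format(parsed));                             // 0xcafebabe
    System.out.println(parsed.equals(Bytes.fromHexString("0xcafebabe"))); // true
  }
}
```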

This method does not cache returned instances and returns a newly-allocated object at each + * invocation. + * + * @param type the custom type this codec should handle. + * @return A newly-created codec for the given CQL custom type. + */ + public static TypeCodec custom(DataType.CustomType type) { + return new CustomCodec(type); + } + + /** + * Returns the default codec for the {@link DataType#duration() Duration type}. + * + *

This codec maps duration types to the driver's built-in {@link Duration} class, thus + * providing a more user-friendly mapping than the low-level mapping provided by regular {@link + * #custom(DataType.CustomType) custom type codecs}. + * + *

The returned instance is a singleton. + * + * @return the default codec for the Duration type. + */ + public static TypeCodec duration() { + return DurationCodec.instance; + } + + protected final TypeToken javaType; + + protected final DataType cqlType; + + /** + * This constructor can only be used for non parameterized types. For parameterized ones, please + * use {@link #TypeCodec(DataType, TypeToken)} instead. + * + * @param javaClass The Java class this codec serializes from and deserializes to. + */ + protected TypeCodec(DataType cqlType, Class javaClass) { + this(cqlType, TypeToken.of(javaClass)); + } + + protected TypeCodec(DataType cqlType, TypeToken javaType) { + checkNotNull(cqlType, "cqlType cannot be null"); + checkNotNull(javaType, "javaType cannot be null"); + checkArgument( + !javaType.isPrimitive(), + "Cannot create a codec for a primitive Java type (%s), please use the wrapper type instead", + javaType); + this.cqlType = cqlType; + this.javaType = javaType; + } + + /** + * Return the Java type that this codec deserializes to and serializes from. + * + * @return The Java type this codec deserializes to and serializes from. + */ + public TypeToken getJavaType() { + return javaType; + } + + /** + * Return the CQL type that this codec deserializes from and serializes to. + * + * @return The Java type this codec deserializes from and serializes to. + */ + public DataType getCqlType() { + return cqlType; + } + + /** + * Serialize the given value according to the CQL type handled by this codec. + * + *

Implementation notes: + * + *

    + *
  1. Null values should be gracefully handled and no exception should be raised; these should + * be considered as the equivalent of a NULL CQL value; + *
  2. Codecs for CQL collection types should not permit null elements; + *
  3. Codecs for CQL collection types should treat a {@code null} input as the equivalent of an + * empty collection. + *
+ * + * @param value An instance of T; may be {@code null}. + * @param protocolVersion the protocol version to use when serializing {@code bytes}. In most + * cases, the proper value to provide for this argument is the value returned by {@link + * ProtocolOptions#getProtocolVersion} (which is the protocol version in use by the driver). + * @return A {@link ByteBuffer} instance containing the serialized form of T + * @throws InvalidTypeException if the given value does not have the expected type + */ + public abstract ByteBuffer serialize(T value, ProtocolVersion protocolVersion) + throws InvalidTypeException; + + /** + * Deserialize the given {@link ByteBuffer} instance according to the CQL type handled by this + * codec. + * + *

Implementation notes: + * + *

    + *
  1. Null or empty buffers should be gracefully handled and no exception should be raised; + * these should be considered as the equivalent of a NULL CQL value and, in most cases, + * should map to {@code null} or a default value for the corresponding Java type, if + * applicable; + *
  2. Codecs for CQL collection types should clearly document whether they return immutable + * collections or not (note that the driver's default collection codecs return + * mutable collections); + *
  3. Codecs for CQL collection types should avoid returning {@code null}; they should return + * empty collections instead (the driver's default collection codecs all comply with this + * rule). + *
  4. The provided {@link ByteBuffer} should never be consumed by read operations that modify + * its current position; if necessary, {@link ByteBuffer#duplicate() duplicate} it before + * consuming. + *
+ * + * @param bytes A {@link ByteBuffer} instance containing the serialized form of T; may be {@code + * null} or empty. + * @param protocolVersion the protocol version to use when serializing {@code bytes}. In most + * cases, the proper value to provide for this argument is the value returned by {@link + * ProtocolOptions#getProtocolVersion} (which is the protocol version in use by the driver). + * @return An instance of T + * @throws InvalidTypeException if the given {@link ByteBuffer} instance cannot be deserialized + */ + public abstract T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException; + + /** + * Parse the given CQL literal into an instance of the Java type handled by this codec. + * + *

Implementors should take care of unquoting and unescaping the given CQL string where + * applicable. Null values and empty Strings should be accepted, as well as the string {@code + * "NULL"}; in most cases, implementations should interpret these inputs as equivalent to a + * {@code null} reference. + * + *

Implementing this method is not strictly mandatory: internally, the driver only uses it to + * parse the INITCOND when building the metadata of an aggregate function (and in most cases it + * will use a built-in codec, unless the INITCOND has a custom type). + * + * @param value The CQL string to parse, may be {@code null} or empty. + * @return An instance of T; may be {@code null} on a {@code null input}. + * @throws InvalidTypeException if the given value cannot be parsed into the expected type + */ + public abstract T parse(String value) throws InvalidTypeException; + + /** + * Format the given value as a valid CQL literal according to the CQL type handled by this codec. + * + *

Implementors should take care of quoting and escaping the resulting CQL literal where + * applicable. Null values should be accepted; in most cases, implementations should return the + * CQL keyword {@code "NULL"} for {@code null} inputs. + * + *

Implementing this method is not strictly mandatory. It is used: + * + *

    + *
  1. in the query builder, when values are inlined in the query string (see {@link + * com.datastax.driver.core.querybuilder.BuiltStatement} for a detailed explanation of when + * this happens); + *
  2. in the {@link QueryLogger}, if parameter logging is enabled; + *
  3. to format the INITCOND in {@link AggregateMetadata#asCQLQuery(boolean)}; + *
  4. in the {@code toString()} implementation of some objects ({@link UDTValue}, {@link + * TupleValue}, and the internal representation of a {@code ROWS} response), which may + * appear in driver logs. + *
+ * + * If you choose not to implement this method, you should not throw an exception but instead + * return a constant string (for example "XxxCodec.format not implemented"). + * + * @param value An instance of T; may be {@code null}. + * @return CQL string + * @throws InvalidTypeException if the given value does not have the expected type + */ + public abstract String format(T value) throws InvalidTypeException; + + /** + * Return {@code true} if this codec is capable of serializing the given {@code javaType}. + * + *

The implementation is invariant with respect to the passed argument (through the + * usage of {@link TypeToken#equals(Object)}) and it's strongly recommended not to modify this + * behavior. This means that a codec will only ever return {@code true} for the + * exact Java type that it has been created for. + * + *
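In other words (an illustrative aside, values arbitrary): the timestamp codec accepts the exact `Date` type but not a subtype token, while the object-based overload described further below is covariant:

```java
import com.datastax.driver.core.TypeCodec;
import com.google.common.reflect.TypeToken;
import java.sql.Timestamp;
import java.util.Date;

public class InvarianceExample {
  public static void main(String[] args) {
    TypeCodec<Date> codec = TypeCodec.timestamp();

    System.out.println(codec.accepts(TypeToken.of(Date.class)));      // true: exact type
    System.out.println(codec.accepts(TypeToken.of(Timestamp.class))); // false: subtype, invariant
    System.out.println(codec.accepts(new Timestamp(0L)));             // true: accepts(Object) is covariant
  }
}
```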

If the argument represents a Java primitive type, its wrapper type is considered instead. + * + * @param javaType The Java type this codec should serialize from and deserialize to; cannot be + * {@code null}. + * @return {@code true} if the codec is capable of serializing the given {@code javaType}, and + * {@code false} otherwise. + * @throws NullPointerException if {@code javaType} is {@code null}. + */ + public boolean accepts(TypeToken javaType) { + checkNotNull(javaType, "Parameter javaType cannot be null"); + return this.javaType.equals(javaType.wrap()); + } + + /** + * Return {@code true} if this codec is capable of serializing the given {@code javaType}. + * + *

This implementation simply compares the given type against this codec's runtime (raw) type + * for equality; it is invariant with respect to the passed argument (through the usage + * of {@link Class#equals(Object)} and it's strongly recommended not to modify this + * behavior. This means that a codec will only ever return {@code true} for the + * exact runtime (raw) Java type that it has been created for. + * + * @param javaType The Java type this codec should serialize from and deserialize to; cannot be + * {@code null}. + * @return {@code true} if the codec is capable of serializing the given {@code javaType}, and + * {@code false} otherwise. + * @throws NullPointerException if {@code javaType} is {@code null}. + */ + public boolean accepts(Class javaType) { + checkNotNull(javaType, "Parameter javaType cannot be null"); + if (javaType.isPrimitive()) { + if (javaType == Boolean.TYPE) { + javaType = Boolean.class; + } else if (javaType == Character.TYPE) { + javaType = Character.class; + } else if (javaType == Byte.TYPE) { + javaType = Byte.class; + } else if (javaType == Short.TYPE) { + javaType = Short.class; + } else if (javaType == Integer.TYPE) { + javaType = Integer.class; + } else if (javaType == Long.TYPE) { + javaType = Long.class; + } else if (javaType == Float.TYPE) { + javaType = Float.class; + } else if (javaType == Double.TYPE) { + javaType = Double.class; + } + } + return this.javaType.getRawType().equals(javaType); + } + + /** + * Return {@code true} if this codec is capable of deserializing the given {@code cqlType}. + * + * @param cqlType The CQL type this codec should deserialize from and serialize to; cannot be + * {@code null}. + * @return {@code true} if the codec is capable of deserializing the given {@code cqlType}, and + * {@code false} otherwise. + * @throws NullPointerException if {@code cqlType} is {@code null}. + */ + public boolean accepts(DataType cqlType) { + checkNotNull(cqlType, "Parameter cqlType cannot be null"); + return this.cqlType.equals(cqlType); + } + + /** + * Return {@code true} if this codec is capable of serializing the given object. Note that the + * object's Java type is inferred from the object's runtime (raw) type, contrary to {@link + * #accepts(TypeToken)} which is capable of handling generic types. + * + *

This method is intended mostly to be used by the QueryBuilder when no type information is + * available when the codec is used. + * + *

Implementation notes: + * + *

    + *
  1. The default implementation is covariant with respect to the passed argument + * (through the usage of {@link Class#isAssignableFrom(Class)}) and it's strongly + * recommended not to modify this behavior. This means that, by default, a codec will + * accept any subtype of the Java type that it has been created for. + *
  2. The base implementation provided here can only handle non-parameterized types; codecs + * handling parameterized types, such as collection types, must override this method and + * perform some sort of "manual" inspection of the actual type parameters. + *
  3. Similarly, codecs that only accept a partial subset of all possible values must override + * this method and manually inspect the object to check if it complies or not with the + * codec's limitations. + *
+ * + * @param value The Java type this codec should serialize from and deserialize to; cannot be + * {@code null}. + * @return {@code true} if the codec is capable of serializing the given {@code javaType}, and + * {@code false} otherwise. + * @throws NullPointerException if {@code value} is {@code null}. + */ + public boolean accepts(Object value) { + checkNotNull(value, "Parameter value cannot be null"); + return javaType.getRawType().isAssignableFrom(value.getClass()); + } + + @Override + public String toString() { + return String.format("%s [%s <-> %s]", this.getClass().getSimpleName(), cqlType, javaType); + } + + /** + * A codec that is capable of handling primitive booleans, thus avoiding the overhead of boxing + * and unboxing such primitives. + */ + public abstract static class PrimitiveBooleanCodec extends TypeCodec { + + protected PrimitiveBooleanCodec(DataType cqlType) { + super(cqlType, Boolean.class); + } + + public abstract ByteBuffer serializeNoBoxing(boolean v, ProtocolVersion protocolVersion); + + public abstract boolean deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); - /** - * Return the default codec for the CQL type {@code tinyint}. - * The returned codec maps the CQL type {@code tinyint} into the Java type {@link Byte}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code tinyint}. - */ - public static PrimitiveByteCodec tinyInt() { - return TinyIntCodec.instance; + @Override + public ByteBuffer serialize(Boolean value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return the default codec for the CQL type {@code smallint}. - * The returned codec maps the CQL type {@code smallint} into the Java type {@link Short}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code smallint}. - */ - public static PrimitiveShortCodec smallInt() { - return SmallIntCodec.instance; + @Override + public Boolean deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return the default codec for the CQL type {@code int}. - * The returned codec maps the CQL type {@code int} into the Java type {@link Integer}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code int}. - */ - public static PrimitiveIntCodec cint() { - return IntCodec.instance; - } + /** + * A codec that is capable of handling primitive bytes, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveByteCodec extends TypeCodec { - /** - * Return the default codec for the CQL type {@code bigint}. - * The returned codec maps the CQL type {@code bigint} into the Java type {@link Long}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code bigint}. - */ - public static PrimitiveLongCodec bigint() { - return BigintCodec.instance; + protected PrimitiveByteCodec(DataType cqlType) { + super(cqlType, Byte.class); } - /** - * Return the default codec for the CQL type {@code counter}. - * The returned codec maps the CQL type {@code counter} into the Java type {@link Long}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code counter}. 
- */ - public static PrimitiveLongCodec counter() { - return CounterCodec.instance; - } + public abstract ByteBuffer serializeNoBoxing(byte v, ProtocolVersion protocolVersion); - /** - * Return the default codec for the CQL type {@code float}. - * The returned codec maps the CQL type {@code float} into the Java type {@link Float}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code float}. - */ - public static PrimitiveFloatCodec cfloat() { - return FloatCodec.instance; - } + public abstract byte deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); - /** - * Return the default codec for the CQL type {@code double}. - * The returned codec maps the CQL type {@code double} into the Java type {@link Double}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code double}. - */ - public static PrimitiveDoubleCodec cdouble() { - return DoubleCodec.instance; + @Override + public ByteBuffer serialize(Byte value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return the default codec for the CQL type {@code varint}. - * The returned codec maps the CQL type {@code varint} into the Java type {@link BigInteger}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code varint}. - */ - public static TypeCodec varint() { - return VarintCodec.instance; + @Override + public Byte deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return the default codec for the CQL type {@code decimal}. - * The returned codec maps the CQL type {@code decimal} into the Java type {@link BigDecimal}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code decimal}. - */ - public static TypeCodec decimal() { - return DecimalCodec.instance; - } + /** + * A codec that is capable of handling primitive shorts, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveShortCodec extends TypeCodec { - /** - * Return the default codec for the CQL type {@code ascii}. - * The returned codec maps the CQL type {@code ascii} into the Java type {@link String}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code ascii}. - */ - public static TypeCodec ascii() { - return AsciiCodec.instance; + protected PrimitiveShortCodec(DataType cqlType) { + super(cqlType, Short.class); } - /** - * Return the default codec for the CQL type {@code varchar}. - * The returned codec maps the CQL type {@code varchar} into the Java type {@link String}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code varchar}. - */ - public static TypeCodec varchar() { - return VarcharCodec.instance; + public abstract ByteBuffer serializeNoBoxing(short v, ProtocolVersion protocolVersion); + + public abstract short deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + + @Override + public ByteBuffer serialize(Short value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return the default codec for the CQL type {@code blob}. - * The returned codec maps the CQL type {@code blob} into the Java type {@link ByteBuffer}. - * The returned instance is a singleton. 
- * - * @return the default codec for CQL type {@code blob}. - */ - public static TypeCodec blob() { - return BlobCodec.instance; + @Override + public Short deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return the default codec for the CQL type {@code date}. - * The returned codec maps the CQL type {@code date} into the Java type {@link LocalDate}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code date}. - */ - public static TypeCodec date() { - return DateCodec.instance; + /** + * A codec that is capable of handling primitive ints, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveIntCodec extends TypeCodec { + + protected PrimitiveIntCodec(DataType cqlType) { + super(cqlType, Integer.class); } - /** - * Return the default codec for the CQL type {@code time}. - * The returned codec maps the CQL type {@code time} into the Java type {@link Long}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code time}. - */ - public static PrimitiveLongCodec time() { - return TimeCodec.instance; + public abstract ByteBuffer serializeNoBoxing(int v, ProtocolVersion protocolVersion); + + public abstract int deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + + @Override + public ByteBuffer serialize(Integer value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return the default codec for the CQL type {@code timestamp}. - * The returned codec maps the CQL type {@code timestamp} into the Java type {@link Date}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code timestamp}. - */ - public static TypeCodec timestamp() { - return TimestampCodec.instance; + @Override + public Integer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return the default codec for the CQL type {@code uuid}. - * The returned codec maps the CQL type {@code uuid} into the Java type {@link UUID}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code uuid}. - */ - public static TypeCodec uuid() { - return UUIDCodec.instance; + /** + * A codec that is capable of handling primitive longs, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveLongCodec extends TypeCodec { + + protected PrimitiveLongCodec(DataType cqlType) { + super(cqlType, Long.class); } - /** - * Return the default codec for the CQL type {@code timeuuid}. - * The returned codec maps the CQL type {@code timeuuid} into the Java type {@link UUID}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code timeuuid}. - */ - public static TypeCodec timeUUID() { - return TimeUUIDCodec.instance; + public abstract ByteBuffer serializeNoBoxing(long v, ProtocolVersion protocolVersion); + + public abstract long deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + + @Override + public ByteBuffer serialize(Long value, ProtocolVersion protocolVersion) { + return value == null ? 
null : serializeNoBoxing(value, protocolVersion); } - /** - * Return the default codec for the CQL type {@code inet}. - * The returned codec maps the CQL type {@code inet} into the Java type {@link InetAddress}. - * The returned instance is a singleton. - * - * @return the default codec for CQL type {@code inet}. - */ - public static TypeCodec inet() { - return InetCodec.instance; + @Override + public Long deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return a newly-created codec for the CQL type {@code list} whose element type - * is determined by the given element codec. - * The returned codec maps the CQL type {@code list} into the Java type {@link List}. - * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. - * - * @param elementCodec the codec that will handle elements of this list. - * @return A newly-created codec for CQL type {@code list}. - */ - public static TypeCodec> list(TypeCodec elementCodec) { - return new ListCodec(elementCodec); + /** + * A codec that is capable of handling primitive floats, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveFloatCodec extends TypeCodec { + + protected PrimitiveFloatCodec(DataType cqlType) { + super(cqlType, Float.class); } - /** - * Return a newly-created codec for the CQL type {@code set} whose element type - * is determined by the given element codec. - * The returned codec maps the CQL type {@code set} into the Java type {@link Set}. - * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. - * - * @param elementCodec the codec that will handle elements of this set. - * @return A newly-created codec for CQL type {@code set}. - */ - public static TypeCodec> set(TypeCodec elementCodec) { - return new SetCodec(elementCodec); + public abstract ByteBuffer serializeNoBoxing(float v, ProtocolVersion protocolVersion); + + public abstract float deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + + @Override + public ByteBuffer serialize(Float value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return a newly-created codec for the CQL type {@code map} whose key type - * and value type are determined by the given codecs. - * The returned codec maps the CQL type {@code map} into the Java type {@link Map}. - * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. - * - * @param keyCodec the codec that will handle keys of this map. - * @param valueCodec the codec that will handle values of this map. - * @return A newly-created codec for CQL type {@code map}. - */ - public static TypeCodec> map(TypeCodec keyCodec, TypeCodec valueCodec) { - return new MapCodec(keyCodec, valueCodec); + @Override + public Float deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Return a newly-created codec for the given user-defined CQL type. - * The returned codec maps the user-defined type into the Java type {@link UDTValue}. - * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. 
- * - * @param type the user-defined type this codec should handle. - * @return A newly-created codec for the given user-defined CQL type. - */ - public static TypeCodec userType(UserType type) { - return new UDTCodec(type); + /** + * A codec that is capable of handling primitive doubles, thus avoiding the overhead of boxing and + * unboxing such primitives. + */ + public abstract static class PrimitiveDoubleCodec extends TypeCodec { + + protected PrimitiveDoubleCodec(DataType cqlType) { + super(cqlType, Double.class); } - /** - * Return a newly-created codec for the given CQL tuple type. - * The returned codec maps the tuple type into the Java type {@link TupleValue}. - * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. - * - * @param type the tuple type this codec should handle. - * @return A newly-created codec for the given CQL tuple type. - */ - public static TypeCodec tuple(TupleType type) { - return new TupleCodec(type); + public abstract ByteBuffer serializeNoBoxing(double v, ProtocolVersion protocolVersion); + + public abstract double deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + + @Override + public ByteBuffer serialize(Double value, ProtocolVersion protocolVersion) { + return value == null ? null : serializeNoBoxing(value, protocolVersion); } - /** - * Return a newly-created codec for the given CQL custom type. - *

- * The returned codec maps the custom type into the Java type {@link ByteBuffer}, - * thus providing a (very lightweight) support for Cassandra - * types that do not have a CQL equivalent. - *

- * Note that the returned codec assumes that CQL literals for the given custom - * type are expressed in binary form as well, e.g. {@code 0xcafebabe}. If this is - * not the case, the returned codec might be unable to {@link #parse(String) parse} - * and {@link #format(Object) format} literals for this type. - * This is notoriously true for types inheriting from - * {@code org.apache.cassandra.db.marshal.AbstractCompositeType}, whose CQL literals - * are actually expressed as quoted strings. - *

- * This method does not cache returned instances and returns a newly-allocated object - * at each invocation. - * - * @param type the custom type this codec should handle. - * @return A newly-created codec for the given CQL custom type. - */ - public static TypeCodec custom(DataType.CustomType type) { - return new CustomCodec(type); + @Override + public Double deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : deserializeNoBoxing(bytes, protocolVersion); } + } - /** - * Returns the default codec for the {@link DataType#duration() Duration type}. - *

- * This codec maps duration types to the driver's built-in {@link Duration} class, - * thus providing a more user-friendly mapping than the low-level mapping provided by regular - * {@link #custom(DataType.CustomType) custom type codecs}. - *

- * The returned instance is a singleton. - * - * @return the default codec for the Duration type. - */ - public static TypeCodec duration() { - return DurationCodec.instance; + /** + * Base class for codecs handling CQL string types such as {@link DataType#varchar()}, {@link + * DataType#text()} or {@link DataType#ascii()}. + */ + private abstract static class StringCodec extends TypeCodec { + + private final Charset charset; + + private StringCodec(DataType cqlType, Charset charset) { + super(cqlType, String.class); + this.charset = charset; } - protected final TypeToken javaType; + @Override + public String parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + if (!ParseUtils.isQuoted(value)) + throw new InvalidTypeException("text or varchar values must be enclosed by single quotes"); - protected final DataType cqlType; + return ParseUtils.unquote(value); + } - /** - * This constructor can only be used for non parameterized types. - * For parameterized ones, please use {@link #TypeCodec(DataType, TypeToken)} instead. - * - * @param javaClass The Java class this codec serializes from and deserializes to. - */ - protected TypeCodec(DataType cqlType, Class javaClass) { - this(cqlType, TypeToken.of(javaClass)); + @Override + public String format(String value) { + if (value == null) return "NULL"; + return ParseUtils.quote(value); } - protected TypeCodec(DataType cqlType, TypeToken javaType) { - checkNotNull(cqlType, "cqlType cannot be null"); - checkNotNull(javaType, "javaType cannot be null"); - checkArgument(!javaType.isPrimitive(), "Cannot create a codec for a primitive Java type (%s), please use the wrapper type instead", javaType); - this.cqlType = cqlType; - this.javaType = javaType; + @Override + public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) { + return value == null ? null : ByteBuffer.wrap(value.getBytes(charset)); } /** - * Return the Java type that this codec deserializes to and serializes from. + * {@inheritDoc} * - * @return The Java type this codec deserializes to and serializes from. + *

Implementation note: this method treats {@code null}s and empty buffers differently: the + * formers are mapped to {@code null}s while the latters are mapped to empty strings. */ - public TypeToken getJavaType() { - return javaType; + @Override + public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null) return null; + if (bytes.remaining() == 0) return ""; + return new String(Bytes.getArray(bytes), charset); } + } - /** - * Return the CQL type that this codec deserializes from and serializes to. - * - * @return The Java type this codec deserializes from and serializes to. - */ - public DataType getCqlType() { - return cqlType; + /** + * This codec maps a CQL {@link DataType#varchar()} to a Java {@link String}. Note that this codec + * also handles {@link DataType#text()}, which is merely an alias for {@link DataType#varchar()}. + */ + private static class VarcharCodec extends StringCodec { + + private static final VarcharCodec instance = new VarcharCodec(); + + private VarcharCodec() { + super(DataType.varchar(), Charset.forName("UTF-8")); } + } - /** - * Serialize the given value according to the CQL type - * handled by this codec. - *
<p/>
- * Implementation notes:
- * <ol>
- * <li>Null values should be gracefully handled and no exception should be raised; - * these should be considered as the equivalent of a NULL CQL value;</li>
- * <li>Codecs for CQL collection types should not permit null elements;</li>
- * <li>Codecs for CQL collection types should treat a {@code null} input as - * the equivalent of an empty collection.</li>
- * </ol>
- * - * @param value An instance of T; may be {@code null}. - * @param protocolVersion the protocol version to use when serializing - * {@code bytes}. In most cases, the proper value to provide for this argument - * is the value returned by {@link ProtocolOptions#getProtocolVersion} (which - * is the protocol version in use by the driver). - * @return A {@link ByteBuffer} instance containing the serialized form of T - * @throws InvalidTypeException if the given value does not have the expected type - */ - public abstract ByteBuffer serialize(T value, ProtocolVersion protocolVersion) throws InvalidTypeException; + /** This codec maps a CQL {@link DataType#ascii()} to a Java {@link String}. */ + private static class AsciiCodec extends StringCodec { - /** - * Deserialize the given {@link ByteBuffer} instance according to the CQL type - * handled by this codec. - *
<p/>
- * Implementation notes:
- * <ol>
- * <li>Null or empty buffers should be gracefully handled and no exception should be raised; - * these should be considered as the equivalent of a NULL CQL value and, in most cases, should - * map to {@code null} or a default value for the corresponding Java type, if applicable;</li>
- * <li>Codecs for CQL collection types should clearly document whether they return immutable collections or not - * (note that the driver's default collection codecs return mutable collections);</li>
- * <li>Codecs for CQL collection types should avoid returning {@code null}; - * they should return empty collections instead (the driver's default collection codecs all comply with this rule).</li>
- * <li>The provided {@link ByteBuffer} should never be consumed by read operations that - * modify its current position; if necessary, - * {@link ByteBuffer#duplicate() duplicate} it before consuming.</li>
- * </ol>
- * - * @param bytes A {@link ByteBuffer} instance containing the serialized form of T; - * may be {@code null} or empty. - * @param protocolVersion the protocol version to use when serializing - * {@code bytes}. In most cases, the proper value to provide for this argument - * is the value returned by {@link ProtocolOptions#getProtocolVersion} (which - * is the protocol version in use by the driver). - * @return An instance of T - * @throws InvalidTypeException if the given {@link ByteBuffer} instance cannot be deserialized - */ - public abstract T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException; + private static final AsciiCodec instance = new AsciiCodec(); - /** - * Parse the given CQL literal into an instance of the Java type - * handled by this codec. - *
<p/>
- * Implementors should take care of unquoting and unescaping the given CQL string - * where applicable. - * Null values and empty Strings should be accepted, as well as the string {@code "NULL"}; - * in most cases, implementations should interpret these inputs as equivalent to a {@code null} - * reference.
- * <p/>
- * Implementing this method is not strictly mandatory: internally, the driver only uses it to - * parse the INITCOND when building the metadata of an aggregate function (and in most cases it - * will use a built-in codec, unless the INITCOND has a custom type). - * - * @param value The CQL string to parse, may be {@code null} or empty. - * @return An instance of T; may be {@code null} on a {@code null input}. - * @throws InvalidTypeException if the given value cannot be parsed into the expected type - */ - public abstract T parse(String value) throws InvalidTypeException; + private static final Pattern ASCII_PATTERN = Pattern.compile("^\\p{ASCII}*$"); - /** - * Format the given value as a valid CQL literal according - * to the CQL type handled by this codec. - *
<p/>
- * Implementors should take care of quoting and escaping the resulting CQL literal - * where applicable. - * Null values should be accepted; in most cases, implementations should - * return the CQL keyword {@code "NULL"} for {@code null} inputs.
- * <p/>
- * Implementing this method is not strictly mandatory. It is used:
- * <ol>
- * <li>in the query builder, when values are inlined in the query string (see - * {@link com.datastax.driver.core.querybuilder.BuiltStatement} for a detailed - * explanation of when this happens);</li>
- * <li>in the {@link QueryLogger}, if parameter logging is enabled;</li>
- * <li>to format the INITCOND in {@link AggregateMetadata#asCQLQuery(boolean)};</li>
- * <li>in the {@code toString()} implementation of some objects ({@link UDTValue}, - * {@link TupleValue}, and the internal representation of a {@code ROWS} response), - * which may appear in driver logs.</li>
- * </ol>
- * If you choose not to implement this method, you should not throw an exception but - * instead return a constant string (for example "XxxCodec.format not implemented"). - * - * @param value An instance of T; may be {@code null}. - * @return CQL string - * @throws InvalidTypeException if the given value does not have the expected type - */ - public abstract String format(T value) throws InvalidTypeException; + private AsciiCodec() { + super(DataType.ascii(), Charset.forName("US-ASCII")); + } - /** - * Return {@code true} if this codec is capable of serializing - * the given {@code javaType}. - *
<p/>
- * The implementation is invariant with respect to the passed - * argument (through the usage of {@link TypeToken#equals(Object)}) - * and it's strongly recommended not to modify this behavior. - * This means that a codec will only ever return {@code true} for the - * exact Java type that it has been created for.
- * <p/>
- * If the argument represents a Java primitive type, its wrapper type - * is considered instead. - * - * @param javaType The Java type this codec should serialize from and deserialize to; cannot be {@code null}. - * @return {@code true} if the codec is capable of serializing - * the given {@code javaType}, and {@code false} otherwise. - * @throws NullPointerException if {@code javaType} is {@code null}. - */ - public boolean accepts(TypeToken javaType) { - checkNotNull(javaType, "Parameter javaType cannot be null"); - return this.javaType.equals(javaType.wrap()); + @Override + public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) { + if (value != null && !ASCII_PATTERN.matcher(value).matches()) { + throw new InvalidTypeException(String.format("%s is not a valid ASCII String", value)); + } + return super.serialize(value, protocolVersion); } - /** - * Return {@code true} if this codec is capable of serializing - * the given {@code javaType}. - *
<p/>
- * This implementation simply calls {@link #accepts(TypeToken)}. - * - * @param javaType The Java type this codec should serialize from and deserialize to; cannot be {@code null}. - * @return {@code true} if the codec is capable of serializing - * the given {@code javaType}, and {@code false} otherwise. - * @throws NullPointerException if {@code javaType} is {@code null}. - */ - public boolean accepts(Class javaType) { - checkNotNull(javaType, "Parameter javaType cannot be null"); - return accepts(TypeToken.of(javaType)); + @Override + public String format(String value) { + if (value != null && !ASCII_PATTERN.matcher(value).matches()) { + throw new InvalidTypeException(String.format("%s is not a valid ASCII String", value)); + } + return super.format(value); } + } - /** - * Return {@code true} if this codec is capable of deserializing - * the given {@code cqlType}. - * - * @param cqlType The CQL type this codec should deserialize from and serialize to; cannot be {@code null}. - * @return {@code true} if the codec is capable of deserializing - * the given {@code cqlType}, and {@code false} otherwise. - * @throws NullPointerException if {@code cqlType} is {@code null}. - */ - public boolean accepts(DataType cqlType) { - checkNotNull(cqlType, "Parameter cqlType cannot be null"); - return this.cqlType.equals(cqlType); + /** + * Base class for codecs handling CQL 8-byte integer types such as {@link DataType#bigint()}, + * {@link DataType#counter()} or {@link DataType#time()}. + */ + private abstract static class LongCodec extends PrimitiveLongCodec { + + private LongCodec(DataType cqlType) { + super(cqlType); } - /** - * Return {@code true} if this codec is capable of serializing - * the given object. Note that the object's Java type is inferred - * from the object' runtime (raw) type, contrary - * to {@link #accepts(TypeToken)} which is capable of - * handling generic types. - *
<p/>
- * This method is intended mostly to be used by the QueryBuilder - * when no type information is available when the codec is used.
- * <p/>
- * Implementation notes:
- * <ol>
- * <li>The default implementation is covariant with respect to the passed - * argument (through the usage of {@link TypeToken#isAssignableFrom(TypeToken)} or {@link TypeToken#isSupertypeOf(Type)}) - * and it's strongly recommended not to modify this behavior. - * This means that, by default, a codec will accept - * any subtype of the Java type that it has been created for.</li>
- * <li>The base implementation provided here can only handle non-parameterized types; - * codecs handling parameterized types, such as collection types, must override - * this method and perform some sort of "manual" - * inspection of the actual type parameters.</li>
- * <li>Similarly, codecs that only accept a partial subset of all possible values - * must override this method and manually inspect the object to check if it - * complies or not with the codec's limitations.</li>
- * </ol>
- * - * @param value The Java type this codec should serialize from and deserialize to; cannot be {@code null}. - * @return {@code true} if the codec is capable of serializing - * the given {@code javaType}, and {@code false} otherwise. - * @throws NullPointerException if {@code value} is {@code null}. - */ - public boolean accepts(Object value) { - checkNotNull(value, "Parameter value cannot be null"); - return GuavaCompatibility.INSTANCE.isSupertypeOf(this.javaType, TypeToken.of(value.getClass())); + @Override + public Long parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Long.parseLong(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 64-bits long value from \"%s\"", value)); + } } @Override - public String toString() { - return String.format("%s [%s <-> %s]", this.getClass().getSimpleName(), cqlType, javaType); + public String format(Long value) { + if (value == null) return "NULL"; + return Long.toString(value); } - /** - * A codec that is capable of handling primitive booleans, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveBooleanCodec extends TypeCodec { + @Override + public ByteBuffer serializeNoBoxing(long value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(8); + bb.putLong(0, value); + return bb; + } - protected PrimitiveBooleanCodec(DataType cqlType) { - super(cqlType, Boolean.class); - } + @Override + public long deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 8) + throw new InvalidTypeException( + "Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining()); - public abstract ByteBuffer serializeNoBoxing(boolean v, ProtocolVersion protocolVersion); + return bytes.getLong(bytes.position()); + } + } - public abstract boolean deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + /** This codec maps a CQL {@link DataType#bigint()} to a Java {@link Long}. */ + private static class BigintCodec extends LongCodec { - @Override - public ByteBuffer serialize(Boolean value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + private static final BigintCodec instance = new BigintCodec(); - @Override - public Boolean deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + private BigintCodec() { + super(DataType.bigint()); } + } - /** - * A codec that is capable of handling primitive bytes, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveByteCodec extends TypeCodec { + /** This codec maps a CQL {@link DataType#counter()} to a Java {@link Long}. */ + private static class CounterCodec extends LongCodec { - protected PrimitiveByteCodec(DataType cqlType) { - super(cqlType, Byte.class); - } + private static final CounterCodec instance = new CounterCodec(); - public abstract ByteBuffer serializeNoBoxing(byte v, ProtocolVersion protocolVersion); + private CounterCodec() { + super(DataType.counter()); + } + } - public abstract byte deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + /** This codec maps a CQL {@link DataType#blob()} to a Java {@link ByteBuffer}. 
*/ + private static class BlobCodec extends TypeCodec { - @Override - public ByteBuffer serialize(Byte value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + private static final BlobCodec instance = new BlobCodec(); - @Override - public Byte deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + private BlobCodec() { + super(DataType.blob(), ByteBuffer.class); } - /** - * A codec that is capable of handling primitive shorts, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveShortCodec extends TypeCodec { + @Override + public ByteBuffer parse(String value) { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Bytes.fromHexString(value); + } - protected PrimitiveShortCodec(DataType cqlType) { - super(cqlType, Short.class); - } + @Override + public String format(ByteBuffer value) { + if (value == null) return "NULL"; + return Bytes.toHexString(value); + } - public abstract ByteBuffer serializeNoBoxing(short v, ProtocolVersion protocolVersion); + @Override + public ByteBuffer serialize(ByteBuffer value, ProtocolVersion protocolVersion) { + return value == null ? null : value.duplicate(); + } - public abstract short deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + @Override + public ByteBuffer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null ? null : bytes.duplicate(); + } + } - @Override - public ByteBuffer serialize(Short value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + /** + * This codec maps a CQL {@link DataType#custom(String) custom} type to a Java {@link ByteBuffer}. + * Note that no instance of this codec is part of the default set of codecs used by the Java + * driver; instances of this codec must be manually registered. + */ + private static class CustomCodec extends TypeCodec { - @Override - public Short deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + private CustomCodec(DataType custom) { + super(custom, ByteBuffer.class); + assert custom.getName() == Name.CUSTOM; } - /** - * A codec that is capable of handling primitive ints, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveIntCodec extends TypeCodec { + @Override + public ByteBuffer parse(String value) { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Bytes.fromHexString(value); + } - protected PrimitiveIntCodec(DataType cqlType) { - super(cqlType, Integer.class); - } + @Override + public String format(ByteBuffer value) { + if (value == null) return "NULL"; + return Bytes.toHexString(value); + } - public abstract ByteBuffer serializeNoBoxing(int v, ProtocolVersion protocolVersion); + @Override + public ByteBuffer serialize(ByteBuffer value, ProtocolVersion protocolVersion) { + return value == null ? null : value.duplicate(); + } - public abstract int deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + @Override + public ByteBuffer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null ? 
null : bytes.duplicate(); + } + } - @Override - public ByteBuffer serialize(Integer value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + /** This codec maps a CQL {@link DataType#cboolean()} to a Java {@link Boolean}. */ + private static class BooleanCodec extends PrimitiveBooleanCodec { - @Override - public Integer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } - } + private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[] {1}); + private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[] {0}); - /** - * A codec that is capable of handling primitive longs, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveLongCodec extends TypeCodec { + private static final BooleanCodec instance = new BooleanCodec(); - protected PrimitiveLongCodec(DataType cqlType) { - super(cqlType, Long.class); - } + private BooleanCodec() { + super(DataType.cboolean()); + } - public abstract ByteBuffer serializeNoBoxing(long v, ProtocolVersion protocolVersion); + @Override + public Boolean parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + if (value.equalsIgnoreCase(Boolean.FALSE.toString())) return false; + if (value.equalsIgnoreCase(Boolean.TRUE.toString())) return true; - public abstract long deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + throw new InvalidTypeException( + String.format("Cannot parse boolean value from \"%s\"", value)); + } - @Override - public ByteBuffer serialize(Long value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + @Override + public String format(Boolean value) { + if (value == null) return "NULL"; + return value ? "true" : "false"; + } - @Override - public Long deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + @Override + public ByteBuffer serializeNoBoxing(boolean value, ProtocolVersion protocolVersion) { + return value ? TRUE.duplicate() : FALSE.duplicate(); } - /** - * A codec that is capable of handling primitive floats, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveFloatCodec extends TypeCodec { + @Override + public boolean deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return false; + if (bytes.remaining() != 1) + throw new InvalidTypeException( + "Invalid boolean value, expecting 1 byte but got " + bytes.remaining()); - protected PrimitiveFloatCodec(DataType cqlType) { - super(cqlType, Float.class); - } + return bytes.get(bytes.position()) != 0; + } + } - public abstract ByteBuffer serializeNoBoxing(float v, ProtocolVersion protocolVersion); + /** This codec maps a CQL {@link DataType#decimal()} to a Java {@link BigDecimal}. */ + private static class DecimalCodec extends TypeCodec { - public abstract float deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + private static final DecimalCodec instance = new DecimalCodec(); - @Override - public ByteBuffer serialize(Float value, ProtocolVersion protocolVersion) { - return value == null ? 
null : serializeNoBoxing(value, protocolVersion); - } + private DecimalCodec() { + super(DataType.decimal(), BigDecimal.class); + } - @Override - public Float deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + @Override + public BigDecimal parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : new BigDecimal(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse decimal value from \"%s\"", value)); + } } - /** - * A codec that is capable of handling primitive doubles, - * thus avoiding the overhead of boxing and unboxing such primitives. - */ - public static abstract class PrimitiveDoubleCodec extends TypeCodec { + @Override + public String format(BigDecimal value) { + if (value == null) return "NULL"; + return value.toString(); + } - protected PrimitiveDoubleCodec(DataType cqlType) { - super(cqlType, Double.class); - } + @Override + public ByteBuffer serialize(BigDecimal value, ProtocolVersion protocolVersion) { + if (value == null) return null; + BigInteger bi = value.unscaledValue(); + int scale = value.scale(); + byte[] bibytes = bi.toByteArray(); - public abstract ByteBuffer serializeNoBoxing(double v, ProtocolVersion protocolVersion); + ByteBuffer bytes = ByteBuffer.allocate(4 + bibytes.length); + bytes.putInt(scale); + bytes.put(bibytes); + bytes.rewind(); + return bytes; + } - public abstract double deserializeNoBoxing(ByteBuffer v, ProtocolVersion protocolVersion); + @Override + public BigDecimal deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return null; + if (bytes.remaining() < 4) + throw new InvalidTypeException( + "Invalid decimal value, expecting at least 4 bytes but got " + bytes.remaining()); - @Override - public ByteBuffer serialize(Double value, ProtocolVersion protocolVersion) { - return value == null ? null : serializeNoBoxing(value, protocolVersion); - } + bytes = bytes.duplicate(); + int scale = bytes.getInt(); + byte[] bibytes = new byte[bytes.remaining()]; + bytes.get(bibytes); - @Override - public Double deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : deserializeNoBoxing(bytes, protocolVersion); - } + BigInteger bi = new BigInteger(bibytes); + return new BigDecimal(bi, scale); } + } - /** - * Base class for codecs handling CQL string types such as {@link DataType#varchar()}, - * {@link DataType#text()} or {@link DataType#ascii()}. - */ - private static abstract class StringCodec extends TypeCodec { + /** This codec maps a CQL {@link DataType#cdouble()} to a Java {@link Double}. 
*/ + private static class DoubleCodec extends PrimitiveDoubleCodec { - private final Charset charset; + private static final DoubleCodec instance = new DoubleCodec(); - private StringCodec(DataType cqlType, Charset charset) { - super(cqlType, String.class); - this.charset = charset; - } - - @Override - public String parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - if (!ParseUtils.isQuoted(value)) - throw new InvalidTypeException("text or varchar values must be enclosed by single quotes"); - - return ParseUtils.unquote(value); - } - - @Override - public String format(String value) { - if (value == null) - return "NULL"; - return ParseUtils.quote(value); - } - - @Override - public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) { - return value == null ? null : ByteBuffer.wrap(value.getBytes(charset)); - } - - /** - * {@inheritDoc} - *
<p/>
- * Implementation note: this method treats {@code null}s and empty buffers differently: - * the formers are mapped to {@code null}s while the latters are mapped to empty strings. - */ - @Override - public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null) - return null; - if (bytes.remaining() == 0) - return ""; - return new String(Bytes.getArray(bytes), charset); - } + private DoubleCodec() { + super(DataType.cdouble()); } - /** - * This codec maps a CQL {@link DataType#varchar()} to a Java {@link String}. - * Note that this codec also handles {@link DataType#text()}, which is merely - * an alias for {@link DataType#varchar()}. - */ - private static class VarcharCodec extends StringCodec { - - private static final VarcharCodec instance = new VarcharCodec(); + @Override + public Double parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Double.parseDouble(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 64-bits double value from \"%s\"", value)); + } + } - private VarcharCodec() { - super(DataType.varchar(), Charset.forName("UTF-8")); - } + @Override + public String format(Double value) { + if (value == null) return "NULL"; + return Double.toString(value); + } + @Override + public ByteBuffer serializeNoBoxing(double value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(8); + bb.putDouble(0, value); + return bb; } - /** - * This codec maps a CQL {@link DataType#ascii()} to a Java {@link String}. - */ - private static class AsciiCodec extends StringCodec { + @Override + public double deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 8) + throw new InvalidTypeException( + "Invalid 64-bits double value, expecting 8 bytes but got " + bytes.remaining()); - private static final AsciiCodec instance = new AsciiCodec(); + return bytes.getDouble(bytes.position()); + } + } - private static final Pattern ASCII_PATTERN = Pattern.compile("^\\p{ASCII}*$"); + /** This codec maps a CQL {@link DataType#cfloat()} to a Java {@link Float}. */ + private static class FloatCodec extends PrimitiveFloatCodec { - private AsciiCodec() { - super(DataType.ascii(), Charset.forName("US-ASCII")); - } + private static final FloatCodec instance = new FloatCodec(); - @Override - public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) { - if (value != null && !ASCII_PATTERN.matcher(value).matches()) { - throw new InvalidTypeException(String.format("%s is not a valid ASCII String", value)); - } - return super.serialize(value, protocolVersion); - } + private FloatCodec() { + super(DataType.cfloat()); + } - @Override - public String format(String value) { - if (value != null && !ASCII_PATTERN.matcher(value).matches()) { - throw new InvalidTypeException(String.format("%s is not a valid ASCII String", value)); - } - return super.format(value); - } + @Override + public Float parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? 
null + : Float.parseFloat(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 32-bits float value from \"%s\"", value)); + } } - /** - * Base class for codecs handling CQL 8-byte integer types such as {@link DataType#bigint()}, - * {@link DataType#counter()} or {@link DataType#time()}. - */ - private abstract static class LongCodec extends PrimitiveLongCodec { + @Override + public String format(Float value) { + if (value == null) return "NULL"; + return Float.toString(value); + } - private LongCodec(DataType cqlType) { - super(cqlType); - } + @Override + public ByteBuffer serializeNoBoxing(float value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putFloat(0, value); + return bb; + } - @Override - public Long parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : Long.parseLong(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 64-bits long value from \"%s\"", value)); - } - } + @Override + public float deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 4) + throw new InvalidTypeException( + "Invalid 32-bits float value, expecting 4 bytes but got " + bytes.remaining()); - @Override - public String format(Long value) { - if (value == null) - return "NULL"; - return Long.toString(value); - } + return bytes.getFloat(bytes.position()); + } + } - @Override - public ByteBuffer serializeNoBoxing(long value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(8); - bb.putLong(0, value); - return bb; - } + /** This codec maps a CQL {@link DataType#inet()} to a Java {@link InetAddress}. */ + private static class InetCodec extends TypeCodec { - @Override - public long deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 8) - throw new InvalidTypeException("Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining()); + private static final InetCodec instance = new InetCodec(); - return bytes.getLong(bytes.position()); - } + private InetCodec() { + super(DataType.inet(), InetAddress.class); } - /** - * This codec maps a CQL {@link DataType#bigint()} to a Java {@link Long}. - */ - private static class BigintCodec extends LongCodec { + @Override + public InetAddress parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - private static final BigintCodec instance = new BigintCodec(); + value = value.trim(); + if (!ParseUtils.isQuoted(value)) + throw new InvalidTypeException( + String.format("inet values must be enclosed in single quotes (\"%s\")", value)); + try { + return InetAddress.getByName(value.substring(1, value.length() - 1)); + } catch (Exception e) { + throw new InvalidTypeException(String.format("Cannot parse inet value from \"%s\"", value)); + } + } - private BigintCodec() { - super(DataType.bigint()); - } + @Override + public String format(InetAddress value) { + if (value == null) return "NULL"; + return "'" + value.getHostAddress() + "'"; + } + @Override + public ByteBuffer serialize(InetAddress value, ProtocolVersion protocolVersion) { + return value == null ? 
null : ByteBuffer.wrap(value.getAddress()); } - /** - * This codec maps a CQL {@link DataType#counter()} to a Java {@link Long}. - */ - private static class CounterCodec extends LongCodec { + @Override + public InetAddress deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return null; + try { + return InetAddress.getByAddress(Bytes.getArray(bytes)); + } catch (UnknownHostException e) { + throw new InvalidTypeException( + "Invalid bytes for inet value, got " + bytes.remaining() + " bytes"); + } + } + } - private static final CounterCodec instance = new CounterCodec(); + /** This codec maps a CQL {@link DataType#tinyint()} to a Java {@link Byte}. */ + private static class TinyIntCodec extends PrimitiveByteCodec { - private CounterCodec() { - super(DataType.counter()); - } + private static final TinyIntCodec instance = new TinyIntCodec(); + private TinyIntCodec() { + super(tinyint()); } - /** - * This codec maps a CQL {@link DataType#blob()} to a Java {@link ByteBuffer}. - */ - private static class BlobCodec extends TypeCodec { - - private static final BlobCodec instance = new BlobCodec(); - - private BlobCodec() { - super(DataType.blob(), ByteBuffer.class); - } + @Override + public Byte parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Byte.parseByte(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 8-bits int value from \"%s\"", value)); + } + } - @Override - public ByteBuffer parse(String value) { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : Bytes.fromHexString(value); - } + @Override + public String format(Byte value) { + if (value == null) return "NULL"; + return Byte.toString(value); + } - @Override - public String format(ByteBuffer value) { - if (value == null) - return "NULL"; - return Bytes.toHexString(value); - } + @Override + public ByteBuffer serializeNoBoxing(byte value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(1); + bb.put(0, value); + return bb; + } - @Override - public ByteBuffer serialize(ByteBuffer value, ProtocolVersion protocolVersion) { - return value == null ? null : value.duplicate(); - } + @Override + public byte deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 1) + throw new InvalidTypeException( + "Invalid 8-bits integer value, expecting 1 byte but got " + bytes.remaining()); - @Override - public ByteBuffer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null ? null : bytes.duplicate(); - } + return bytes.get(bytes.position()); } + } - /** - * This codec maps a CQL {@link DataType#custom(String) custom} type to a Java {@link ByteBuffer}. - * Note that no instance of this codec is part of the default set of codecs used by the Java driver; - * instances of this codec must be manually registered. - */ - private static class CustomCodec extends TypeCodec { + /** This codec maps a CQL {@link DataType#smallint()} to a Java {@link Short}. 
*/ + private static class SmallIntCodec extends PrimitiveShortCodec { - private CustomCodec(DataType custom) { - super(custom, ByteBuffer.class); - assert custom.getName() == Name.CUSTOM; - } + private static final SmallIntCodec instance = new SmallIntCodec(); - @Override - public ByteBuffer parse(String value) { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : Bytes.fromHexString(value); - } + private SmallIntCodec() { + super(smallint()); + } - @Override - public String format(ByteBuffer value) { - if (value == null) - return "NULL"; - return Bytes.toHexString(value); - } + @Override + public Short parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Short.parseShort(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 16-bits int value from \"%s\"", value)); + } + } - @Override - public ByteBuffer serialize(ByteBuffer value, ProtocolVersion protocolVersion) { - return value == null ? null : value.duplicate(); - } + @Override + public String format(Short value) { + if (value == null) return "NULL"; + return Short.toString(value); + } - @Override - public ByteBuffer deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null ? null : bytes.duplicate(); - } + @Override + public ByteBuffer serializeNoBoxing(short value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(2); + bb.putShort(0, value); + return bb; } - /** - * This codec maps a CQL {@link DataType#cboolean()} to a Java {@link Boolean}. - */ - private static class BooleanCodec extends PrimitiveBooleanCodec { + @Override + public short deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 2) + throw new InvalidTypeException( + "Invalid 16-bits integer value, expecting 2 bytes but got " + bytes.remaining()); - private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[]{1}); - private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[]{0}); + return bytes.getShort(bytes.position()); + } + } - private static final BooleanCodec instance = new BooleanCodec(); + /** This codec maps a CQL {@link DataType#cint()} to a Java {@link Integer}. */ + private static class IntCodec extends PrimitiveIntCodec { - private BooleanCodec() { - super(DataType.cboolean()); - } + private static final IntCodec instance = new IntCodec(); - @Override - public Boolean parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - if (value.equalsIgnoreCase(Boolean.FALSE.toString())) - return false; - if (value.equalsIgnoreCase(Boolean.TRUE.toString())) - return true; + private IntCodec() { + super(DataType.cint()); + } - throw new InvalidTypeException(String.format("Cannot parse boolean value from \"%s\"", value)); - } + @Override + public Integer parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse 32-bits int value from \"%s\"", value)); + } + } - @Override - public String format(Boolean value) { - if (value == null) - return "NULL"; - return value ? 
"true" : "false"; - } + @Override + public String format(Integer value) { + if (value == null) return "NULL"; + return Integer.toString(value); + } - @Override - public ByteBuffer serializeNoBoxing(boolean value, ProtocolVersion protocolVersion) { - return value ? TRUE.duplicate() : FALSE.duplicate(); - } + @Override + public ByteBuffer serializeNoBoxing(int value, ProtocolVersion protocolVersion) { + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, value); + return bb; + } - @Override - public boolean deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return false; - if (bytes.remaining() != 1) - throw new InvalidTypeException("Invalid boolean value, expecting 1 byte but got " + bytes.remaining()); + @Override + public int deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return 0; + if (bytes.remaining() != 4) + throw new InvalidTypeException( + "Invalid 32-bits integer value, expecting 4 bytes but got " + bytes.remaining()); - return bytes.get(bytes.position()) != 0; - } + return bytes.getInt(bytes.position()); } + } - /** - * This codec maps a CQL {@link DataType#decimal()} to a Java {@link BigDecimal}. - */ - private static class DecimalCodec extends TypeCodec { + /** This codec maps a CQL {@link DataType#timestamp()} to a Java {@link Date}. */ + private static class TimestampCodec extends TypeCodec { - private static final DecimalCodec instance = new DecimalCodec(); + private static final TimestampCodec instance = new TimestampCodec(); - private DecimalCodec() { - super(DataType.decimal(), BigDecimal.class); - } + private TimestampCodec() { + super(DataType.timestamp(), Date.class); + } - @Override - public BigDecimal parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? 
null : new BigDecimal(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse decimal value from \"%s\"", value)); - } - } + @Override + public Date parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + // strip enclosing single quotes, if any + if (ParseUtils.isQuoted(value)) value = ParseUtils.unquote(value); - @Override - public String format(BigDecimal value) { - if (value == null) - return "NULL"; - return value.toString(); + if (ParseUtils.isLongLiteral(value)) { + try { + return new Date(Long.parseLong(value)); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse timestamp value from \"%s\"", value)); } + } - @Override - public ByteBuffer serialize(BigDecimal value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - BigInteger bi = value.unscaledValue(); - int scale = value.scale(); - byte[] bibytes = bi.toByteArray(); - - ByteBuffer bytes = ByteBuffer.allocate(4 + bibytes.length); - bytes.putInt(scale); - bytes.put(bibytes); - bytes.rewind(); - return bytes; - } + try { + return ParseUtils.parseDate(value); + } catch (ParseException e) { + throw new InvalidTypeException( + String.format("Cannot parse timestamp value from \"%s\"", value)); + } + } - @Override - public BigDecimal deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return null; - if (bytes.remaining() < 4) - throw new InvalidTypeException("Invalid decimal value, expecting at least 4 bytes but got " + bytes.remaining()); + @Override + public String format(Date value) { + if (value == null) return "NULL"; + return Long.toString(value.getTime()); + } - bytes = bytes.duplicate(); - int scale = bytes.getInt(); - byte[] bibytes = new byte[bytes.remaining()]; - bytes.get(bibytes); + @Override + public ByteBuffer serialize(Date value, ProtocolVersion protocolVersion) { + return value == null + ? null + : BigintCodec.instance.serializeNoBoxing(value.getTime(), protocolVersion); + } - BigInteger bi = new BigInteger(bibytes); - return new BigDecimal(bi, scale); - } + @Override + public Date deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : new Date(BigintCodec.instance.deserializeNoBoxing(bytes, protocolVersion)); } + } - /** - * This codec maps a CQL {@link DataType#cdouble()} to a Java {@link Double}. - */ - private static class DoubleCodec extends PrimitiveDoubleCodec { + /** This codec maps a CQL {@link DataType#date()} to the custom {@link LocalDate} class. */ + private static class DateCodec extends TypeCodec { - private static final DoubleCodec instance = new DoubleCodec(); + private static final DateCodec instance = new DateCodec(); - private DoubleCodec() { - super(DataType.cdouble()); - } + private static final String pattern = "yyyy-MM-dd"; - @Override - public Double parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? 
null : Double.parseDouble(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 64-bits double value from \"%s\"", value)); - } - } + private DateCodec() { + super(DataType.date(), LocalDate.class); + } - @Override - public String format(Double value) { - if (value == null) - return "NULL"; - return Double.toString(value); - } + @Override + public LocalDate parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + + // single quotes are optional for long literals, mandatory for date patterns + // strip enclosing single quotes, if any + if (ParseUtils.isQuoted(value)) value = ParseUtils.unquote(value); + + if (ParseUtils.isLongLiteral(value)) { + long unsigned; + try { + unsigned = Long.parseLong(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse date value from \"%s\"", value), e); + } + try { + int days = CodecUtils.fromCqlDateToDaysSinceEpoch(unsigned); + return LocalDate.fromDaysSinceEpoch(days); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format("Cannot parse date value from \"%s\"", value), e); + } + } + + try { + Date date = ParseUtils.parseDate(value, pattern); + return LocalDate.fromMillisSinceEpoch(date.getTime()); + } catch (ParseException e) { + throw new InvalidTypeException( + String.format("Cannot parse date value from \"%s\"", value), e); + } + } - @Override - public ByteBuffer serializeNoBoxing(double value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(8); - bb.putDouble(0, value); - return bb; - } + @Override + public String format(LocalDate value) { + if (value == null) return "NULL"; + return ParseUtils.quote(value.toString()); + } - @Override - public double deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 8) - throw new InvalidTypeException("Invalid 64-bits double value, expecting 8 bytes but got " + bytes.remaining()); + @Override + public ByteBuffer serialize(LocalDate value, ProtocolVersion protocolVersion) { + if (value == null) return null; + int unsigned = CodecUtils.fromSignedToUnsignedInt(value.getDaysSinceEpoch()); + return IntCodec.instance.serializeNoBoxing(unsigned, protocolVersion); + } - return bytes.getDouble(bytes.position()); - } + @Override + public LocalDate deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return null; + int unsigned = IntCodec.instance.deserializeNoBoxing(bytes, protocolVersion); + int signed = CodecUtils.fromUnsignedToSignedInt(unsigned); + return LocalDate.fromDaysSinceEpoch(signed); } + } - /** - * This codec maps a CQL {@link DataType#cfloat()} to a Java {@link Float}. - */ - private static class FloatCodec extends PrimitiveFloatCodec { + /** This codec maps a CQL {@link DataType#time()} to a Java {@link Long}. */ + private static class TimeCodec extends LongCodec { - private static final FloatCodec instance = new FloatCodec(); + private static final TimeCodec instance = new TimeCodec(); - private FloatCodec() { - super(DataType.cfloat()); - } + private TimeCodec() { + super(DataType.time()); + } - @Override - public Float parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? 
null : Float.parseFloat(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 32-bits float value from \"%s\"", value)); - } - } + @Override + public Long parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - @Override - public String format(Float value) { - if (value == null) - return "NULL"; - return Float.toString(value); - } + // enclosing single quotes required, even for long literals + if (!ParseUtils.isQuoted(value)) + throw new InvalidTypeException("time values must be enclosed by single quotes"); + value = value.substring(1, value.length() - 1); - @Override - public ByteBuffer serializeNoBoxing(float value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putFloat(0, value); - return bb; + if (ParseUtils.isLongLiteral(value)) { + try { + return Long.parseLong(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse time value from \"%s\"", value), e); } + } - @Override - public float deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 4) - throw new InvalidTypeException("Invalid 32-bits float value, expecting 4 bytes but got " + bytes.remaining()); - - return bytes.getFloat(bytes.position()); - } + try { + return ParseUtils.parseTime(value); + } catch (ParseException e) { + throw new InvalidTypeException( + String.format("Cannot parse time value from \"%s\"", value), e); + } } - /** - * This codec maps a CQL {@link DataType#inet()} to a Java {@link InetAddress}. - */ - private static class InetCodec extends TypeCodec { - - private static final InetCodec instance = new InetCodec(); + @Override + public String format(Long value) { + if (value == null) return "NULL"; + return ParseUtils.quote(ParseUtils.formatTime(value)); + } + } - private InetCodec() { - super(DataType.inet(), InetAddress.class); - } + /** + * Base class for codecs handling CQL UUID types such as {@link DataType#uuid()} and {@link + * DataType#timeuuid()}. + */ + private abstract static class AbstractUUIDCodec extends TypeCodec { - @Override - public InetAddress parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - - value = value.trim(); - if (!ParseUtils.isQuoted(value)) - throw new InvalidTypeException(String.format("inet values must be enclosed in single quotes (\"%s\")", value)); - try { - return InetAddress.getByName(value.substring(1, value.length() - 1)); - } catch (Exception e) { - throw new InvalidTypeException(String.format("Cannot parse inet value from \"%s\"", value)); - } - } + private AbstractUUIDCodec(DataType cqlType) { + super(cqlType, UUID.class); + } - @Override - public String format(InetAddress value) { - if (value == null) - return "NULL"; - return "'" + value.getHostAddress() + "'"; - } + @Override + public UUID parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : UUID.fromString(value); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format("Cannot parse UUID value from \"%s\"", value), e); + } + } - @Override - public ByteBuffer serialize(InetAddress value, ProtocolVersion protocolVersion) { - return value == null ? 
null : ByteBuffer.wrap(value.getAddress()); - } + @Override + public String format(UUID value) { + if (value == null) return "NULL"; + return value.toString(); + } - @Override - public InetAddress deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return null; - try { - return InetAddress.getByAddress(Bytes.getArray(bytes)); - } catch (UnknownHostException e) { - throw new InvalidTypeException("Invalid bytes for inet value, got " + bytes.remaining() + " bytes"); - } - } + @Override + public ByteBuffer serialize(UUID value, ProtocolVersion protocolVersion) { + if (value == null) return null; + ByteBuffer bb = ByteBuffer.allocate(16); + bb.putLong(0, value.getMostSignificantBits()); + bb.putLong(8, value.getLeastSignificantBits()); + return bb; } - /** - * This codec maps a CQL {@link DataType#tinyint()} to a Java {@link Byte}. - */ - private static class TinyIntCodec extends PrimitiveByteCodec { + @Override + public UUID deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 + ? null + : new UUID(bytes.getLong(bytes.position()), bytes.getLong(bytes.position() + 8)); + } + } - private static final TinyIntCodec instance = new TinyIntCodec(); + /** This codec maps a CQL {@link DataType#uuid()} to a Java {@link UUID}. */ + private static class UUIDCodec extends AbstractUUIDCodec { - private TinyIntCodec() { - super(tinyint()); - } + private static final UUIDCodec instance = new UUIDCodec(); - @Override - public Byte parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : Byte.parseByte(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 8-bits int value from \"%s\"", value)); - } - } + private UUIDCodec() { + super(DataType.uuid()); + } + } - @Override - public String format(Byte value) { - if (value == null) - return "NULL"; - return Byte.toString(value); - } + /** This codec maps a CQL {@link DataType#timeuuid()} to a Java {@link UUID}. */ + private static class TimeUUIDCodec extends AbstractUUIDCodec { - @Override - public ByteBuffer serializeNoBoxing(byte value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(1); - bb.put(0, value); - return bb; - } + private static final TimeUUIDCodec instance = new TimeUUIDCodec(); - @Override - public byte deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 1) - throw new InvalidTypeException("Invalid 8-bits integer value, expecting 1 byte but got " + bytes.remaining()); + private TimeUUIDCodec() { + super(timeuuid()); + } - return bytes.get(bytes.position()); - } + @Override + public String format(UUID value) { + if (value == null) return "NULL"; + if (value.version() != 1) + throw new InvalidTypeException( + String.format("%s is not a Type 1 (time-based) UUID", value)); + return super.format(value); } - /** - * This codec maps a CQL {@link DataType#smallint()} to a Java {@link Short}. 
- */ - private static class SmallIntCodec extends PrimitiveShortCodec { + @Override + public ByteBuffer serialize(UUID value, ProtocolVersion protocolVersion) { + if (value == null) return null; + if (value.version() != 1) + throw new InvalidTypeException( + String.format("%s is not a Type 1 (time-based) UUID", value)); + return super.serialize(value, protocolVersion); + } + } - private static final SmallIntCodec instance = new SmallIntCodec(); + /** This codec maps a CQL {@link DataType#varint()} to a Java {@link BigInteger}. */ + private static class VarintCodec extends TypeCodec { - private SmallIntCodec() { - super(smallint()); - } + private static final VarintCodec instance = new VarintCodec(); - @Override - public Short parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : Short.parseShort(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 16-bits int value from \"%s\"", value)); - } - } + private VarintCodec() { + super(DataType.varint(), BigInteger.class); + } - @Override - public String format(Short value) { - if (value == null) - return "NULL"; - return Short.toString(value); - } + @Override + public BigInteger parse(String value) { + try { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : new BigInteger(value); + } catch (NumberFormatException e) { + throw new InvalidTypeException( + String.format("Cannot parse varint value from \"%s\"", value), e); + } + } - @Override - public ByteBuffer serializeNoBoxing(short value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(2); - bb.putShort(0, value); - return bb; - } + @Override + public String format(BigInteger value) { + if (value == null) return "NULL"; + return value.toString(); + } - @Override - public short deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 2) - throw new InvalidTypeException("Invalid 16-bits integer value, expecting 2 bytes but got " + bytes.remaining()); + @Override + public ByteBuffer serialize(BigInteger value, ProtocolVersion protocolVersion) { + return value == null ? null : ByteBuffer.wrap(value.toByteArray()); + } - return bytes.getShort(bytes.position()); - } + @Override + public BigInteger deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return bytes == null || bytes.remaining() == 0 ? null : new BigInteger(Bytes.getArray(bytes)); } + } - /** - * This codec maps a CQL {@link DataType#cint()} to a Java {@link Integer}. - */ - private static class IntCodec extends PrimitiveIntCodec { + /** + * Base class for codecs mapping CQL {@link DataType#list(DataType) lists} and {@link + * DataType#set(DataType) sets} to Java collections. + */ + public abstract static class AbstractCollectionCodec> + extends TypeCodec { - private static final IntCodec instance = new IntCodec(); + protected final TypeCodec eltCodec; - private IntCodec() { - super(DataType.cint()); - } + protected AbstractCollectionCodec( + CollectionType cqlType, TypeToken javaType, TypeCodec eltCodec) { + super(cqlType, javaType); + checkArgument( + cqlType.getName() == Name.LIST || cqlType.getName() == Name.SET, + "Expecting list or set type, got %s", + cqlType); + this.eltCodec = eltCodec; + } - @Override - public Integer parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? 
null : Integer.parseInt(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse 32-bits int value from \"%s\"", value)); - } - } + @Override + public ByteBuffer serialize(C value, ProtocolVersion protocolVersion) { + if (value == null) return null; + int i = 0; + ByteBuffer[] bbs = new ByteBuffer[value.size()]; + for (E elt : value) { + if (elt == null) { + throw new NullPointerException("Collection elements cannot be null"); + } + ByteBuffer bb; + try { + bb = eltCodec.serialize(elt, protocolVersion); + } catch (ClassCastException e) { + throw new InvalidTypeException( + String.format( + "Invalid type for %s element, expecting %s but got %s", + cqlType, eltCodec.getJavaType(), elt.getClass()), + e); + } + bbs[i++] = bb; + } + return CodecUtils.pack(bbs, value.size(), protocolVersion); + } - @Override - public String format(Integer value) { - if (value == null) - return "NULL"; - return Integer.toString(value); - } + @Override + public C deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return newInstance(0); + try { + ByteBuffer input = bytes.duplicate(); + int size = CodecUtils.readSize(input, protocolVersion); + C coll = newInstance(size); + for (int i = 0; i < size; i++) { + ByteBuffer databb = CodecUtils.readValue(input, protocolVersion); + coll.add(eltCodec.deserialize(databb, protocolVersion)); + } + return coll; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize collection", e); + } + } - @Override - public ByteBuffer serializeNoBoxing(int value, ProtocolVersion protocolVersion) { - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putInt(0, value); - return bb; - } + @Override + public String format(C value) { + if (value == null) return "NULL"; + StringBuilder sb = new StringBuilder(); + sb.append(getOpeningChar()); + int i = 0; + for (E v : value) { + if (i++ != 0) sb.append(","); + sb.append(eltCodec.format(v)); + } + sb.append(getClosingChar()); + return sb.toString(); + } - @Override - public int deserializeNoBoxing(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return 0; - if (bytes.remaining() != 4) - throw new InvalidTypeException("Invalid 32-bits integer value, expecting 4 bytes but got " + bytes.remaining()); + @Override + public C parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != getOpeningChar()) + throw new InvalidTypeException( + String.format( + "Cannot parse collection value from \"%s\", at character %d expecting '%s' but got '%c'", + value, idx, getOpeningChar(), value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == getClosingChar()) return newInstance(0); + + C l = newInstance(10); + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse collection value from \"%s\", invalid CQL value at character %d", + value, idx), + e); + } + + l.add(eltCodec.parse(value.substring(idx, n))); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == getClosingChar()) return l; + if (value.charAt(idx++) != ',') + throw new InvalidTypeException( + String.format( + "Cannot parse collection value from \"%s\", at character %d 
expecting ',' but got '%c'", + value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException( + String.format( + "Malformed collection value \"%s\", missing closing '%s'", value, getClosingChar())); + } - return bytes.getInt(bytes.position()); - } + @Override + public boolean accepts(Object value) { + checkNotNull(value, "Parameter value cannot be null"); + if (getJavaType().getRawType().isAssignableFrom(value.getClass())) { + // runtime type ok, now check element type + Collection coll = (Collection) value; + if (coll.isEmpty()) return true; + Object elt = coll.iterator().next(); + return eltCodec.accepts(elt); + } + return false; } /** - * This codec maps a CQL {@link DataType#timestamp()} to a Java {@link Date}. + * Return a new instance of {@code C} with the given estimated size. + * + * @param size The estimated size of the collection to create. + * @return new instance of {@code C} with the given estimated size. */ - private static class TimestampCodec extends TypeCodec { - - private static final TimestampCodec instance = new TimestampCodec(); - - private TimestampCodec() { - super(DataType.timestamp(), Date.class); - } - - @Override - public Date parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - // strip enclosing single quotes, if any - if (ParseUtils.isQuoted(value)) - value = ParseUtils.unquote(value); - - if (ParseUtils.isLongLiteral(value)) { - try { - return new Date(Long.parseLong(value)); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse timestamp value from \"%s\"", value)); - } - } - - try { - return ParseUtils.parseDate(value); - } catch (ParseException e) { - throw new InvalidTypeException(String.format("Cannot parse timestamp value from \"%s\"", value)); - } - } - - @Override - public String format(Date value) { - if (value == null) - return "NULL"; - return Long.toString(value.getTime()); - } - - @Override - public ByteBuffer serialize(Date value, ProtocolVersion protocolVersion) { - return value == null ? null : BigintCodec.instance.serializeNoBoxing(value.getTime(), protocolVersion); - } - - @Override - public Date deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : new Date(BigintCodec.instance.deserializeNoBoxing(bytes, protocolVersion)); - } - } + protected abstract C newInstance(int size); /** - * This codec maps a CQL {@link DataType#date()} to the custom {@link LocalDate} class. + * Return the opening character to use when formatting values as CQL literals. + * + * @return The opening character to use when formatting values as CQL literals. 
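
The `AbstractCollectionCodec` base class above is the extension point for mapping CQL lists and sets to other Java collection types: a subclass only has to supply `newInstance`. Below is a minimal sketch (not part of this change; the class name and the choice of `TreeSet` are hypothetical) of a codec that maps a CQL `set<text>` to a sorted `TreeSet<String>`, delegating element handling to the built-in varchar codec.

```
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.TypeCodec;
import com.google.common.reflect.TypeToken;
import java.util.TreeSet;

public class TreeSetOfTextCodec extends TypeCodec.AbstractCollectionCodec<String, TreeSet<String>> {

  public TreeSetOfTextCodec() {
    // CQL set<text> on the wire, TreeSet<String> on the Java side; element
    // serialization is delegated to the driver's built-in varchar codec.
    super(
        DataType.set(DataType.varchar()),
        new TypeToken<TreeSet<String>>() {},
        TypeCodec.varchar());
  }

  @Override
  protected TreeSet<String> newInstance(int size) {
    // TreeSet has no capacity hint; the size argument is only an estimate anyway.
    return new TreeSet<String>();
  }
}
```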
*/ - private static class DateCodec extends TypeCodec { - - private static final DateCodec instance = new DateCodec(); - - private static final String pattern = "yyyy-MM-dd"; - - private DateCodec() { - super(DataType.date(), LocalDate.class); - } - - @Override - public LocalDate parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - - // single quotes are optional for long literals, mandatory for date patterns - // strip enclosing single quotes, if any - if (ParseUtils.isQuoted(value)) - value = ParseUtils.unquote(value); - - if (ParseUtils.isLongLiteral(value)) { - long unsigned; - try { - unsigned = Long.parseLong(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse date value from \"%s\"", value), e); - } - try { - int days = CodecUtils.fromCqlDateToDaysSinceEpoch(unsigned); - return LocalDate.fromDaysSinceEpoch(days); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse date value from \"%s\"", value), e); - } - } - - try { - Date date = ParseUtils.parseDate(value, pattern); - return LocalDate.fromMillisSinceEpoch(date.getTime()); - } catch (ParseException e) { - throw new InvalidTypeException(String.format("Cannot parse date value from \"%s\"", value), e); - } - } - - @Override - public String format(LocalDate value) { - if (value == null) - return "NULL"; - return ParseUtils.quote(value.toString()); - } - - @Override - public ByteBuffer serialize(LocalDate value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - int unsigned = CodecUtils.fromSignedToUnsignedInt(value.getDaysSinceEpoch()); - return IntCodec.instance.serializeNoBoxing(unsigned, protocolVersion); - } - - @Override - public LocalDate deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return null; - int unsigned = IntCodec.instance.deserializeNoBoxing(bytes, protocolVersion); - int signed = CodecUtils.fromUnsignedToSignedInt(unsigned); - return LocalDate.fromDaysSinceEpoch(signed); - } - + private char getOpeningChar() { + return cqlType.getName() == Name.LIST ? '[' : '{'; } /** - * This codec maps a CQL {@link DataType#time()} to a Java {@link Long}. + * Return the closing character to use when formatting values as CQL literals. + * + * @return The closing character to use when formatting values as CQL literals. */ - private static class TimeCodec extends LongCodec { + private char getClosingChar() { + return cqlType.getName() == Name.LIST ? ']' : '}'; + } + } - private static final TimeCodec instance = new TimeCodec(); + /** + * This codec maps a CQL {@link DataType#list(DataType) list type} to a Java {@link List}. + * Implementation note: this codec returns mutable, non thread-safe {@link ArrayList} instances. 
+ */ + private static class ListCodec extends AbstractCollectionCodec> { - private TimeCodec() { - super(DataType.time()); - } + private ListCodec(TypeCodec eltCodec) { + super( + DataType.list(eltCodec.getCqlType()), + TypeTokens.listOf(eltCodec.getJavaType()), + eltCodec); + } - @Override - public Long parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - - // enclosing single quotes required, even for long literals - if (!ParseUtils.isQuoted(value)) - throw new InvalidTypeException("time values must be enclosed by single quotes"); - value = value.substring(1, value.length() - 1); - - if (ParseUtils.isLongLiteral(value)) { - try { - return Long.parseLong(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse time value from \"%s\"", value), e); - } - } - - try { - return ParseUtils.parseTime(value); - } catch (ParseException e) { - throw new InvalidTypeException(String.format("Cannot parse time value from \"%s\"", value), e); - } - } + @Override + protected List newInstance(int size) { + return new ArrayList(size); + } + } - @Override - public String format(Long value) { - if (value == null) - return "NULL"; - return ParseUtils.quote(ParseUtils.formatTime(value)); - } + /** + * This codec maps a CQL {@link DataType#set(DataType) set type} to a Java {@link Set}. + * Implementation note: this codec returns mutable, non thread-safe {@link LinkedHashSet} + * instances. + */ + private static class SetCodec extends AbstractCollectionCodec> { + private SetCodec(TypeCodec eltCodec) { + super(DataType.set(eltCodec.cqlType), TypeTokens.setOf(eltCodec.getJavaType()), eltCodec); } - /** - * Base class for codecs handling CQL UUID types such as {@link DataType#uuid()} and {@link DataType#timeuuid()}. - */ - private static abstract class AbstractUUIDCodec extends TypeCodec { - - private AbstractUUIDCodec(DataType cqlType) { - super(cqlType, UUID.class); - } + @Override + protected Set newInstance(int size) { + return new LinkedHashSet(size); + } + } - @Override - public UUID parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : UUID.fromString(value); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse UUID value from \"%s\"", value), e); - } - } + /** + * Base class for codecs mapping CQL {@link DataType#map(DataType, DataType) maps} to a Java + * {@link Map}. + */ + public abstract static class AbstractMapCodec extends TypeCodec> { - @Override - public String format(UUID value) { - if (value == null) - return "NULL"; - return value.toString(); - } + protected final TypeCodec keyCodec; - @Override - public ByteBuffer serialize(UUID value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - ByteBuffer bb = ByteBuffer.allocate(16); - bb.putLong(0, value.getMostSignificantBits()); - bb.putLong(8, value.getLeastSignificantBits()); - return bb; - } + protected final TypeCodec valueCodec; - @Override - public UUID deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? 
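
For context, the built-in `ListCodec` and `SetCodec` shown here are what back the driver's typed collection getters. A short usage sketch follows, assuming a hypothetical `users` table with `emails` (`list<text>`) and `tags` (`set<text>`) columns in a hypothetical keyspace `ks`.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import java.util.List;
import java.util.Set;

public class CollectionCodecExample {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("ks")) {
      Row row = session.execute("SELECT emails, tags FROM users WHERE id = 1").one();
      // ListCodec deserializes the list<text> column into a mutable ArrayList
      List<String> emails = row.getList("emails", String.class);
      // SetCodec deserializes the set<text> column into a mutable LinkedHashSet
      Set<String> tags = row.getSet("tags", String.class);
      System.out.println(emails + " " + tags);
    }
  }
}
```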
null : new UUID(bytes.getLong(bytes.position()), bytes.getLong(bytes.position() + 8)); - } + protected AbstractMapCodec(TypeCodec keyCodec, TypeCodec valueCodec) { + super( + DataType.map(keyCodec.getCqlType(), valueCodec.getCqlType()), + TypeTokens.mapOf(keyCodec.getJavaType(), valueCodec.getJavaType())); + this.keyCodec = keyCodec; + this.valueCodec = valueCodec; } - /** - * This codec maps a CQL {@link DataType#uuid()} to a Java {@link UUID}. - */ - private static class UUIDCodec extends AbstractUUIDCodec { + @Override + public boolean accepts(Object value) { + checkNotNull(value, "Parameter value cannot be null"); + if (value instanceof Map) { + // runtime type ok, now check key and value types + Map map = (Map) value; + if (map.isEmpty()) return true; + Map.Entry entry = map.entrySet().iterator().next(); + return keyCodec.accepts(entry.getKey()) && valueCodec.accepts(entry.getValue()); + } + return false; + } - private static final UUIDCodec instance = new UUIDCodec(); + @Override + public Map parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '{') + throw new InvalidTypeException( + String.format( + "cannot parse map value from \"%s\", at character %d expecting '{' but got '%c'", + value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == '}') return newInstance(0); + + Map m = new HashMap(); + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse map value from \"%s\", invalid CQL value at character %d", + value, idx), + e); + } + + K k = keyCodec.parse(value.substring(idx, n)); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx++) != ':') + throw new InvalidTypeException( + String.format( + "Cannot parse map value from \"%s\", at character %d expecting ':' but got '%c'", + value, idx, value.charAt(idx))); + idx = ParseUtils.skipSpaces(value, idx); + + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse map value from \"%s\", invalid CQL value at character %d", + value, idx), + e); + } + + V v = valueCodec.parse(value.substring(idx, n)); + idx = n; + + m.put(k, v); + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == '}') return m; + if (value.charAt(idx++) != ',') + throw new InvalidTypeException( + String.format( + "Cannot parse map value from \"%s\", at character %d expecting ',' but got '%c'", + value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException( + String.format("Malformed map value \"%s\", missing closing '}'", value)); + } - private UUIDCodec() { - super(DataType.uuid()); - } + @Override + public String format(Map value) { + if (value == null) return "NULL"; + StringBuilder sb = new StringBuilder(); + sb.append("{"); + int i = 0; + for (Map.Entry e : value.entrySet()) { + if (i++ != 0) sb.append(","); + sb.append(keyCodec.format(e.getKey())); + sb.append(":"); + sb.append(valueCodec.format(e.getValue())); + } + sb.append("}"); + return sb.toString(); + } + @Override + public ByteBuffer serialize(Map value, ProtocolVersion protocolVersion) { + if (value == null) return null; + int i = 0; + ByteBuffer[] bbs = new ByteBuffer[2 * value.size()]; + 
for (Map.Entry entry : value.entrySet()) { + ByteBuffer bbk; + K key = entry.getKey(); + if (key == null) { + throw new NullPointerException("Map keys cannot be null"); + } + try { + bbk = keyCodec.serialize(key, protocolVersion); + } catch (ClassCastException e) { + throw new InvalidTypeException( + String.format( + "Invalid type for map key, expecting %s but got %s", + keyCodec.getJavaType(), key.getClass()), + e); + } + ByteBuffer bbv; + V v = entry.getValue(); + if (v == null) { + throw new NullPointerException("Map values cannot be null"); + } + try { + bbv = valueCodec.serialize(v, protocolVersion); + } catch (ClassCastException e) { + throw new InvalidTypeException( + String.format( + "Invalid type for map value, expecting %s but got %s", + valueCodec.getJavaType(), v.getClass()), + e); + } + bbs[i++] = bbk; + bbs[i++] = bbv; + } + return CodecUtils.pack(bbs, value.size(), protocolVersion); } - /** - * This codec maps a CQL {@link DataType#timeuuid()} to a Java {@link UUID}. + @Override + public Map deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) return newInstance(0); + try { + ByteBuffer input = bytes.duplicate(); + int n = CodecUtils.readSize(input, protocolVersion); + Map m = newInstance(n); + for (int i = 0; i < n; i++) { + ByteBuffer kbb = CodecUtils.readValue(input, protocolVersion); + ByteBuffer vbb = CodecUtils.readValue(input, protocolVersion); + m.put( + keyCodec.deserialize(kbb, protocolVersion), + valueCodec.deserialize(vbb, protocolVersion)); + } + return m; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize a map", e); + } + } + + /** + * Return a new {@link Map} instance with the given estimated size. + * + * @param size The estimated size of the collection to create. + * @return A new {@link Map} instance with the given estimated size. */ - private static class TimeUUIDCodec extends AbstractUUIDCodec { - - private static final TimeUUIDCodec instance = new TimeUUIDCodec(); + protected abstract Map newInstance(int size); + } - private TimeUUIDCodec() { - super(timeuuid()); - } + /** + * This codec maps a CQL {@link DataType#map(DataType, DataType) map type} to a Java {@link Map}. + * Implementation note: this codec returns mutable, non thread-safe {@link LinkedHashMap} + * instances. + */ + private static class MapCodec extends AbstractMapCodec { - @Override - public String format(UUID value) { - if (value == null) - return "NULL"; - if (value.version() != 1) - throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", value)); - return super.format(value); - } + private MapCodec(TypeCodec keyCodec, TypeCodec valueCodec) { + super(keyCodec, valueCodec); + } - @Override - public ByteBuffer serialize(UUID value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - if (value.version() != 1) - throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", value)); - return super.serialize(value, protocolVersion); - } + @Override + protected Map newInstance(int size) { + return new LinkedHashMap(size); } + } - /** - * This codec maps a CQL {@link DataType#varint()} to a Java {@link BigInteger}. - */ - private static class VarintCodec extends TypeCodec { + /** + * Base class for codecs mapping CQL {@link UserType user-defined types} (UDTs) to Java objects. + * It can serve as a base class for codecs dealing with direct UDT-to-Pojo mappings. 
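
Like its collection counterpart, `AbstractMapCodec` leaves only `newInstance` to subclasses. A minimal sketch (hypothetical class name, not part of this change) of a codec that maps `map<text, int>` to a sorted `TreeMap` instead of the `LinkedHashMap` produced by the default `MapCodec`:

```
import com.datastax.driver.core.TypeCodec;
import java.util.Map;
import java.util.TreeMap;

public class TreeMapOfTextToIntCodec extends TypeCodec.AbstractMapCodec<String, Integer> {

  public TreeMapOfTextToIntCodec() {
    // map<text, int> on the CQL side; keys and values are delegated to built-in codecs.
    super(TypeCodec.varchar(), TypeCodec.cint());
  }

  @Override
  protected Map<String, Integer> newInstance(int size) {
    // Return a sorted map instead of the LinkedHashMap used by the default MapCodec.
    return new TreeMap<String, Integer>();
  }
}
```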
+ * + * @param The Java type that the UDT will be mapped to. + */ + public abstract static class AbstractUDTCodec extends TypeCodec { - private static final VarintCodec instance = new VarintCodec(); + protected final UserType definition; - private VarintCodec() { - super(DataType.varint(), BigInteger.class); - } + protected AbstractUDTCodec(UserType definition, Class javaClass) { + this(definition, TypeToken.of(javaClass)); + } - @Override - public BigInteger parse(String value) { - try { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : new BigInteger(value); - } catch (NumberFormatException e) { - throw new InvalidTypeException(String.format("Cannot parse varint value from \"%s\"", value), e); - } - } + protected AbstractUDTCodec(UserType definition, TypeToken javaType) { + super(definition, javaType); + this.definition = definition; + } - @Override - public String format(BigInteger value) { - if (value == null) - return "NULL"; - return value.toString(); - } + @Override + public ByteBuffer serialize(T value, ProtocolVersion protocolVersion) { + if (value == null) return null; + int size = 0; + int length = definition.size(); + ByteBuffer[] elements = new ByteBuffer[length]; + int i = 0; + for (UserType.Field field : definition) { + elements[i] = + serializeField(value, Metadata.quoteIfNecessary(field.getName()), protocolVersion); + size += 4 + (elements[i] == null ? 0 : elements[i].remaining()); + i++; + } + ByteBuffer result = ByteBuffer.allocate(size); + for (ByteBuffer bb : elements) { + if (bb == null) { + result.putInt(-1); + } else { + result.putInt(bb.remaining()); + result.put(bb.duplicate()); + } + } + return (ByteBuffer) result.flip(); + } - @Override - public ByteBuffer serialize(BigInteger value, ProtocolVersion protocolVersion) { - return value == null ? null : ByteBuffer.wrap(value.toByteArray()); - } + @Override + public T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null) return null; + // empty byte buffers will result in empty values + try { + ByteBuffer input = bytes.duplicate(); + T value = newInstance(); + for (UserType.Field field : definition) { + if (!input.hasRemaining()) break; + int n = input.getInt(); + ByteBuffer element = n < 0 ? null : CodecUtils.readBytes(input, n); + value = + deserializeAndSetField( + element, value, Metadata.quoteIfNecessary(field.getName()), protocolVersion); + } + return value; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize a UDT", e); + } + } - @Override - public BigInteger deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return bytes == null || bytes.remaining() == 0 ? null : new BigInteger(Bytes.getArray(bytes)); - } + @Override + public String format(T value) { + if (value == null) return "NULL"; + StringBuilder sb = new StringBuilder("{"); + int i = 0; + for (UserType.Field field : definition) { + if (i > 0) sb.append(","); + sb.append(Metadata.quoteIfNecessary(field.getName())); + sb.append(":"); + sb.append(formatField(value, Metadata.quoteIfNecessary(field.getName()))); + i += 1; + } + sb.append("}"); + return sb.toString(); } - /** - * Base class for codecs mapping CQL {@link DataType#list(DataType) lists} - * and {@link DataType#set(DataType) sets} to Java collections. 
+ @Override + public T parse(String value) { + if (value == null || value.isEmpty() || value.equals("NULL")) return null; + + T v = newInstance(); + + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '{') + throw new InvalidTypeException( + String.format( + "Cannot parse UDT value from \"%s\", at character %d expecting '{' but got '%c'", + value, idx, value.charAt(idx))); + + idx = ParseUtils.skipSpaces(value, idx); + + if (value.charAt(idx) == '}') return v; + + while (idx < value.length()) { + + int n; + try { + n = ParseUtils.skipCQLId(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse UDT value from \"%s\", cannot parse a CQL identifier at character %d", + value, idx), + e); + } + String name = value.substring(idx, n); + idx = n; + + if (!definition.contains(name)) + throw new InvalidTypeException( + String.format("Unknown field %s in value \"%s\"", name, value)); + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx++) != ':') + throw new InvalidTypeException( + String.format( + "Cannot parse UDT value from \"%s\", at character %d expecting ':' but got '%c'", + value, idx, value.charAt(idx))); + idx = ParseUtils.skipSpaces(value, idx); + + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse UDT value from \"%s\", invalid CQL value at character %d", + value, idx), + e); + } + + String input = value.substring(idx, n); + v = parseAndSetField(input, v, name); + idx = n; + + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == '}') return v; + if (value.charAt(idx) != ',') + throw new InvalidTypeException( + String.format( + "Cannot parse UDT value from \"%s\", at character %d expecting ',' but got '%c'", + value, idx, value.charAt(idx))); + ++idx; // skip ',' + + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException( + String.format("Malformed UDT value \"%s\", missing closing '}'", value)); + } + + /** + * Return a new instance of {@code T}. + * + * @return A new instance of {@code T}. 
*/ - public abstract static class AbstractCollectionCodec> extends TypeCodec { - - protected final TypeCodec eltCodec; - - protected AbstractCollectionCodec(CollectionType cqlType, TypeToken javaType, TypeCodec eltCodec) { - super(cqlType, javaType); - checkArgument(cqlType.getName() == Name.LIST || cqlType.getName() == Name.SET, "Expecting list or set type, got %s", cqlType); - this.eltCodec = eltCodec; - } - - @Override - public ByteBuffer serialize(C value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - int i = 0; - ByteBuffer[] bbs = new ByteBuffer[value.size()]; - for (E elt : value) { - if (elt == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer bb; - try { - bb = eltCodec.serialize(elt, protocolVersion); - } catch (ClassCastException e) { - throw new InvalidTypeException( - String.format("Invalid type for %s element, expecting %s but got %s", - cqlType, eltCodec.getJavaType(), elt.getClass()), e); - } - bbs[i++] = bb; - } - return CodecUtils.pack(bbs, value.size(), protocolVersion); - } - - @Override - public C deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return newInstance(0); - try { - ByteBuffer input = bytes.duplicate(); - int size = CodecUtils.readSize(input, protocolVersion); - C coll = newInstance(size); - for (int i = 0; i < size; i++) { - ByteBuffer databb = CodecUtils.readValue(input, protocolVersion); - coll.add(eltCodec.deserialize(databb, protocolVersion)); - } - return coll; - } catch (BufferUnderflowException e) { - throw new InvalidTypeException("Not enough bytes to deserialize collection", e); - } - } - - @Override - public String format(C value) { - if (value == null) - return "NULL"; - StringBuilder sb = new StringBuilder(); - sb.append(getOpeningChar()); - int i = 0; - for (E v : value) { - if (i++ != 0) - sb.append(","); - sb.append(eltCodec.format(v)); - } - sb.append(getClosingChar()); - return sb.toString(); - } - - @Override - public C parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != getOpeningChar()) - throw new InvalidTypeException(String.format("Cannot parse collection value from \"%s\", at character %d expecting '%s' but got '%c'", value, idx, getOpeningChar(), value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == getClosingChar()) - return newInstance(0); - - C l = newInstance(10); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse collection value from \"%s\", invalid CQL value at character %d", value, idx), e); - } - - l.add(eltCodec.parse(value.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == getClosingChar()) - return l; - if (value.charAt(idx++) != ',') - throw new InvalidTypeException(String.format("Cannot parse collection value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new InvalidTypeException(String.format("Malformed collection value \"%s\", missing closing '%s'", value, getClosingChar())); - } - - @Override - public boolean accepts(Object value) { - if (getJavaType().getRawType().isAssignableFrom(value.getClass())) { - // runtime 
type ok, now check element type - Collection coll = (Collection) value; - if (coll.isEmpty()) - return true; - Object elt = coll.iterator().next(); - return eltCodec.accepts(elt); - } - return false; - } - - /** - * Return a new instance of {@code C} with the given estimated size. - * - * @param size The estimated size of the collection to create. - * @return new instance of {@code C} with the given estimated size. - */ - protected abstract C newInstance(int size); - - /** - * Return the opening character to use when formatting values as CQL literals. - * - * @return The opening character to use when formatting values as CQL literals. - */ - private char getOpeningChar() { - return cqlType.getName() == Name.LIST ? '[' : '{'; - } - - /** - * Return the closing character to use when formatting values as CQL literals. - * - * @return The closing character to use when formatting values as CQL literals. - */ - private char getClosingChar() { - return cqlType.getName() == Name.LIST ? ']' : '}'; - } - - } + protected abstract T newInstance(); /** - * This codec maps a CQL {@link DataType#list(DataType) list type} to a Java {@link List}. - * Implementation note: this codec returns mutable, non thread-safe {@link ArrayList} instances. + * Serialize an individual field in an object, as part of serializing the whole object to a CQL + * UDT (see {@link #serialize(Object, ProtocolVersion)}). + * + * @param source The object to read the field from. + * @param fieldName The name of the field. Note that if it is case-sensitive or contains special + * characters, it will be double-quoted (i.e. the string will contain actual quote + * characters, as in {@code "\"foobar\""}). + * @param protocolVersion The protocol version to use. + * @return The serialized field, or {@code null} if that field should be ignored. */ - private static class ListCodec extends AbstractCollectionCodec> { - - private ListCodec(TypeCodec eltCodec) { - super(DataType.list(eltCodec.getCqlType()), TypeTokens.listOf(eltCodec.getJavaType()), eltCodec); - } - - @Override - protected List newInstance(int size) { - return new ArrayList(size); - } - - } + protected abstract ByteBuffer serializeField( + T source, String fieldName, ProtocolVersion protocolVersion); /** - * This codec maps a CQL {@link DataType#set(DataType) set type} to a Java {@link Set}. - * Implementation note: this codec returns mutable, non thread-safe {@link LinkedHashSet} instances. + * Deserialize an individual field and set it on an object, as part of deserializing the whole + * object from a CQL UDT (see {@link #deserialize(ByteBuffer, ProtocolVersion)}). + * + * @param input The serialized form of the field. + * @param target The object to set the field on. + * @param fieldName The name of the field. Note that if it is case-sensitive or contains special + * characters, it will be double-quoted (i.e. the string will contain actual quote + * characters, as in {@code "\"foobar\""}). + * @param protocolVersion The protocol version to use. + * @return The target object with the field set. In most cases this should be the same as {@code + * target}, but if you're dealing with immutable types you'll need to return a different + * instance. + */ + protected abstract T deserializeAndSetField( + ByteBuffer input, T target, String fieldName, ProtocolVersion protocolVersion); + + /** + * Format an individual field in an object as a CQL literal, as part of formatting the whole + * object (see {@link #format(Object)}). + * + * @param source The object to read the field from. 
+ * @param fieldName The name of the field. Note that if it is case-sensitive or contains special + * characters, it will be double-quoted (i.e. the string will contain actual quote + * characters, as in {@code "\"foobar\""}). + * @return The formatted value. */ - private static class SetCodec extends AbstractCollectionCodec> { + protected abstract String formatField(T source, String fieldName); - private SetCodec(TypeCodec eltCodec) { - super(DataType.set(eltCodec.cqlType), TypeTokens.setOf(eltCodec.getJavaType()), eltCodec); - } + /** + * Parse an individual field and set it on an object, as part of parsing the whole object (see + * {@link #parse(String)}). + * + * @param input The String to parse the field from. + * @param target The value to write to. + * @param fieldName The name of the field. Note that if it is case-sensitive or contains special + * characters, it will be double-quoted (i.e. the string will contain actual quote + * characters, as in {@code "\"foobar\""}). + * @return The target object with the field set. In most cases this should be the same as {@code + * target}, but if you're dealing with immutable types you'll need to return a different + * instance. + */ + protected abstract T parseAndSetField(String input, T target, String fieldName); + } - @Override - protected Set newInstance(int size) { - return new LinkedHashSet(size); - } + /** This codec maps a CQL {@link UserType} to a {@link UDTValue}. */ + private static class UDTCodec extends AbstractUDTCodec { + private UDTCodec(UserType definition) { + super(definition, UDTValue.class); } - /** - * Base class for codecs mapping CQL {@link DataType#map(DataType, DataType) maps} to a Java {@link Map}. - */ - public abstract static class AbstractMapCodec extends TypeCodec> { + @Override + public boolean accepts(Object value) { + return super.accepts(value) && ((UDTValue) value).getType().equals(definition); + } - protected final TypeCodec keyCodec; + @Override + protected UDTValue newInstance() { + return definition.newValue(); + } - protected final TypeCodec valueCodec; + @Override + protected ByteBuffer serializeField( + UDTValue source, String fieldName, ProtocolVersion protocolVersion) { + return source.getBytesUnsafe(fieldName); + } - protected AbstractMapCodec(TypeCodec keyCodec, TypeCodec valueCodec) { - super(DataType.map(keyCodec.getCqlType(), valueCodec.getCqlType()), TypeTokens.mapOf(keyCodec.getJavaType(), valueCodec.getJavaType())); - this.keyCodec = keyCodec; - this.valueCodec = valueCodec; - } + @Override + protected UDTValue deserializeAndSetField( + ByteBuffer input, UDTValue target, String fieldName, ProtocolVersion protocolVersion) { + return target.setBytesUnsafe(fieldName, input); + } - @Override - public boolean accepts(Object value) { - if (value instanceof Map) { - // runtime type ok, now check key and value types - Map map = (Map) value; - if (map.isEmpty()) - return true; - Map.Entry entry = map.entrySet().iterator().next(); - return keyCodec.accepts(entry.getKey()) && valueCodec.accepts(entry.getValue()); - } - return false; - } + @Override + protected String formatField(UDTValue source, String fieldName) { + DataType elementType = definition.getFieldType(fieldName); + TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); + return codec.format(source.get(fieldName, codec.getJavaType())); + } - @Override - public Map parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; + @Override + protected UDTValue 
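
`AbstractUDTCodec` is intended as a base for direct UDT-to-POJO mappings, as its javadoc notes. Below is a sketch of such a mapping, assuming a hypothetical `CREATE TYPE address (street text, zip int)` and a hypothetical `Address` POJO; field handling is delegated to the built-in varchar and int codecs.

```
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.UserType;
import java.nio.ByteBuffer;

public class AddressCodec extends TypeCodec.AbstractUDTCodec<AddressCodec.Address> {

  // Hypothetical POJO for a UDT defined as: CREATE TYPE address (street text, zip int)
  public static class Address {
    String street;
    Integer zip;
  }

  private final TypeCodec<String> streetCodec = TypeCodec.varchar();
  private final TypeCodec<Integer> zipCodec = TypeCodec.cint();

  public AddressCodec(UserType addressType) {
    super(addressType, Address.class);
  }

  @Override
  protected Address newInstance() {
    return new Address();
  }

  @Override
  protected ByteBuffer serializeField(Address source, String fieldName, ProtocolVersion pv) {
    // Field names arrive exactly as stored; they would be double-quoted if case-sensitive.
    if ("street".equals(fieldName)) return streetCodec.serialize(source.street, pv);
    if ("zip".equals(fieldName)) return zipCodec.serialize(source.zip, pv);
    return null; // unknown fields are ignored
  }

  @Override
  protected Address deserializeAndSetField(
      ByteBuffer input, Address target, String fieldName, ProtocolVersion pv) {
    if ("street".equals(fieldName)) target.street = streetCodec.deserialize(input, pv);
    if ("zip".equals(fieldName)) target.zip = zipCodec.deserialize(input, pv);
    return target;
  }

  @Override
  protected String formatField(Address source, String fieldName) {
    if ("street".equals(fieldName)) return streetCodec.format(source.street);
    if ("zip".equals(fieldName)) return zipCodec.format(source.zip);
    return "NULL";
  }

  @Override
  protected Address parseAndSetField(String input, Address target, String fieldName) {
    if ("street".equals(fieldName)) target.street = streetCodec.parse(input);
    if ("zip".equals(fieldName)) target.zip = zipCodec.parse(input);
    return target;
  }
}
```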
parseAndSetField(String input, UDTValue target, String fieldName) { + DataType elementType = definition.getFieldType(fieldName); + TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); + target.set(fieldName, codec.parse(input), codec.getJavaType()); + return target; + } + } - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') - throw new InvalidTypeException(String.format("cannot parse map value from \"%s\", at character %d expecting '{' but got '%c'", value, idx, value.charAt(idx))); + /** + * Base class for codecs mapping CQL {@link TupleType tuples} to Java objects. It can serve as a + * base class for codecs dealing with direct tuple-to-Pojo mappings. + * + * @param The Java type that this codec handles. + */ + public abstract static class AbstractTupleCodec extends TypeCodec { - idx = ParseUtils.skipSpaces(value, idx); + protected final TupleType definition; - if (value.charAt(idx) == '}') - return newInstance(0); + protected AbstractTupleCodec(TupleType definition, Class javaClass) { + this(definition, TypeToken.of(javaClass)); + } - Map m = new HashMap(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", invalid CQL value at character %d", value, idx), e); - } + protected AbstractTupleCodec(TupleType definition, TypeToken javaType) { + super(definition, javaType); + this.definition = definition; + } - K k = keyCodec.parse(value.substring(idx, n)); - idx = n; + @Override + public boolean accepts(DataType cqlType) { + // a tuple codec should accept tuple values of a different type, + // provided that the latter is contained in this codec's type. + return super.accepts(cqlType) && definition.contains((TupleType) cqlType); + } - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx++) != ':') - throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", at character %d expecting ':' but got '%c'", value, idx, value.charAt(idx))); - idx = ParseUtils.skipSpaces(value, idx); + @Override + public ByteBuffer serialize(T value, ProtocolVersion protocolVersion) { + if (value == null) return null; + int size = 0; + int length = definition.getComponentTypes().size(); + ByteBuffer[] elements = new ByteBuffer[length]; + for (int i = 0; i < length; i++) { + elements[i] = serializeField(value, i, protocolVersion); + size += 4 + (elements[i] == null ? 0 : elements[i].remaining()); + } + ByteBuffer result = ByteBuffer.allocate(size); + for (ByteBuffer bb : elements) { + if (bb == null) { + result.putInt(-1); + } else { + result.putInt(bb.remaining()); + result.put(bb.duplicate()); + } + } + return (ByteBuffer) result.flip(); + } - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", invalid CQL value at character %d", value, idx), e); - } + @Override + public T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + if (bytes == null) return null; + // empty byte buffers will result in empty values + try { + ByteBuffer input = bytes.duplicate(); + T value = newInstance(); + int i = 0; + while (input.hasRemaining() && i < definition.getComponentTypes().size()) { + int n = input.getInt(); + ByteBuffer element = n < 0 ? 
null : CodecUtils.readBytes(input, n); + value = deserializeAndSetField(element, value, i++, protocolVersion); + } + return value; + } catch (BufferUnderflowException e) { + throw new InvalidTypeException("Not enough bytes to deserialize a tuple", e); + } + } - V v = valueCodec.parse(value.substring(idx, n)); - idx = n; + @Override + public String format(T value) { + if (value == null) return "NULL"; + StringBuilder sb = new StringBuilder("("); + int length = definition.getComponentTypes().size(); + for (int i = 0; i < length; i++) { + if (i > 0) sb.append(","); + sb.append(formatField(value, i)); + } + sb.append(")"); + return sb.toString(); + } - m.put(k, v); + @Override + public T parse(String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') - return m; - if (value.charAt(idx++) != ',') - throw new InvalidTypeException(String.format("Cannot parse map value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); + T v = newInstance(); - idx = ParseUtils.skipSpaces(value, idx); + int idx = ParseUtils.skipSpaces(value, 0); + if (value.charAt(idx++) != '(') + throw new InvalidTypeException( + String.format( + "Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", + value, idx, value.charAt(idx))); - } - throw new InvalidTypeException(String.format("Malformed map value \"%s\", missing closing '}'", value)); - } + idx = ParseUtils.skipSpaces(value, idx); - @Override - public String format(Map value) { - if (value == null) - return "NULL"; - StringBuilder sb = new StringBuilder(); - sb.append("{"); - int i = 0; - for (Map.Entry e : value.entrySet()) { - if (i++ != 0) - sb.append(","); - sb.append(keyCodec.format(e.getKey())); - sb.append(":"); - sb.append(valueCodec.format(e.getValue())); - } - sb.append("}"); - return sb.toString(); - } + if (value.charAt(idx) == ')') return v; - @Override - public ByteBuffer serialize(Map value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - int i = 0; - ByteBuffer[] bbs = new ByteBuffer[2 * value.size()]; - for (Map.Entry entry : value.entrySet()) { - ByteBuffer bbk; - K key = entry.getKey(); - if (key == null) { - throw new NullPointerException("Map keys cannot be null"); - } - try { - bbk = keyCodec.serialize(key, protocolVersion); - } catch (ClassCastException e) { - throw new InvalidTypeException(String.format("Invalid type for map key, expecting %s but got %s", keyCodec.getJavaType(), key.getClass()), e); - } - ByteBuffer bbv; - V v = entry.getValue(); - if (v == null) { - throw new NullPointerException("Map values cannot be null"); - } - try { - bbv = valueCodec.serialize(v, protocolVersion); - } catch (ClassCastException e) { - throw new InvalidTypeException(String.format("Invalid type for map value, expecting %s but got %s", valueCodec.getJavaType(), v.getClass()), e); - } - bbs[i++] = bbk; - bbs[i++] = bbv; - } - return CodecUtils.pack(bbs, value.size(), protocolVersion); + int i = 0; + while (idx < value.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new InvalidTypeException( + String.format( + "Cannot parse tuple value from \"%s\", invalid CQL value at character %d", + value, idx), + e); } - @Override - public Map deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) - return newInstance(0); - try { - 
ByteBuffer input = bytes.duplicate(); - int n = CodecUtils.readSize(input, protocolVersion); - Map m = newInstance(n); - for (int i = 0; i < n; i++) { - ByteBuffer kbb = CodecUtils.readValue(input, protocolVersion); - ByteBuffer vbb = CodecUtils.readValue(input, protocolVersion); - m.put(keyCodec.deserialize(kbb, protocolVersion), valueCodec.deserialize(vbb, protocolVersion)); - } - return m; - } catch (BufferUnderflowException e) { - throw new InvalidTypeException("Not enough bytes to deserialize a map", e); - } - } + String input = value.substring(idx, n); + v = parseAndSetField(input, v, i); + idx = n; + i += 1; - /** - * Return a new {@link Map} instance with the given estimated size. - * - * @param size The estimated size of the collection to create. - * @return A new {@link Map} instance with the given estimated size. - */ - protected abstract Map newInstance(int size); + idx = ParseUtils.skipSpaces(value, idx); + if (value.charAt(idx) == ')') return v; + if (value.charAt(idx) != ',') + throw new InvalidTypeException( + String.format( + "Cannot parse tuple value from \"%s\", at character %d expecting ',' but got '%c'", + value, idx, value.charAt(idx))); + ++idx; // skip ',' + idx = ParseUtils.skipSpaces(value, idx); + } + throw new InvalidTypeException( + String.format("Malformed tuple value \"%s\", missing closing ')'", value)); } /** - * This codec maps a CQL {@link DataType#map(DataType, DataType) map type} to a Java {@link Map}. - * Implementation note: this codec returns mutable, non thread-safe {@link LinkedHashMap} instances. + * Return a new instance of {@code T}. + * + * @return A new instance of {@code T}. */ - private static class MapCodec extends AbstractMapCodec { - - private MapCodec(TypeCodec keyCodec, TypeCodec valueCodec) { - super(keyCodec, valueCodec); - } - - @Override - protected Map newInstance(int size) { - return new LinkedHashMap(size); - } - - } + protected abstract T newInstance(); /** - * Base class for codecs mapping CQL {@link UserType user-defined types} (UDTs) to Java objects. - * It can serve as a base class for codecs dealing with direct UDT-to-Pojo mappings. + * Serialize an individual field in an object, as part of serializing the whole object to a CQL + * tuple (see {@link #serialize(Object, ProtocolVersion)}). * - * @param The Java type that the UDT will be mapped to. + * @param source The object to read the field from. + * @param index The index of the field. + * @param protocolVersion The protocol version to use. + * @return The serialized field, or {@code null} if that field should be ignored. */ - public abstract static class AbstractUDTCodec extends TypeCodec { - - protected final UserType definition; - - protected AbstractUDTCodec(UserType definition, Class javaClass) { - this(definition, TypeToken.of(javaClass)); - } - - protected AbstractUDTCodec(UserType definition, TypeToken javaType) { - super(definition, javaType); - this.definition = definition; - } - - @Override - public ByteBuffer serialize(T value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - int size = 0; - int length = definition.size(); - ByteBuffer[] elements = new ByteBuffer[length]; - int i = 0; - for (UserType.Field field : definition) { - elements[i] = serializeField(value, Metadata.quoteIfNecessary(field.getName()), protocolVersion); - size += 4 + (elements[i] == null ? 
0 : elements[i].remaining()); - i++; - } - ByteBuffer result = ByteBuffer.allocate(size); - for (ByteBuffer bb : elements) { - if (bb == null) { - result.putInt(-1); - } else { - result.putInt(bb.remaining()); - result.put(bb.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Override - public T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null) - return null; - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - T value = newInstance(); - for (UserType.Field field : definition) { - if (!input.hasRemaining()) - break; - int n = input.getInt(); - ByteBuffer element = n < 0 ? null : CodecUtils.readBytes(input, n); - value = deserializeAndSetField(element, value, Metadata.quoteIfNecessary(field.getName()), protocolVersion); - } - return value; - } catch (BufferUnderflowException e) { - throw new InvalidTypeException("Not enough bytes to deserialize a UDT", e); - } - } - - @Override - public String format(T value) { - if (value == null) - return "NULL"; - StringBuilder sb = new StringBuilder("{"); - int i = 0; - for (UserType.Field field : definition) { - if (i > 0) - sb.append(","); - sb.append(Metadata.quoteIfNecessary(field.getName())); - sb.append(":"); - sb.append(formatField(value, Metadata.quoteIfNecessary(field.getName()))); - i += 1; - } - sb.append("}"); - return sb.toString(); - } - - @Override - public T parse(String value) { - if (value == null || value.isEmpty() || value.equals("NULL")) - return null; - - T v = newInstance(); - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') - throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", at character %d expecting '{' but got '%c'", value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == '}') - return v; - - while (idx < value.length()) { - - int n; - try { - n = ParseUtils.skipCQLId(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", cannot parse a CQL identifier at character %d", value, idx), e); - } - String name = value.substring(idx, n); - idx = n; - - if (!definition.contains(name)) - throw new InvalidTypeException(String.format("Unknown field %s in value \"%s\"", name, value)); - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx++) != ':') - throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", at character %d expecting ':' but got '%c'", value, idx, value.charAt(idx))); - idx = ParseUtils.skipSpaces(value, idx); - - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", invalid CQL value at character %d", value, idx), e); - } - - String input = value.substring(idx, n); - v = parseAndSetField(input, v, name); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') - return v; - if (value.charAt(idx) != ',') - throw new InvalidTypeException(String.format("Cannot parse UDT value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); - ++idx; // skip ',' - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new InvalidTypeException(String.format("Malformed UDT value \"%s\", missing closing '}'", value)); - } - - /** - * Return a new instance of {@code T}. - * - * @return A new instance of {@code T}. 
- */ - protected abstract T newInstance(); - - /** - * Serialize an individual field in an object, as part of serializing the whole object to a CQL - * UDT (see {@link #serialize(Object, ProtocolVersion)}). - * - * @param source The object to read the field from. - * @param fieldName The name of the field. Note that if it is case-sensitive or contains special - * characters, it will be double-quoted (i.e. the string will contain actual - * quote characters, as in {@code "\"foobar\""}). - * @param protocolVersion The protocol version to use. - * @return The serialized field, or {@code null} if that field should be ignored. - */ - protected abstract ByteBuffer serializeField(T source, String fieldName, ProtocolVersion protocolVersion); - - /** - * Deserialize an individual field and set it on an object, as part of deserializing the whole - * object from a CQL UDT (see {@link #deserialize(ByteBuffer, ProtocolVersion)}). - * - * @param input The serialized form of the field. - * @param target The object to set the field on. - * @param fieldName The name of the field. Note that if it is case-sensitive or contains special - * characters, it will be double-quoted (i.e. the string will contain actual - * quote characters, as in {@code "\"foobar\""}). - * @param protocolVersion The protocol version to use. - * @return The target object with the field set. In most cases this should be the same as {@code target}, but if you're dealing - * with immutable types you'll need to return a different instance. - */ - protected abstract T deserializeAndSetField(ByteBuffer input, T target, String fieldName, ProtocolVersion protocolVersion); - - /** - * Format an individual field in an object as a CQL literal, as part of formatting the whole object - * (see {@link #format(Object)}). - * - * @param source The object to read the field from. - * @param fieldName The name of the field. Note that if it is case-sensitive or contains special - * characters, it will be double-quoted (i.e. the string will contain actual - * quote characters, as in {@code "\"foobar\""}). - * @return The formatted value. - */ - protected abstract String formatField(T source, String fieldName); - - /** - * Parse an individual field and set it on an object, as part of parsing the whole object - * (see {@link #parse(String)}). - * - * @param input The String to parse the field from. - * @param target The value to write to. - * @param fieldName The name of the field. Note that if it is case-sensitive or contains special - * characters, it will be double-quoted (i.e. the string will contain actual - * quote characters, as in {@code "\"foobar\""}). - * @return The target object with the field set. In most cases this should be the same as {@code target}, but if you're dealing - * with immutable types you'll need to return a different instance. - */ - protected abstract T parseAndSetField(String input, T target, String fieldName); - } + protected abstract ByteBuffer serializeField( + T source, int index, ProtocolVersion protocolVersion); /** - * This codec maps a CQL {@link UserType} to a {@link UDTValue}. + * Deserialize an individual field and set it on an object, as part of deserializing the whole + * object from a CQL tuple (see {@link #deserialize(ByteBuffer, ProtocolVersion)}). + * + * @param input The serialized form of the field. + * @param target The object to set the field on. + * @param index The index of the field. + * @param protocolVersion The protocol version to use. + * @return The target object with the field set. 
In most cases this should be the same as {@code + * target}, but if you're dealing with immutable types you'll need to return a different + * instance. */ - private static class UDTCodec extends AbstractUDTCodec { - - private UDTCodec(UserType definition) { - super(definition, UDTValue.class); - } - - @Override - public boolean accepts(Object value) { - return super.accepts(value) && ((UDTValue) value).getType().equals(definition); - } - - @Override - protected UDTValue newInstance() { - return definition.newValue(); - } - - @Override - protected ByteBuffer serializeField(UDTValue source, String fieldName, ProtocolVersion protocolVersion) { - return source.getBytesUnsafe(fieldName); - } - - @Override - protected UDTValue deserializeAndSetField(ByteBuffer input, UDTValue target, String fieldName, ProtocolVersion protocolVersion) { - return target.setBytesUnsafe(fieldName, input); - } - - @Override - protected String formatField(UDTValue source, String fieldName) { - DataType elementType = definition.getFieldType(fieldName); - TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); - return codec.format(source.get(fieldName, codec.getJavaType())); - } - - @Override - protected UDTValue parseAndSetField(String input, UDTValue target, String fieldName) { - DataType elementType = definition.getFieldType(fieldName); - TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); - target.set(fieldName, codec.parse(input), codec.getJavaType()); - return target; - } - } + protected abstract T deserializeAndSetField( + ByteBuffer input, T target, int index, ProtocolVersion protocolVersion); /** - * Base class for codecs mapping CQL {@link TupleType tuples} to Java objects. - * It can serve as a base class for codecs dealing with - * direct tuple-to-Pojo mappings. + * Format an individual field in an object as a CQL literal, as part of formatting the whole + * object (see {@link #format(Object)}). * - * @param The Java type that this codec handles. + * @param source The object to read the field from. + * @param index The index of the field. + * @return The formatted value. */ - public abstract static class AbstractTupleCodec extends TypeCodec { - - protected final TupleType definition; - - protected AbstractTupleCodec(TupleType definition, Class javaClass) { - this(definition, TypeToken.of(javaClass)); - } - - protected AbstractTupleCodec(TupleType definition, TypeToken javaType) { - super(definition, javaType); - this.definition = definition; - } - - @Override - public boolean accepts(DataType cqlType) { - // a tuple codec should accept tuple values of a different type, - // provided that the latter is contained in this codec's type. - return super.accepts(cqlType) && definition.contains((TupleType) cqlType); - } - - @Override - public ByteBuffer serialize(T value, ProtocolVersion protocolVersion) { - if (value == null) - return null; - int size = 0; - int length = definition.getComponentTypes().size(); - ByteBuffer[] elements = new ByteBuffer[length]; - for (int i = 0; i < length; i++) { - elements[i] = serializeField(value, i, protocolVersion); - size += 4 + (elements[i] == null ? 
0 : elements[i].remaining()); - } - ByteBuffer result = ByteBuffer.allocate(size); - for (ByteBuffer bb : elements) { - if (bb == null) { - result.putInt(-1); - } else { - result.putInt(bb.remaining()); - result.put(bb.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Override - public T deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - if (bytes == null) - return null; - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - T value = newInstance(); - int i = 0; - while (input.hasRemaining() && i < definition.getComponentTypes().size()) { - int n = input.getInt(); - ByteBuffer element = n < 0 ? null : CodecUtils.readBytes(input, n); - value = deserializeAndSetField(element, value, i++, protocolVersion); - } - return value; - } catch (BufferUnderflowException e) { - throw new InvalidTypeException("Not enough bytes to deserialize a tuple", e); - } - } - - @Override - public String format(T value) { - if (value == null) - return "NULL"; - StringBuilder sb = new StringBuilder("("); - int length = definition.getComponentTypes().size(); - for (int i = 0; i < length; i++) { - if (i > 0) - sb.append(","); - sb.append(formatField(value, i)); - } - sb.append(")"); - return sb.toString(); - } - - @Override - public T parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - - T v = newInstance(); - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '(') - throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == ')') - return v; - - int i = 0; - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", invalid CQL value at character %d", value, idx), e); - } - - String input = value.substring(idx, n); - v = parseAndSetField(input, v, i); - idx = n; - i += 1; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == ')') - return v; - if (value.charAt(idx) != ',') - throw new InvalidTypeException(String.format("Cannot parse tuple value from \"%s\", at character %d expecting ',' but got '%c'", value, idx, value.charAt(idx))); - ++idx; // skip ',' - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new InvalidTypeException(String.format("Malformed tuple value \"%s\", missing closing ')'", value)); - } - - /** - * Return a new instance of {@code T}. - * - * @return A new instance of {@code T}. - */ - protected abstract T newInstance(); - - /** - * Serialize an individual field in an object, as part of serializing the whole object to a CQL - * tuple (see {@link #serialize(Object, ProtocolVersion)}). - * - * @param source The object to read the field from. - * @param index The index of the field. - * @param protocolVersion The protocol version to use. - * @return The serialized field, or {@code null} if that field should be ignored. - */ - protected abstract ByteBuffer serializeField(T source, int index, ProtocolVersion protocolVersion); - - /** - * Deserialize an individual field and set it on an object, as part of deserializing the whole - * object from a CQL tuple (see {@link #deserialize(ByteBuffer, ProtocolVersion)}). - * - * @param input The serialized form of the field. 
- * @param target The object to set the field on. - * @param index The index of the field. - * @param protocolVersion The protocol version to use. - * @return The target object with the field set. In most cases this should be the same as {@code target}, but if you're dealing - * with immutable types you'll need to return a different instance. - */ - protected abstract T deserializeAndSetField(ByteBuffer input, T target, int index, ProtocolVersion protocolVersion); - - /** - * Format an individual field in an object as a CQL literal, as part of formatting the whole object - * (see {@link #format(Object)}). - * - * @param source The object to read the field from. - * @param index The index of the field. - * @return The formatted value. - */ - protected abstract String formatField(T source, int index); - - /** - * Parse an individual field and set it on an object, as part of parsing the whole object - * (see {@link #parse(String)}). - * - * @param input The String to parse the field from. - * @param target The value to write to. - * @param index The index of the field. - * @return The target object with the field set. In most cases this should be the same as {@code target}, but if you're dealing - * with immutable types you'll need to return a different instance. - */ - protected abstract T parseAndSetField(String input, T target, int index); - - } + protected abstract String formatField(T source, int index); /** - * This codec maps a CQL {@link TupleType tuple} to a {@link TupleValue}. + * Parse an individual field and set it on an object, as part of parsing the whole object (see + * {@link #parse(String)}). + * + * @param input The String to parse the field from. + * @param target The value to write to. + * @param index The index of the field. + * @return The target object with the field set. In most cases this should be the same as {@code + * target}, but if you're dealing with immutable types you'll need to return a different + * instance. */ - private static class TupleCodec extends AbstractTupleCodec { - - private TupleCodec(TupleType definition) { - super(definition, TupleValue.class); - } - - @Override - public boolean accepts(Object value) { - // a tuple codec should accept tuple values of a different type, - // provided that the latter is contained in this codec's type. - return super.accepts(value) && definition.contains(((TupleValue) value).getType()); - } + protected abstract T parseAndSetField(String input, T target, int index); + } - @Override - protected TupleValue newInstance() { - return definition.newValue(); - } + /** This codec maps a CQL {@link TupleType tuple} to a {@link TupleValue}. */ + private static class TupleCodec extends AbstractTupleCodec { - @Override - protected ByteBuffer serializeField(TupleValue source, int index, ProtocolVersion protocolVersion) { - if (index >= source.values.length) - return null; - return source.getBytesUnsafe(index); - } + private TupleCodec(TupleType definition) { + super(definition, TupleValue.class); + } - @Override - protected TupleValue deserializeAndSetField(ByteBuffer input, TupleValue target, int index, ProtocolVersion protocolVersion) { - if (index >= target.values.length) - return target; - return target.setBytesUnsafe(index, input); - } + @Override + public boolean accepts(Object value) { + // a tuple codec should accept tuple values of a different type, + // provided that the latter is contained in this codec's type. 
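
`AbstractTupleCodec` works the same way but addresses fields by position rather than by name. A sketch (hypothetical class and POJO, not part of this change) mapping the CQL type `tuple<float, float>` to a `Coordinates` POJO; the `TupleType` passed to the constructor would typically come from cluster metadata.

```
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.TupleType;
import com.datastax.driver.core.TypeCodec;
import java.nio.ByteBuffer;

public class CoordinatesCodec extends TypeCodec.AbstractTupleCodec<CoordinatesCodec.Coordinates> {

  // Hypothetical POJO for the CQL type tuple<float, float>
  public static class Coordinates {
    Float x;
    Float y;
  }

  private final TypeCodec<Float> floatCodec = TypeCodec.cfloat();

  public CoordinatesCodec(TupleType coordinatesType) {
    super(coordinatesType, Coordinates.class);
  }

  @Override
  protected Coordinates newInstance() {
    return new Coordinates();
  }

  @Override
  protected ByteBuffer serializeField(Coordinates source, int index, ProtocolVersion pv) {
    return floatCodec.serialize(index == 0 ? source.x : source.y, pv);
  }

  @Override
  protected Coordinates deserializeAndSetField(
      ByteBuffer input, Coordinates target, int index, ProtocolVersion pv) {
    if (index == 0) target.x = floatCodec.deserialize(input, pv);
    else target.y = floatCodec.deserialize(input, pv);
    return target;
  }

  @Override
  protected String formatField(Coordinates source, int index) {
    return floatCodec.format(index == 0 ? source.x : source.y);
  }

  @Override
  protected Coordinates parseAndSetField(String input, Coordinates target, int index) {
    if (index == 0) target.x = floatCodec.parse(input);
    else target.y = floatCodec.parse(input);
    return target;
  }
}
```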
+ return super.accepts(value) && definition.contains(((TupleValue) value).getType()); + } - @Override - protected String formatField(TupleValue value, int index) { - DataType elementType = definition.getComponentTypes().get(index); - TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); - return codec.format(value.get(index, codec.getJavaType())); - } + @Override + protected TupleValue newInstance() { + return definition.newValue(); + } - @Override - protected TupleValue parseAndSetField(String input, TupleValue target, int index) { - DataType elementType = definition.getComponentTypes().get(index); - TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); - target.set(index, codec.parse(input), codec.getJavaType()); - return target; - } + @Override + protected ByteBuffer serializeField( + TupleValue source, int index, ProtocolVersion protocolVersion) { + if (index >= source.values.length) return null; + return source.getBytesUnsafe(index); + } + @Override + protected TupleValue deserializeAndSetField( + ByteBuffer input, TupleValue target, int index, ProtocolVersion protocolVersion) { + if (index >= target.values.length) return target; + return target.setBytesUnsafe(index, input); } - private static class DurationCodec extends TypeCodec { + @Override + protected String formatField(TupleValue value, int index) { + DataType elementType = definition.getComponentTypes().get(index); + TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); + return codec.format(value.get(index, codec.getJavaType())); + } - private static final DurationCodec instance = new DurationCodec(); + @Override + protected TupleValue parseAndSetField(String input, TupleValue target, int index) { + DataType elementType = definition.getComponentTypes().get(index); + TypeCodec codec = definition.getCodecRegistry().codecFor(elementType); + target.set(index, codec.parse(input), codec.getJavaType()); + return target; + } + } - private DurationCodec() { - super(DataType.duration(), Duration.class); - } + private static class DurationCodec extends TypeCodec { - @Override - public ByteBuffer serialize(Duration duration, ProtocolVersion protocolVersion) throws InvalidTypeException { - if (duration == null) - return null; - long months = duration.getMonths(); - long days = duration.getDays(); - long nanoseconds = duration.getNanoseconds(); - int size = VIntCoding.computeVIntSize(months) - + VIntCoding.computeVIntSize(days) - + VIntCoding.computeVIntSize(nanoseconds); - ByteArrayDataOutput out = ByteStreams.newDataOutput(size); - try { - VIntCoding.writeVInt(months, out); - VIntCoding.writeVInt(days, out); - VIntCoding.writeVInt(nanoseconds, out); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - return ByteBuffer.wrap(out.toByteArray()); - } + private static final DurationCodec instance = new DurationCodec(); - @Override - public Duration deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else { - DataInput in = ByteStreams.newDataInput(Bytes.getArray(bytes)); - try { - int months = (int) VIntCoding.readVInt(in); - int days = (int) VIntCoding.readVInt(in); - long nanoseconds = VIntCoding.readVInt(in); - return Duration.newInstance(months, days, nanoseconds); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - } - } + private DurationCodec() { + super(DataType.duration(), Duration.class); + } - @Override - public 
Duration parse(String value) throws InvalidTypeException { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - return null; - return Duration.from(value); - } + @Override + public ByteBuffer serialize(Duration duration, ProtocolVersion protocolVersion) + throws InvalidTypeException { + if (duration == null) return null; + long months = duration.getMonths(); + long days = duration.getDays(); + long nanoseconds = duration.getNanoseconds(); + int size = + VIntCoding.computeVIntSize(months) + + VIntCoding.computeVIntSize(days) + + VIntCoding.computeVIntSize(nanoseconds); + ByteArrayDataOutput out = ByteStreams.newDataOutput(size); + try { + VIntCoding.writeVInt(months, out); + VIntCoding.writeVInt(days, out); + VIntCoding.writeVInt(nanoseconds, out); + } catch (IOException e) { + // cannot happen + throw new AssertionError(); + } + return ByteBuffer.wrap(out.toByteArray()); + } - @Override - public String format(Duration value) throws InvalidTypeException { - if (value == null) - return "NULL"; - return value.toString(); - } + @Override + public Duration deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + if (bytes == null || bytes.remaining() == 0) { + return null; + } else { + DataInput in = ByteStreams.newDataInput(Bytes.getArray(bytes)); + try { + int months = (int) VIntCoding.readVInt(in); + int days = (int) VIntCoding.readVInt(in); + long nanoseconds = VIntCoding.readVInt(in); + return Duration.newInstance(months, days, nanoseconds); + } catch (IOException e) { + // cannot happen + throw new AssertionError(); + } + } + } + @Override + public Duration parse(String value) throws InvalidTypeException { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; + return Duration.from(value); } + @Override + public String format(Duration value) throws InvalidTypeException { + if (value == null) return "NULL"; + return value.toString(); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TypeTokens.java b/driver-core/src/main/java/com/datastax/driver/core/TypeTokens.java index e9ceedde446..990b4c4dd86 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TypeTokens.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TypeTokens.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,113 +19,101 @@ import com.google.common.reflect.TypeParameter; import com.google.common.reflect.TypeToken; - import java.util.List; import java.util.Map; import java.util.Set; -/** - * Utility methods to create {@code TypeToken} instances. - */ +/** Utility methods to create {@code TypeToken} instances. 
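As a quick, hedged sketch of how the factory methods of this class are typically used (only Guava and TypeTokens are involved; the printed type strings are indicative):

```java
import com.datastax.driver.core.TypeTokens;
import com.google.common.reflect.TypeToken;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class TypeTokensSketch {
  public static void main(String[] args) {
    // The factory methods build parameterized tokens without the anonymous-class
    // boilerplate that the implementation below has to spell out.
    TypeToken<List<String>> listToken = TypeTokens.listOf(String.class);
    TypeToken<Set<Integer>> setToken = TypeTokens.setOf(Integer.class);
    TypeToken<Map<String, List<String>>> mapToken =
        TypeTokens.mapOf(TypeToken.of(String.class), TypeTokens.listOf(String.class));

    System.out.println(listToken); // e.g. java.util.List<java.lang.String>
    System.out.println(setToken);  // e.g. java.util.Set<java.lang.Integer>
    System.out.println(mapToken);  // e.g. java.util.Map<java.lang.String, java.util.List<java.lang.String>>
  }
}
```

Such tokens are what the driver's TypeToken-based accessors (for example the codecFor overloads of CodecRegistry) expect when the target Java type is generic.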
*/ public final class TypeTokens { - private TypeTokens() { - } + private TypeTokens() {} - /** - * Create a {@link TypeToken} that represents a {@link List} whose elements - * are of the given type. - * - * @param eltType The list element type. - * @param The list element type. - * @return A {@link TypeToken} that represents a {@link List} whose elements - * are of the given type. - */ - public static TypeToken> listOf(Class eltType) { - // @formatter:off - return new TypeToken>(){}.where(new TypeParameter(){}, eltType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link List} whose elements are of the given type. + * + * @param eltType The list element type. + * @param The list element type. + * @return A {@link TypeToken} that represents a {@link List} whose elements are of the given + * type. + */ + public static TypeToken> listOf(Class eltType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, eltType); + // @formatter:on + } - /** - * Create a {@link TypeToken} that represents a {@link List} whose elements - * are of the given type. - * - * @param eltType The list element type. - * @param The list element type. - * @return A {@link TypeToken} that represents a {@link List} whose elements - * are of the given type. - */ - public static TypeToken> listOf(TypeToken eltType) { - // @formatter:off - return new TypeToken>(){}.where(new TypeParameter(){}, eltType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link List} whose elements are of the given type. + * + * @param eltType The list element type. + * @param The list element type. + * @return A {@link TypeToken} that represents a {@link List} whose elements are of the given + * type. + */ + public static TypeToken> listOf(TypeToken eltType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, eltType); + // @formatter:on + } - /** - * Create a {@link TypeToken} that represents a {@link Set} whose elements - * are of the given type. - * - * @param eltType The set element type. - * @param The set element type. - * @return A {@link TypeToken} that represents a {@link Set} whose elements - * are of the given type. - */ - public static TypeToken> setOf(Class eltType) { - // @formatter:off - return new TypeToken>(){}.where(new TypeParameter(){}, eltType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link Set} whose elements are of the given type. + * + * @param eltType The set element type. + * @param The set element type. + * @return A {@link TypeToken} that represents a {@link Set} whose elements are of the given type. + */ + public static TypeToken> setOf(Class eltType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, eltType); + // @formatter:on + } - /** - * Create a {@link TypeToken} that represents a {@link Set} whose elements - * are of the given type. - * - * @param eltType The set element type. - * @param The set element type. - * @return A {@link TypeToken} that represents a {@link Set} whose elements - * are of the given type. - */ - public static TypeToken> setOf(TypeToken eltType) { - // @formatter:off - return new TypeToken>(){}.where(new TypeParameter(){}, eltType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link Set} whose elements are of the given type. + * + * @param eltType The set element type. + * @param The set element type. 
+ * @return A {@link TypeToken} that represents a {@link Set} whose elements are of the given type. + */ + public static TypeToken> setOf(TypeToken eltType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, eltType); + // @formatter:on + } - /** - * Create a {@link TypeToken} that represents a {@link Map} whose keys - * and values are of the given key and value types. - * - * @param keyType The map key type. - * @param valueType The map value type - * @param The map key type. - * @param The map value type - * @return A {@link TypeToken} that represents a {@link Map} whose keys - * and values are of the given key and value types - */ - public static TypeToken> mapOf(Class keyType, Class valueType) { - // @formatter:off - return new TypeToken>(){} - .where(new TypeParameter(){}, keyType) - .where(new TypeParameter(){}, valueType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link Map} whose keys and values are of the given + * key and value types. + * + * @param keyType The map key type. + * @param valueType The map value type + * @param The map key type. + * @param The map value type + * @return A {@link TypeToken} that represents a {@link Map} whose keys and values are of the + * given key and value types + */ + public static TypeToken> mapOf(Class keyType, Class valueType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, keyType) + .where(new TypeParameter() {}, valueType); + // @formatter:on + } - /** - * Create a {@link TypeToken} that represents a {@link Map} whose keys - * and values are of the given key and value types. - * - * @param keyType The map key type. - * @param valueType The map value type - * @param The map key type. - * @param The map value type - * @return A {@link TypeToken} that represents a {@link Map} whose keys - * and values are of the given key and value types - */ - public static TypeToken> mapOf(TypeToken keyType, TypeToken valueType) { - // @formatter:off - return new TypeToken>(){} - .where(new TypeParameter(){}, keyType) - .where(new TypeParameter(){}, valueType); - // @formatter:on - } + /** + * Create a {@link TypeToken} that represents a {@link Map} whose keys and values are of the given + * key and value types. + * + * @param keyType The map key type. + * @param valueType The map value type + * @param The map key type. + * @param The map value type + * @return A {@link TypeToken} that represents a {@link Map} whose keys and values are of the + * given key and value types + */ + public static TypeToken> mapOf(TypeToken keyType, TypeToken valueType) { + // @formatter:off + return new TypeToken>() {}.where(new TypeParameter() {}, keyType) + .where(new TypeParameter() {}, valueType); + // @formatter:on + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java b/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java index b6aa875e27e..a5af7e2e5e7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java +++ b/driver-core/src/main/java/com/datastax/driver/core/UDTValue.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,72 +17,68 @@ */ package com.datastax.driver.core; -/** - * A value for a User Defined Type. - */ +/** A value for a User Defined Type. */ public class UDTValue extends AbstractData { - private final UserType definition; + private final UserType definition; - UDTValue(UserType definition) { - super(definition.getProtocolVersion(), definition.size()); - this.definition = definition; - } + UDTValue(UserType definition) { + super(definition.getProtocolVersion(), definition.size()); + this.definition = definition; + } - @Override - protected DataType getType(int i) { - return definition.byIdx[i].getType(); - } + @Override + protected DataType getType(int i) { + return definition.byIdx[i].getType(); + } - @Override - protected String getName(int i) { - return definition.byIdx[i].getName(); - } + @Override + protected String getName(int i) { + return definition.byIdx[i].getName(); + } - @Override - protected CodecRegistry getCodecRegistry() { - return definition.getCodecRegistry(); - } + @Override + protected CodecRegistry getCodecRegistry() { + return definition.getCodecRegistry(); + } - @Override - protected int[] getAllIndexesOf(String name) { - int[] indexes = definition.byName.get(Metadata.handleId(name)); - if (indexes == null) - throw new IllegalArgumentException(name + " is not a field defined in this UDT"); - return indexes; - } + @Override + protected int[] getAllIndexesOf(String name) { + int[] indexes = definition.byName.get(Metadata.handleId(name)); + if (indexes == null) + throw new IllegalArgumentException(name + " is not a field defined in this UDT"); + return indexes; + } - /** - * The UDT this is a value of. - * - * @return the UDT this is a value of. - */ - public UserType getType() { - return definition; - } + /** + * The UDT this is a value of. + * + * @return the UDT this is a value of. 
+ */ + public UserType getType() { + return definition; + } - @Override - public boolean equals(Object o) { - if (!(o instanceof UDTValue)) - return false; + @Override + public boolean equals(Object o) { + if (!(o instanceof UDTValue)) return false; - UDTValue that = (UDTValue) o; - if (!definition.equals(that.definition)) - return false; + UDTValue that = (UDTValue) o; + if (!definition.equals(that.definition)) return false; - return super.equals(o); - } + return super.equals(o); + } - @Override - public int hashCode() { - return super.hashCode(); - } + @Override + public int hashCode() { + return super.hashCode(); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - TypeCodec codec = getCodecRegistry().codecFor(definition); - sb.append(codec.format(this)); - return sb.toString(); - } + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + TypeCodec codec = getCodecRegistry().codecFor(definition); + sb.append(codec.format(this)); + return sb.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/UserType.java b/driver-core/src/main/java/com/datastax/driver/core/UserType.java index 25142374c8c..50afc48913a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/UserType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/UserType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,366 +19,397 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; - -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; /** * A User Defined Type (UDT). - *

- * A UDT is a essentially a named collection of fields (with a name and a type). + * + *

A UDT is a essentially a named collection of fields (with a name and a type). */ public class UserType extends DataType implements Iterable { - static final String TYPE_NAME = "type_name"; - private static final String COLS_NAMES = "field_names"; - static final String COLS_TYPES = "field_types"; - - private final String keyspace; - private final String typeName; - private final boolean frozen; - private final ProtocolVersion protocolVersion; - - // can be null, if this object is being constructed from a response message - // see Responses.Result.Rows.Metadata.decode() - private volatile CodecRegistry codecRegistry; - - // Note that we don't expose the order of fields, from an API perspective this is a map - // of String->Field, but internally we care about the order because the serialization format - // of UDT expects a particular order. - final Field[] byIdx; - // For a given name, we can only have one field with that name, so we don't need a int[] in - // practice. However, storing one element arrays save allocations in UDTValue.getAllIndexesOf - // implementation. - final Map byName; - - private UserType(Name name, String keyspace, String typeName, boolean frozen, ProtocolVersion protocolVersion, CodecRegistry codecRegistry, Field[] byIdx, Map byName) { - super(name); - this.keyspace = keyspace; - this.typeName = typeName; - this.frozen = frozen; - this.protocolVersion = protocolVersion; - this.codecRegistry = codecRegistry; - this.byIdx = byIdx; - this.byName = byName; + static final String TYPE_NAME = "type_name"; + private static final String COLS_NAMES = "field_names"; + static final String COLS_TYPES = "field_types"; + + private final String keyspace; + private final String typeName; + private final boolean frozen; + private final ProtocolVersion protocolVersion; + + // can be null, if this object is being constructed from a response message + // see Responses.Result.Rows.Metadata.decode() + private volatile CodecRegistry codecRegistry; + + // Note that we don't expose the order of fields, from an API perspective this is a map + // of String->Field, but internally we care about the order because the serialization format + // of UDT expects a particular order. + final Field[] byIdx; + // For a given name, we can only have one field with that name, so we don't need a int[] in + // practice. However, storing one element arrays save allocations in UDTValue.getAllIndexesOf + // implementation. 
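To make the preceding comment concrete, here is a small, driver-independent sketch (for a hypothetical address(street text, zip int) type) of how the name-to-index lookup is populated, mirroring the mapByName() helper shown further down:

```java
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class FieldIndexSketch {
  public static void main(String[] args) {
    // Hypothetical UDT: CREATE TYPE ks.address (street text, zip int);
    List<String> fieldNames = Arrays.asList("street", "zip");

    // Mirror of mapByName(): each name maps to a one-element int[] holding its position,
    // so a lookup can hand the stored array straight back without allocating a new one.
    ImmutableMap.Builder<String, int[]> builder = ImmutableMap.builder();
    for (int i = 0; i < fieldNames.size(); i++) {
      builder.put(fieldNames.get(i), new int[] {i});
    }
    Map<String, int[]> byName = builder.build();

    System.out.println(Arrays.toString(byName.get("zip"))); // [1]
  }
}
```

Handing back the stored one-element array directly is what lets UDTValue.getAllIndexesOf avoid an allocation per lookup.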
+ final Map byName; + + private UserType( + Name name, + String keyspace, + String typeName, + boolean frozen, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry, + Field[] byIdx, + Map byName) { + super(name); + this.keyspace = keyspace; + this.typeName = typeName; + this.frozen = frozen; + this.protocolVersion = protocolVersion; + this.codecRegistry = codecRegistry; + this.byIdx = byIdx; + this.byName = byName; + } + + UserType( + String keyspace, + String typeName, + boolean frozen, + Collection fields, + ProtocolVersion protocolVersion, + CodecRegistry codecRegistry) { + this( + DataType.Name.UDT, + keyspace, + typeName, + frozen, + protocolVersion, + codecRegistry, + fields.toArray(new Field[fields.size()]), + mapByName(fields)); + } + + private static ImmutableMap mapByName(Collection fields) { + ImmutableMap.Builder builder = new ImmutableMap.Builder(); + int i = 0; + for (Field field : fields) { + builder.put(field.getName(), new int[] {i}); + i += 1; } - - UserType(String keyspace, String typeName, boolean frozen, Collection fields, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - this(DataType.Name.UDT, keyspace, typeName, frozen, protocolVersion, codecRegistry, - fields.toArray(new Field[fields.size()]), - mapByName(fields)); + return builder.build(); + } + + static UserType build( + KeyspaceMetadata ksm, + Row row, + VersionNumber version, + Cluster cluster, + Map userTypes) { + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); + + String keyspace = row.getString(KeyspaceMetadata.KS_NAME); + String name = row.getString(TYPE_NAME); + + List fieldNames = row.getList(COLS_NAMES, String.class); + List fieldTypes = row.getList(COLS_TYPES, String.class); + + List fields = new ArrayList(fieldNames.size()); + for (int i = 0; i < fieldNames.size(); i++) { + DataType fieldType; + if (version.getMajor() >= 3.0) { + fieldType = + DataTypeCqlNameParser.parse( + fieldTypes.get(i), cluster, ksm.getName(), userTypes, ksm.userTypes, false, false); + } else { + fieldType = + DataTypeClassNameParser.parseOne(fieldTypes.get(i), protocolVersion, codecRegistry); + } + fields.add(new Field(fieldNames.get(i), fieldType)); } - - private static ImmutableMap mapByName(Collection fields) { - ImmutableMap.Builder builder = new ImmutableMap.Builder(); - int i = 0; - for (Field field : fields) { - builder.put(field.getName(), new int[]{i}); - i += 1; - } - return builder.build(); + return new UserType(keyspace, name, false, fields, protocolVersion, codecRegistry); + } + + /** + * Returns a new empty value for this user type definition. + * + * @return an empty value for this user type definition. + */ + public UDTValue newValue() { + return new UDTValue(this); + } + + /** + * The name of the keyspace this UDT is part of. + * + * @return the name of the keyspace this UDT is part of. + */ + public String getKeyspace() { + return keyspace; + } + + /** + * The name of this user type. + * + * @return the name of this user type. + */ + public String getTypeName() { + return typeName; + } + + /** + * Returns the number of fields in this UDT. + * + * @return the number of fields in this UDT. + */ + public int size() { + return byIdx.length; + } + + /** + * Returns whether this UDT contains a given field. + * + * @param name the name to check. 
Note that {@code name} obey the usual CQL identifier rules: it + * should be quoted if it denotes a case sensitive identifier (you can use {@link + * Metadata#quote} for the quoting). + * @return {@code true} if this UDT contains a field named {@code name}, {@code false} otherwise. + */ + public boolean contains(String name) { + return byName.containsKey(Metadata.handleId(name)); + } + + /** + * Returns an iterator over the fields of this UDT. + * + * @return an iterator over the fields of this UDT. + */ + @Override + public Iterator iterator() { + return Iterators.forArray(byIdx); + } + + /** + * Returns the names of the fields of this UDT. + * + * @return the names of the fields of this UDT as a collection. + */ + public Collection getFieldNames() { + return byName.keySet(); + } + + /** + * Returns the type of a given field. + * + * @param name the name of the field. Note that {@code name} obey the usual CQL identifier rules: + * it should be quoted if it denotes a case sensitive identifier (you can use {@link + * Metadata#quote} for the quoting). + * @return the type of field {@code name} if this UDT has a field of this name, {@code null} + * otherwise. + * @throws IllegalArgumentException if {@code name} is not a field of this UDT definition. + */ + public DataType getFieldType(String name) { + int[] idx = byName.get(Metadata.handleId(name)); + if (idx == null) + throw new IllegalArgumentException(name + " is not a field defined in this definition"); + + return byIdx[idx[0]].getType(); + } + + @Override + public boolean isFrozen() { + return frozen; + } + + public UserType copy(boolean newFrozen) { + if (newFrozen == frozen) { + return this; + } else { + return new UserType( + name, keyspace, typeName, newFrozen, protocolVersion, codecRegistry, byIdx, byName); } - - static UserType build(KeyspaceMetadata ksm, Row row, VersionNumber version, Cluster cluster, Map userTypes) { - ProtocolVersion protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = cluster.getConfiguration().getCodecRegistry(); - - String keyspace = row.getString(KeyspaceMetadata.KS_NAME); - String name = row.getString(TYPE_NAME); - - List fieldNames = row.getList(COLS_NAMES, String.class); - List fieldTypes = row.getList(COLS_TYPES, String.class); - - List fields = new ArrayList(fieldNames.size()); - for (int i = 0; i < fieldNames.size(); i++) { - DataType fieldType; - if (version.getMajor() >= 3.0) { - fieldType = DataTypeCqlNameParser.parse(fieldTypes.get(i), cluster, ksm.getName(), userTypes, ksm.userTypes, false, false); - } else { - fieldType = DataTypeClassNameParser.parseOne(fieldTypes.get(i), protocolVersion, codecRegistry); - } - fields.add(new Field(fieldNames.get(i), fieldType)); - } - return new UserType(keyspace, name, false, fields, protocolVersion, codecRegistry); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + keyspace.hashCode(); + result = 31 * result + typeName.hashCode(); + result = 31 * result + Arrays.hashCode(byIdx); + return result; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof UserType)) return false; + + UserType other = (UserType) o; + + // Note: we don't test byName because it's redundant with byIdx in practice, + // but also because the map holds 'int[]' which don't have proper equal. 
+ return name.equals(other.name) + && keyspace.equals(other.keyspace) + && typeName.equals(other.typeName) + && Arrays.equals(byIdx, other.byIdx); + } + + /** + * Returns a CQL query representing this user type in human readable form. + * + *

This method is equivalent to {@link #asCQLQuery} but the output is formatted to be human + * readable (for some definition of human readable). + * + * @return the CQL query representing this user type. + */ + public String exportAsString() { + return asCQLQuery(true); + } + + /** + * Returns a CQL query representing this user type. + * + *

This method returns a single 'CREATE TYPE' query corresponding to this UDT definition. + * + *

Note that the returned string is a single line; the returned query is not formatted in any + * way. + * + * @return the 'CREATE TYPE' query corresponding to this user type. + * @see #exportAsString + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + /** + * Return the protocol version that has been used to deserialize this UDT, or that will be used to + * serialize it. In most cases this should be the version currently in use by the cluster instance + * that this UDT belongs to, as reported by {@link ProtocolOptions#getProtocolVersion()}. + * + * @return the protocol version that has been used to deserialize this UDT, or that will be used + * to serialize it. + */ + ProtocolVersion getProtocolVersion() { + return protocolVersion; + } + + CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + void setCodecRegistry(CodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + } + + private String asCQLQuery(boolean formatted) { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE TYPE ") + .append(Metadata.quoteIfNecessary(keyspace)) + .append('.') + .append(Metadata.quoteIfNecessary(typeName)) + .append(" ("); + if (formatted) { + TableMetadata.spaceOrNewLine(sb, true); } - - /** - * Returns a new empty value for this user type definition. - * - * @return an empty value for this user type definition. - */ - public UDTValue newValue() { - return new UDTValue(this); - } - - /** - * The name of the keyspace this UDT is part of. - * - * @return the name of the keyspace this UDT is part of. - */ - public String getKeyspace() { - return keyspace; + for (int i = 0; i < byIdx.length; i++) { + sb.append(byIdx[i]); + if (i < byIdx.length - 1) { + sb.append(','); + TableMetadata.spaceOrNewLine(sb, formatted); + } else { + TableMetadata.newLine(sb, formatted); + } } - /** - * The name of this user type. - * - * @return the name of this user type. - */ - public String getTypeName() { - return typeName; + return sb.append(");").toString(); + } + + @Override + public String toString() { + String str = + Metadata.quoteIfNecessary(getKeyspace()) + "." + Metadata.quoteIfNecessary(getTypeName()); + return isFrozen() ? "frozen<" + str + ">" : str; + } + + @Override + public String asFunctionParameterString() { + return Metadata.quoteIfNecessary(getTypeName()); + } + + /** A UDT field. */ + public static class Field { + private final String name; + private final DataType type; + + Field(String name, DataType type) { + this.name = name; + this.type = type; } /** - * Returns the number of fields in this UDT. + * Returns the name of the field. * - * @return the number of fields in this UDT. + * @return the name of the field. */ - public int size() { - return byIdx.length; + public String getName() { + return name; } /** - * Returns whether this UDT contains a given field. + * Returns the type of the field. * - * @param name the name to check. Note that {@code name} obey the usual - * CQL identifier rules: it should be quoted if it denotes a case sensitive - * identifier (you can use {@link Metadata#quote} for the quoting). - * @return {@code true} if this UDT contains a field named {@code name}, - * {@code false} otherwise. + * @return the type of the field. */ - public boolean contains(String name) { - return byName.containsKey(Metadata.handleId(name)); + public DataType getType() { + return type; } - /** - * Returns an iterator over the fields of this UDT. - * - * @return an iterator over the fields of this UDT. 
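Pulling the metadata accessors and the CQL export together, a hedged usage sketch against the 3.x API; it assumes a reachable node plus a pre-existing UDT and table, and every keyspace, type, column and field name below is illustrative only:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;

public class UserTypeExample {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {

      // Hypothetical UDT assumed to exist: CREATE TYPE ks.address (street text, zip int);
      UserType addressType = cluster.getMetadata().getKeyspace("ks").getUserType("address");

      // Introspection: field names, membership test, per-field types.
      System.out.println(addressType.getFieldNames());     // e.g. [street, zip]
      System.out.println(addressType.contains("street"));  // true
      System.out.println(addressType.getFieldType("zip")); // int

      // Single-line 'CREATE TYPE' statement, as produced by asCQLQuery().
      System.out.println(addressType.asCQLQuery());

      // Build a value for the type and bind it to a statement
      // (the ks.users table is a hypothetical example).
      UDTValue address = addressType.newValue()
          .setString("street", "1 Main St")
          .setInt("zip", 12345);
      session.execute("INSERT INTO ks.users (id, address) VALUES (?, ?)", 42, address);
    }
  }
}
```

Note that newValue() is the only supported way to obtain a UDTValue here, since the UDTValue constructor is package-private and tied to the type's metadata.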
- */ @Override - public Iterator iterator() { - return Iterators.forArray(byIdx); - } - - /** - * Returns the names of the fields of this UDT. - * - * @return the names of the fields of this UDT as a collection. - */ - public Collection getFieldNames() { - return byName.keySet(); - } - - /** - * Returns the type of a given field. - * - * @param name the name of the field. Note that {@code name} obey the usual - * CQL identifier rules: it should be quoted if it denotes a case sensitive - * identifier (you can use {@link Metadata#quote} for the quoting). - * @return the type of field {@code name} if this UDT has a field of this - * name, {@code null} otherwise. - * @throws IllegalArgumentException if {@code name} is not a field of this - * UDT definition. - */ - public DataType getFieldType(String name) { - int[] idx = byName.get(Metadata.handleId(name)); - if (idx == null) - throw new IllegalArgumentException(name + " is not a field defined in this definition"); - - return byIdx[idx[0]].getType(); + public final int hashCode() { + return Arrays.hashCode(new Object[] {name, type}); } @Override - public boolean isFrozen() { - return frozen; - } - - public UserType copy(boolean newFrozen) { - if (newFrozen == frozen) { - return this; - } else { - return new UserType(name, keyspace, typeName, newFrozen, protocolVersion, codecRegistry, byIdx, byName); - } - } + public final boolean equals(Object o) { + if (!(o instanceof Field)) return false; - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + keyspace.hashCode(); - result = 31 * result + typeName.hashCode(); - result = 31 * result + Arrays.hashCode(byIdx); - return result; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof UserType)) - return false; - - UserType other = (UserType) o; - - // Note: we don't test byName because it's redundant with byIdx in practice, - // but also because the map holds 'int[]' which don't have proper equal. - return name.equals(other.name) - && keyspace.equals(other.keyspace) - && typeName.equals(other.typeName) - && Arrays.equals(byIdx, other.byIdx); - } - - /** - * Returns a CQL query representing this user type in human readable form. - *

- * This method is equivalent to {@link #asCQLQuery} but the output is - * formatted to be human readable (for some definition of human readable). - * - * @return the CQL query representing this user type. - */ - public String exportAsString() { - return asCQLQuery(true); - } - - /** - * Returns a CQL query representing this user type. - *

- * This method returns a single 'CREATE TYPE' query corresponding - * to this UDT definition. - *

- * Note that the returned string is a single line; the returned query - * is not formatted in any way. - * - * @return the 'CREATE TYPE' query corresponding to this user type. - * @see #exportAsString - */ - public String asCQLQuery() { - return asCQLQuery(false); - } - - /** - * Return the protocol version that has been used to deserialize - * this UDT, or that will be used to serialize it. - * In most cases this should be the version - * currently in use by the cluster instance - * that this UDT belongs to, as reported by - * {@link ProtocolOptions#getProtocolVersion()}. - * - * @return the protocol version that has been used to deserialize - * this UDT, or that will be used to serialize it. - */ - ProtocolVersion getProtocolVersion() { - return protocolVersion; - } - - CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - void setCodecRegistry(CodecRegistry codecRegistry) { - this.codecRegistry = codecRegistry; - } - - private String asCQLQuery(boolean formatted) { - StringBuilder sb = new StringBuilder(); - - sb.append("CREATE TYPE ").append(Metadata.quoteIfNecessary(keyspace)).append('.').append(Metadata.quoteIfNecessary(typeName)).append(" ("); - TableMetadata.newLine(sb, formatted); - for (int i = 0; i < byIdx.length; i++) { - sb.append(TableMetadata.spaces(4, formatted)).append(byIdx[i]); - if (i < byIdx.length - 1) - sb.append(','); - TableMetadata.newLine(sb, formatted); - } - - return sb.append(");").toString(); + Field other = (Field) o; + return name.equals(other.name) && type.equals(other.type); } @Override public String toString() { - String str = Metadata.quoteIfNecessary(getKeyspace()) + "." + Metadata.quoteIfNecessary(getTypeName()); - return isFrozen() ? - "frozen<" + str + ">" : - str; - } - - @Override - public String asFunctionParameterString() { - return Metadata.quoteIfNecessary(getTypeName()); + return Metadata.quoteIfNecessary(name) + ' ' + type; } - - /** - * A UDT field. - */ - public static class Field { - private final String name; - private final DataType type; - - Field(String name, DataType type) { - this.name = name; - this.type = type; - } - - /** - * Returns the name of the field. - * - * @return the name of the field. - */ - public String getName() { - return name; - } - - /** - * Returns the type of the field. - * - * @return the type of the field. - */ - public DataType getType() { - return type; - } - - @Override - public final int hashCode() { - return Arrays.hashCode(new Object[]{name, type}); - } - - @Override - public final boolean equals(Object o) { - if (!(o instanceof Field)) - return false; - - Field other = (Field) o; - return name.equals(other.name) - && type.equals(other.type); - } - - @Override - public String toString() { - return Metadata.quoteIfNecessary(name) + ' ' + type; - } + } + + /** + * A "shallow" definition of a UDT that only contains the keyspace and type name, without any + * information about the type's structure. + * + *

This is used for internal dependency analysis only, and never returned to the client. + * + * @since 3.0.0 + */ + static class Shallow extends DataType { + + final String keyspaceName; + final String typeName; + final boolean frozen; + + Shallow(String keyspaceName, String typeName, boolean frozen) { + super(Name.UDT); + this.keyspaceName = keyspaceName; + this.typeName = typeName; + this.frozen = frozen; } - /** - * A "shallow" definition of a UDT that only contains the keyspace and type name, without any information - * about the type's structure. - *

- * This is used for internal dependency analysis only, and never returned to the client. - * - * @since 3.0.0 - */ - static class Shallow extends DataType { - - final String keyspaceName; - final String typeName; - final boolean frozen; - - Shallow(String keyspaceName, String typeName, boolean frozen) { - super(Name.UDT); - this.keyspaceName = keyspaceName; - this.typeName = typeName; - this.frozen = frozen; - } - - @Override - public boolean isFrozen() { - return frozen; - } + @Override + public boolean isFrozen() { + return frozen; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java b/driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java index b11829a832a..84aba9bb1d5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java +++ b/driver-core/src/main/java/com/datastax/driver/core/VIntCoding.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,142 +47,142 @@ package com.datastax.driver.core; import io.netty.util.concurrent.FastThreadLocal; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; /** - * Variable length encoding inspired from Google - * varints. + * Variable length encoding inspired from Google varints. + * *

- *

Cassandra vints are encoded with the most significant group first. The most significant - * byte will contains the information about how many extra bytes need to be read as well as - * the most significant bits of the integer. - * The number of extra bytes to read is encoded as 1 bits on the left side. - * For example, if we need to read 3 more bytes the first byte will start with 1110. - * If the encoded integer is 8 bytes long the vint will be encoded on 9 bytes and the first - * byte will be: 11111111

+ * + *

Cassandra vints are encoded with the most significant group first. The most significant byte + * will contains the information about how many extra bytes need to be read as well as the most + * significant bits of the integer. The number of extra bytes to read is encoded as 1 bits on the + * left side. For example, if we need to read 3 more bytes the first byte will start with 1110. If + * the encoded integer is 8 bytes long the vint will be encoded on 9 bytes and the first byte will + * be: 11111111 + * *
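A self-contained sketch of the first-byte framing described above; it re-derives the size formula used by computeUnsignedVIntSize() and is not the driver's own API (those methods are package-private):

```java
public class VIntLayoutSketch {

  // Number of bytes needed for an unsigned vint, same formula as computeUnsignedVIntSize().
  static int sizeOf(long value) {
    int magnitude = Long.numberOfLeadingZeros(value | 1);
    return (639 - magnitude * 9) >> 6;
  }

  // Big-endian value bytes, with the count of extra bytes encoded as leading 1-bits
  // of the first byte, exactly the scheme the javadoc above describes.
  static byte[] encodeUnsigned(long value) {
    int size = sizeOf(value);
    byte[] out = new byte[size];
    for (int i = size - 1; i >= 0; i--) {
      out[i] = (byte) value;
      value >>= 8;
    }
    int extraBytes = size - 1;
    out[0] |= (byte) ~(0xff >>> extraBytes); // set 'extraBytes' leading 1-bits
    return out;
  }

  public static void main(String[] args) {
    print(10);  // fits in 7 bits -> single byte 0x0a
    print(600); // needs one extra byte -> 0x82 0x58 (first byte starts with binary 10)
  }

  static void print(long value) {
    StringBuilder sb = new StringBuilder(value + " ->");
    for (byte b : encodeUnsigned(value)) {
      sb.append(String.format(" %02x", b));
    }
    System.out.println(sb);
  }
}
```

writeVInt()/readVInt() additionally run signed values through the ZigZag mapping discussed below before applying this framing.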

- *

Signed integer are (like protocol buffer varints) encoded using the ZigZag encoding - * so that numbers with a small absolute value have a small vint encoded value too.
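The ZigZag mapping is easy to check in isolation; a minimal sketch using the same two expressions as encodeZigZag64() and decodeZigZag64():

```java
public class ZigZagSketch {

  static long encode(long n) {
    return (n << 1) ^ (n >> 63); // arithmetic shift spreads the sign bit
  }

  static long decode(long n) {
    return (n >>> 1) ^ -(n & 1);
  }

  public static void main(String[] args) {
    // Small absolute values map to small unsigned values, so they stay short on the wire:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
    for (long n = -3; n <= 3; n++) {
      long z = encode(n);
      System.out.println(n + " -> " + z + " -> " + decode(z));
    }
  }
}
```

Because small absolute values map to small unsigned values, values such as month or day counts in a duration remain compact once vint-encoded.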

+ * + *

Signed integer are (like protocol buffer varints) encoded using the ZigZag encoding so that + * numbers with a small absolute value have a small vint encoded value too. */ class VIntCoding { - private static long readUnsignedVInt(DataInput input) throws IOException { - int firstByte = input.readByte(); - - //Bail out early if this is one byte, necessary or it fails later - if (firstByte >= 0) - return firstByte; - - int size = numberOfExtraBytesToRead(firstByte); - long retval = firstByte & firstByteValueMask(size); - for (int ii = 0; ii < size; ii++) { - byte b = input.readByte(); - retval <<= 8; - retval |= b & 0xff; - } - - return retval; - } - - static long readVInt(DataInput input) throws IOException { - return decodeZigZag64(readUnsignedVInt(input)); - } - - // & this with the first byte to give the value part for a given extraBytesToRead encoded in the byte - private static int firstByteValueMask(int extraBytesToRead) { - // by including the known 0bit in the mask, we can use this for encodeExtraBytesToRead - return 0xff >> extraBytesToRead; - } + private static long readUnsignedVInt(DataInput input) throws IOException { + int firstByte = input.readByte(); - private static int encodeExtraBytesToRead(int extraBytesToRead) { - // because we have an extra bit in the value mask, we just need to invert it - return ~firstByteValueMask(extraBytesToRead); - } + // Bail out early if this is one byte, necessary or it fails later + if (firstByte >= 0) return firstByte; - private static int numberOfExtraBytesToRead(int firstByte) { - // we count number of set upper bits; so if we simply invert all of the bits, we're golden - // this is aided by the fact that we only work with negative numbers, so when upcast to an int all - // of the new upper bits are also set, so by inverting we set all of them to zero - return Integer.numberOfLeadingZeros(~firstByte) - 24; + int size = numberOfExtraBytesToRead(firstByte); + long retval = firstByte & firstByteValueMask(size); + for (int ii = 0; ii < size; ii++) { + byte b = input.readByte(); + retval <<= 8; + retval |= b & 0xff; } - private static final FastThreadLocal encodingBuffer = new FastThreadLocal() { + return retval; + } + + static long readVInt(DataInput input) throws IOException { + return decodeZigZag64(readUnsignedVInt(input)); + } + + // & this with the first byte to give the value part for a given extraBytesToRead encoded in the + // byte + private static int firstByteValueMask(int extraBytesToRead) { + // by including the known 0bit in the mask, we can use this for encodeExtraBytesToRead + return 0xff >> extraBytesToRead; + } + + private static int encodeExtraBytesToRead(int extraBytesToRead) { + // because we have an extra bit in the value mask, we just need to invert it + return ~firstByteValueMask(extraBytesToRead); + } + + private static int numberOfExtraBytesToRead(int firstByte) { + // we count number of set upper bits; so if we simply invert all of the bits, we're golden + // this is aided by the fact that we only work with negative numbers, so when upcast to an int + // all + // of the new upper bits are also set, so by inverting we set all of them to zero + return Integer.numberOfLeadingZeros(~firstByte) - 24; + } + + private static final FastThreadLocal encodingBuffer = + new FastThreadLocal() { @Override public byte[] initialValue() { - return new byte[9]; - } - }; - - private static void writeUnsignedVInt(long value, DataOutput output) throws IOException { - int size = VIntCoding.computeUnsignedVIntSize(value); - if (size == 1) { - 
output.write((int) value); - return; - } - - output.write(VIntCoding.encodeVInt(value, size), 0, size); - } - - private static byte[] encodeVInt(long value, int size) { - byte encodingSpace[] = encodingBuffer.get(); - int extraBytes = size - 1; - - for (int i = extraBytes; i >= 0; --i) { - encodingSpace[i] = (byte) value; - value >>= 8; + return new byte[9]; } - encodingSpace[0] |= encodeExtraBytesToRead(extraBytes); - return encodingSpace; - } + }; - static void writeVInt(long value, DataOutput output) throws IOException { - writeUnsignedVInt(encodeZigZag64(value), output); + private static void writeUnsignedVInt(long value, DataOutput output) throws IOException { + int size = VIntCoding.computeUnsignedVIntSize(value); + if (size == 1) { + output.write((int) value); + return; } - /** - * Decode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers - * into values that can be efficiently encoded with varint. (Otherwise, - * negative values must be sign-extended to 64 bits to be varint encoded, - * thus always taking 10 bytes on the wire.) - * - * @param n An unsigned 64-bit integer, stored in a signed int because - * Java has no explicit unsigned support. - * @return A signed 64-bit integer. - */ - private static long decodeZigZag64(final long n) { - return (n >>> 1) ^ -(n & 1); - } + output.write(VIntCoding.encodeVInt(value, size), 0, size); + } - /** - * Encode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers - * into values that can be efficiently encoded with varint. (Otherwise, - * negative values must be sign-extended to 64 bits to be varint encoded, - * thus always taking 10 bytes on the wire.) - * - * @param n A signed 64-bit integer. - * @return An unsigned 64-bit integer, stored in a signed int because - * Java has no explicit unsigned support. - */ - private static long encodeZigZag64(final long n) { - // Note: the right-shift must be arithmetic - return (n << 1) ^ (n >> 63); - } - - /** - * Compute the number of bytes that would be needed to encode a varint. - */ - static int computeVIntSize(final long param) { - return computeUnsignedVIntSize(encodeZigZag64(param)); - } + private static byte[] encodeVInt(long value, int size) { + byte encodingSpace[] = encodingBuffer.get(); + int extraBytes = size - 1; - /** - * Compute the number of bytes that would be needed to encode an unsigned varint. - */ - private static int computeUnsignedVIntSize(final long value) { - int magnitude = Long.numberOfLeadingZeros(value | 1); // | with 1 to ensure magntiude <= 63, so (63 - 1) / 7 <= 8 - return (639 - magnitude * 9) >> 6; + for (int i = extraBytes; i >= 0; --i) { + encodingSpace[i] = (byte) value; + value >>= 8; } + encodingSpace[0] |= encodeExtraBytesToRead(extraBytes); + return encodingSpace; + } + + static void writeVInt(long value, DataOutput output) throws IOException { + writeUnsignedVInt(encodeZigZag64(value), output); + } + + /** + * Decode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers into values that can be + * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits + * to be varint encoded, thus always taking 10 bytes on the wire.) + * + * @param n An unsigned 64-bit integer, stored in a signed int because Java has no explicit + * unsigned support. + * @return A signed 64-bit integer. + */ + private static long decodeZigZag64(final long n) { + return (n >>> 1) ^ -(n & 1); + } + + /** + * Encode a ZigZag-encoded 64-bit value. 
ZigZag encodes signed integers into values that can be + * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits + * to be varint encoded, thus always taking 10 bytes on the wire.) + * + * @param n A signed 64-bit integer. + * @return An unsigned 64-bit integer, stored in a signed int because Java has no explicit + * unsigned support. + */ + private static long encodeZigZag64(final long n) { + // Note: the right-shift must be arithmetic + return (n << 1) ^ (n >> 63); + } + + /** Compute the number of bytes that would be needed to encode a varint. */ + static int computeVIntSize(final long param) { + return computeUnsignedVIntSize(encodeZigZag64(param)); + } + + /** Compute the number of bytes that would be needed to encode an unsigned varint. */ + private static int computeUnsignedVIntSize(final long value) { + int magnitude = + Long.numberOfLeadingZeros( + value | 1); // | with 1 to ensure magntiude <= 63, so (63 - 1) / 7 <= 8 + return (639 - magnitude * 9) >> 6; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java index 2f83dd18606..debf049ce9b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java +++ b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +18,6 @@ package com.datastax.driver.core; import com.datastax.driver.core.utils.MoreObjects; - import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -25,8 +26,8 @@ /** * A version number in the form X.Y.Z with optional pre-release labels and build metadata. - *

- * Version numbers compare the usual way, the major number (X) is compared first, then the minor + * + *

Version numbers compare the usual way, the major number (X) is compared first, then the minor * one (Y) and then the patch level one (Z). Lastly, versions with pre-release sorts before the * versions that don't have one, and labels are sorted alphabetically if necessary. Build metadata * are ignored for sorting versions. The versions supported loosely correspond to what @@ -34,220 +35,218 @@ */ public class VersionNumber implements Comparable { - private static final String VERSION_REGEXP = "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; - private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); - - private final int major; - private final int minor; - private final int patch; - private final int dsePatch; - - private final String[] preReleases; - private final String build; - - private VersionNumber(int major, int minor, int patch, int dsePatch, String[] preReleases, String build) { - this.major = major; - this.minor = minor; - this.patch = patch; - this.dsePatch = dsePatch; - this.preReleases = preReleases; - this.build = build; - } - - /** - * Parse a version from a string. - *

- * The version string should have primarily the form X.Y.Z to which can be appended - * one or more pre-release label after dashes (2.0.1-beta1, 2.1.4-rc1-SNAPSHOT) - * and an optional build label (2.1.0-beta1+a20ba.sha). Out of convenience, the - * "patch" version number, Z, can be omitted, in which case it is assumed to be 0. - * - * @param version the string to parse - * @return the parsed version number. - * @throws IllegalArgumentException if the provided string does not - * represent a valid version. - */ - public static VersionNumber parse(String version) { - if (version == null) - return null; - - Matcher matcher = pattern.matcher(version); - if (!matcher.matches()) - throw new IllegalArgumentException("Invalid version number: " + version); - - try { - int major = Integer.parseInt(matcher.group(1)); - int minor = Integer.parseInt(matcher.group(2)); - - String pa = matcher.group(3); - int patch = pa == null || pa.isEmpty() ? 0 : Integer.parseInt(pa.substring(1)); // dropping the initial '.' since it's included this time - - String dse = matcher.group(4); - int dsePatch = dse == null || dse.isEmpty() ? -1 : Integer.parseInt(dse.substring(1)); // dropping the initial '.' since it's included this time - - String pr = matcher.group(5); - String[] preReleases = pr == null || pr.isEmpty() ? null : pr.substring(1).split("\\-"); // drop initial '-' or '~' then split on the remaining ones - - String bl = matcher.group(6); - String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' - - return new VersionNumber(major, minor, patch, dsePatch, preReleases, build); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid version number: " + version); - } - } - - /** - * The major version number. - * - * @return the major version number, i.e. X in X.Y.Z. - */ - public int getMajor() { - return major; - } - - /** - * The minor version number. - * - * @return the minor version number, i.e. Y in X.Y.Z. - */ - public int getMinor() { - return minor; + private static final String VERSION_REGEXP = + "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; + private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); + + private final int major; + private final int minor; + private final int patch; + private final int dsePatch; + + private final String[] preReleases; + private final String build; + + private VersionNumber( + int major, int minor, int patch, int dsePatch, String[] preReleases, String build) { + this.major = major; + this.minor = minor; + this.patch = patch; + this.dsePatch = dsePatch; + this.preReleases = preReleases; + this.build = build; + } + + /** + * Parse a version from a string. + * + *

The version string should have primarily the form X.Y.Z to which can be appended one or more + * pre-release label after dashes (2.0.1-beta1, 2.1.4-rc1-SNAPSHOT) and an optional build label + * (2.1.0-beta1+a20ba.sha). Out of convenience, the "patch" version number, Z, can be omitted, in + * which case it is assumed to be 0. + * + * @param version the string to parse + * @return the parsed version number. + * @throws IllegalArgumentException if the provided string does not represent a valid version. + */ + public static VersionNumber parse(String version) { + if (version == null) return null; + + Matcher matcher = pattern.matcher(version); + if (!matcher.matches()) + throw new IllegalArgumentException("Invalid version number: " + version); + + try { + int major = Integer.parseInt(matcher.group(1)); + int minor = Integer.parseInt(matcher.group(2)); + + String pa = matcher.group(3); + int patch = + pa == null || pa.isEmpty() + ? 0 + : Integer.parseInt( + pa.substring(1)); // dropping the initial '.' since it's included this time + + String dse = matcher.group(4); + int dsePatch = + dse == null || dse.isEmpty() + ? -1 + : Integer.parseInt( + dse.substring(1)); // dropping the initial '.' since it's included this time + + String pr = matcher.group(5); + String[] preReleases = + pr == null || pr.isEmpty() + ? null + : pr.substring(1) + .split("\\-"); // drop initial '-' or '~' then split on the remaining ones + + String bl = matcher.group(6); + String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' + + return new VersionNumber(major, minor, patch, dsePatch, preReleases, build); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid version number: " + version); } - - /** - * The patch version number. - * - * @return the patch version number, i.e. Z in X.Y.Z. - */ - public int getPatch() { - return patch; - } - - /** - * The DSE patch version number (will only be present for version of Cassandra in DSE). - *
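A hedged usage sketch of the parsing rules just described, using only the public methods of VersionNumber (the version strings are arbitrary examples in the format the javadoc mentions):

```java
import com.datastax.driver.core.VersionNumber;

public class VersionNumberExample {
  public static void main(String[] args) {
    VersionNumber v = VersionNumber.parse("2.1.4-rc1-SNAPSHOT+a20ba.sha");

    System.out.println(v.getMajor());            // 2
    System.out.println(v.getMinor());            // 1
    System.out.println(v.getPatch());            // 4
    System.out.println(v.getPreReleaseLabels()); // [rc1, SNAPSHOT]
    System.out.println(v.getBuildLabel());       // a20ba.sha

    // The patch digit may be omitted and defaults to 0.
    System.out.println(VersionNumber.parse("3.11").getPatch()); // 0

    // A fourth digit is only reported for DSE-style versions.
    System.out.println(VersionNumber.parse("3.0.15.10").getDSEPatch()); // 10
    System.out.println(v.getDSEPatch());                                // -1
  }
}
```

When only the X.Y.Z part matters for a comparison, nextStable() (documented further down in this file) strips the pre-release and build parts first.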

- * DataStax Entreprise (DSE) adds a fourth number to the version number to track potential - * hot fixes and/or DSE specific patches that may have been applied to the Cassandra version. - * In that case, this method return that fourth number. - * - * @return the DSE patch version number, i.e. D in X.Y.Z.D, or -1 if the version number is - * not from DSE. - */ - public int getDSEPatch() { - return dsePatch; - } - - /** - * The pre-release labels if relevant, i.e. label1 and label2 in X.Y.Z-label1-lable2. - * - * @return the pre-releases labels. The return list will be {@code null} if the version number - * doesn't have one. - */ - public List getPreReleaseLabels() { - return preReleases == null ? null : Collections.unmodifiableList(Arrays.asList(preReleases)); - } - - /** - * The build label if there is one. - * - * @return the build label or {@code null} if the version number - * doesn't have one. - */ - public String getBuildLabel() { - return build; - } - - /** - * The next stable version, i.e. the version stripped of its pre-release labels and build metadata. - *

- * This is mostly used during our development stage, where we test the driver against pre-release - * versions of Cassandra like 2.1.0-rc7-SNAPSHOT, but need to compare to the stable version 2.1.0 - * when testing for native protocol compatibility, etc. - * - * @return the next stable version. - */ - public VersionNumber nextStable() { - return new VersionNumber(major, minor, patch, dsePatch, null, null); + } + + /** + * The major version number. + * + * @return the major version number, i.e. X in X.Y.Z. + */ + public int getMajor() { + return major; + } + + /** + * The minor version number. + * + * @return the minor version number, i.e. Y in X.Y.Z. + */ + public int getMinor() { + return minor; + } + + /** + * The patch version number. + * + * @return the patch version number, i.e. Z in X.Y.Z. + */ + public int getPatch() { + return patch; + } + + /** + * The DSE patch version number (will only be present for version of Cassandra in DSE). + * + *

DataStax Entreprise (DSE) adds a fourth number to the version number to track potential hot + * fixes and/or DSE specific patches that may have been applied to the Cassandra version. In that + * case, this method return that fourth number. + * + * @return the DSE patch version number, i.e. D in X.Y.Z.D, or -1 if the version number is not + * from DSE. + */ + public int getDSEPatch() { + return dsePatch; + } + + /** + * The pre-release labels if relevant, i.e. label1 and label2 in X.Y.Z-label1-lable2. + * + * @return the pre-releases labels. The return list will be {@code null} if the version number + * doesn't have one. + */ + public List getPreReleaseLabels() { + return preReleases == null ? null : Collections.unmodifiableList(Arrays.asList(preReleases)); + } + + /** + * The build label if there is one. + * + * @return the build label or {@code null} if the version number doesn't have one. + */ + public String getBuildLabel() { + return build; + } + + /** + * The next stable version, i.e. the version stripped of its pre-release labels and build + * metadata. + * + *

This is mostly used during our development stage, where we test the driver against + * pre-release versions of Cassandra like 2.1.0-rc7-SNAPSHOT, but need to compare to the stable + * version 2.1.0 when testing for native protocol compatibility, etc. + * + * @return the next stable version. + */ + public VersionNumber nextStable() { + return new VersionNumber(major, minor, patch, dsePatch, null, null); + } + + @Override + public int compareTo(VersionNumber other) { + if (major < other.major) return -1; + if (major > other.major) return 1; + + if (minor < other.minor) return -1; + if (minor > other.minor) return 1; + + if (patch < other.patch) return -1; + if (patch > other.patch) return 1; + + if (dsePatch < 0) { + if (other.dsePatch >= 0) return -1; + } else { + if (other.dsePatch < 0) return 1; + + // Both are >= 0 + if (dsePatch < other.dsePatch) return -1; + if (dsePatch > other.dsePatch) return 1; } - @Override - public int compareTo(VersionNumber other) { - if (major < other.major) - return -1; - if (major > other.major) - return 1; - - if (minor < other.minor) - return -1; - if (minor > other.minor) - return 1; - - if (patch < other.patch) - return -1; - if (patch > other.patch) - return 1; - - if (dsePatch < 0) { - if (other.dsePatch >= 0) - return -1; - } else { - if (other.dsePatch < 0) - return 1; - - // Both are >= 0 - if (dsePatch < other.dsePatch) - return -1; - if (dsePatch > other.dsePatch) - return 1; - } - - if (preReleases == null) - return other.preReleases == null ? 0 : 1; - if (other.preReleases == null) - return -1; - - for (int i = 0; i < Math.min(preReleases.length, other.preReleases.length); i++) { - int cmp = preReleases[i].compareTo(other.preReleases[i]); - if (cmp != 0) - return cmp; - } - - return preReleases.length == other.preReleases.length ? 0 : (preReleases.length < other.preReleases.length ? -1 : 1); - } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (!(other instanceof VersionNumber)) - return false; - VersionNumber that = (VersionNumber) other; - return this.major == that.major - && this.minor == that.minor - && this.patch == that.patch - && this.dsePatch == that.dsePatch - && (this.preReleases == null ? that.preReleases == null : Arrays.equals(this.preReleases, that.preReleases)) - && MoreObjects.equal(this.build, that.build); - } + if (preReleases == null) return other.preReleases == null ? 0 : 1; + if (other.preReleases == null) return -1; - @Override - public int hashCode() { - return MoreObjects.hashCode(major, minor, patch, dsePatch, preReleases, build); + for (int i = 0; i < Math.min(preReleases.length, other.preReleases.length); i++) { + int cmp = preReleases[i].compareTo(other.preReleases[i]); + if (cmp != 0) return cmp; } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(major).append('.').append(minor).append('.').append(patch); - if (dsePatch >= 0) - sb.append('.').append(dsePatch); - if (preReleases != null) { - for (String preRelease : preReleases) - sb.append('-').append(preRelease); - } - if (build != null) - sb.append('+').append(build); - return sb.toString(); + return preReleases.length == other.preReleases.length + ? 0 + : (preReleases.length < other.preReleases.length ? 
-1 : 1); + } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof VersionNumber)) return false; + VersionNumber that = (VersionNumber) other; + return this.major == that.major + && this.minor == that.minor + && this.patch == that.patch + && this.dsePatch == that.dsePatch + && (this.preReleases == null + ? that.preReleases == null + : Arrays.equals(this.preReleases, that.preReleases)) + && MoreObjects.equal(this.build, that.build); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode(major, minor, patch, dsePatch, Arrays.hashCode(preReleases), build); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(patch); + if (dsePatch >= 0) sb.append('.').append(dsePatch); + if (preReleases != null) { + for (String preRelease : preReleases) sb.append('-').append(preRelease); } + if (build != null) sb.append('+').append(build); + return sb.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java index 80269211db9..332fc125d27 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,36 +19,47 @@ /** * The type of a Cassandra write query. - *

- * This information is returned by Cassandra when a write timeout is raised to - * indicate what type of write timed out. This information is useful to decide - * which retry policy to adopt. + * + *
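To make the "which retry policy to adopt" point concrete, here is a minimal, hypothetical helper (not part of the driver) that decides whether a timed-out write is safe to replay based on its `WriteType` (the constants are listed just below). It is loosely modeled on the conservative behavior of the driver's default retry policy, which only retries `BATCH_LOG` writes.

```java
import com.datastax.driver.core.WriteType;

public class WriteRetryHeuristic {
  /** Returns true only when replaying the write is known to be harmless. */
  static boolean safeToRetry(WriteType writeType, int previousRetries) {
    if (previousRetries > 0) {
      return false; // only ever retry once in this sketch
    }
    switch (writeType) {
      case BATCH_LOG:
        // The timeout hit the internal batch-log write, so the batch itself was never
        // applied; replaying it cannot duplicate data.
        return true;
      case COUNTER:
        // Counter writes must never be replayed blindly: they could over-count.
        return false;
      default:
        // SIMPLE, BATCH, UNLOGGED_BATCH, CAS, VIEW, CDC: the write may already have been
        // applied, so leave the decision to the application (idempotency is unknown here).
        return false;
    }
  }
}
```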

This information is returned by Cassandra when a write timeout is raised to indicate what type + * of write timed out. This information is useful to decide which retry policy to adopt. */ public enum WriteType { - /** - * A write to a single partition key. Such writes are guaranteed to be atomic and isolated. - */ - SIMPLE, - /** - * A write to a multiple partition key that used the distributed batch log to ensure atomicity - * (atomicity meaning that if any statement in the batch succeeds, all will eventually succeed). - */ - BATCH, - /** - * A write to a multiple partition key that doesn't use the distributed batch log. Atomicity for such writes is not guaranteed - */ - UNLOGGED_BATCH, - /** - * A counter write (that can be for one or multiple partition key). Such write should not be replayed to avoid over-counting. - */ - COUNTER, - /** - * The initial write to the distributed batch log that Cassandra performs internally before a BATCH write. - */ - BATCH_LOG, - /** - * A conditional write. If a timeout has this {@code WriteType}, the timeout has happened while doing the compare-and-swap for - * an conditional update. In this case, the update may or may not have been applied. - */ - CAS; + /** A write to a single partition key. Such writes are guaranteed to be atomic and isolated. */ + SIMPLE, + /** + * A write to a multiple partition key that used the distributed batch log to ensure atomicity + * (atomicity meaning that if any statement in the batch succeeds, all will eventually succeed). + */ + BATCH, + /** + * A write to a multiple partition key that doesn't use the distributed batch log. Atomicity for + * such writes is not guaranteed + */ + UNLOGGED_BATCH, + /** + * A counter write (that can be for one or multiple partition key). Such write should not be + * replayed to avoid over-counting. + */ + COUNTER, + /** + * The initial write to the distributed batch log that Cassandra performs internally before a + * BATCH write. + */ + BATCH_LOG, + /** + * A conditional write. If a timeout has this {@code WriteType}, the timeout has happened while + * doing the compare-and-swap for an conditional update. In this case, the update may or may not + * have been applied. + */ + CAS, + /** + * Indicates that the timeout was related to acquiring locks needed for updating materialized + * views affected by write operation. + */ + VIEW, + /** + * Indicates that the timeout was related to acquiring space for change data capture logs for cdc + * tracked tables. + */ + CDC; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java index 4f70885e0c3..692383c9ec9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,119 +17,116 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Exception thrown when a query attempts to create a keyspace or table that already exists. - */ -public class AlreadyExistsException extends QueryValidationException implements CoordinatorException { - - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - private final String keyspace; - private final String table; - - public AlreadyExistsException(String keyspace, String table) { - this(null, keyspace, table); - } - - public AlreadyExistsException(InetSocketAddress address, String keyspace, String table) { - super(makeMsg(keyspace, table)); - this.address = address; - this.keyspace = keyspace; - this.table = table; - } - - private AlreadyExistsException(InetSocketAddress address, String msg, Throwable cause, String keyspace, String table) { - super(msg, cause); - this.address = address; - this.keyspace = keyspace; - this.table = table; - } - - private static String makeMsg(String keyspace, String table) { - if (table.isEmpty()) - return String.format("Keyspace %s already exists", keyspace); - else - return String.format("Table %s.%s already exists", keyspace, table); - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - /** - * Returns whether the query yielding this exception was a table creation - * attempt. - * - * @return {@code true} if this exception is raised following a table - * creation attempt, {@code false} if it was a keyspace creation attempt. - */ - public boolean wasTableCreation() { - return !table.isEmpty(); - } - - /** - * The name of keyspace that either already exists or is home to the table - * that already exists. - * - * @return a keyspace name that is either the keyspace whose creation - * attempt failed because a keyspace of the same name already exists (in - * that case, {@link #table} will return {@code null}), or the keyspace of - * the table creation attempt (in which case {@link #table} will return the - * name of said table). - */ - public String getKeyspace() { - return keyspace; - } - - /** - * If the failed creation was a table creation, the name of the table that already exists. - * - * @return the name of table whose creation attempt failed because a table - * of this name already exists, or {@code null} if the query was a keyspace - * creation query. - */ - public String getTable() { - return table.isEmpty() ? null : table; - } - - @Override - public DriverException copy() { - return new AlreadyExistsException(getAddress(), getMessage(), this, keyspace, table); - } - - /** - * Create a copy of this exception with a nicer stack trace, and including the coordinator - * address that caused this exception to be raised. - *

- * This method is mainly intended for internal use by the driver and exists mainly because:
- *   1. the original exception was decoded from a response frame
- *      and at that time, the coordinator address was not available; and
- *   2. the newly-created exception will refer to the current thread in its stack trace,
- *      which generally yields a more user-friendly stack trace that the original one.
- * - * @param address The full address of the host that caused this exception to be thrown. - * @return a copy/clone of this exception, but with the given host address instead of the original one. - */ - public AlreadyExistsException copy(InetSocketAddress address) { - return new AlreadyExistsException(address, getMessage(), this, keyspace, table); - } - +/** Exception thrown when a query attempts to create a keyspace or table that already exists. */ +public class AlreadyExistsException extends QueryValidationException + implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + private final String keyspace; + private final String table; + + public AlreadyExistsException(String keyspace, String table) { + this(null, keyspace, table); + } + + public AlreadyExistsException(EndPoint endPoint, String keyspace, String table) { + super(makeMsg(keyspace, table)); + this.endPoint = endPoint; + this.keyspace = keyspace; + this.table = table; + } + + private AlreadyExistsException( + EndPoint endPoint, String msg, Throwable cause, String keyspace, String table) { + super(msg, cause); + this.endPoint = endPoint; + this.keyspace = keyspace; + this.table = table; + } + + private static String makeMsg(String keyspace, String table) { + if (table.isEmpty()) return String.format("Keyspace %s already exists", keyspace); + else return String.format("Table %s.%s already exists", keyspace, table); + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + /** + * Returns whether the query yielding this exception was a table creation attempt. + * + * @return {@code true} if this exception is raised following a table creation attempt, {@code + * false} if it was a keyspace creation attempt. + */ + public boolean wasTableCreation() { + return !table.isEmpty(); + } + + /** + * The name of keyspace that either already exists or is home to the table that already exists. + * + * @return a keyspace name that is either the keyspace whose creation attempt failed because a + * keyspace of the same name already exists (in that case, {@link #table} will return {@code + * null}), or the keyspace of the table creation attempt (in which case {@link #table} will + * return the name of said table). + */ + public String getKeyspace() { + return keyspace; + } + + /** + * If the failed creation was a table creation, the name of the table that already exists. + * + * @return the name of table whose creation attempt failed because a table of this name already + * exists, or {@code null} if the query was a keyspace creation query. + */ + public String getTable() { + return table.isEmpty() ? null : table; + } + + @Override + public DriverException copy() { + return new AlreadyExistsException(getEndPoint(), getMessage(), this, keyspace, table); + } + + /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *
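As a usage sketch (the keyspace, table and helper names are hypothetical), application code that creates schema defensively can catch this exception and use `wasTableCreation()`, `getKeyspace()` and `getTable()` to see what actually collided:

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.AlreadyExistsException;

public class SchemaSetup {
  static void createUsersTable(Session session) {
    try {
      session.execute("CREATE TABLE ks.users (id uuid PRIMARY KEY, name text)");
    } catch (AlreadyExistsException e) {
      if (e.wasTableCreation()) {
        // A table of that name exists; getTable() is non-null in this branch.
        System.out.printf("Table %s.%s already exists, skipping%n", e.getKeyspace(), e.getTable());
      } else {
        // Keyspace collision: getTable() would return null here.
        System.out.printf("Keyspace %s already exists, skipping%n", e.getKeyspace());
      }
    }
  }
}
```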

This method is mainly intended for internal use by the driver and exists mainly because:
+ *
+ *   1. the original exception was decoded from a response frame and at that time, the
+ *      coordinator address was not available; and
+ *   2. the newly-created exception will refer to the current thread in its stack trace, which
+ *      generally yields a more user-friendly stack trace than the original one.
+ * + * @param endPoint The full address of the host that caused this exception to be thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public AlreadyExistsException copy(EndPoint endPoint) { + return new AlreadyExistsException(endPoint, getMessage(), this, keyspace, table); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java index 599f808e41a..5d9747a5ba5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,52 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates an error during the authentication phase while connecting to a node. - */ +/** Indicates an error during the authentication phase while connecting to a node. 
*/ public class AuthenticationException extends DriverException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public AuthenticationException(InetSocketAddress address, String message) { - super(String.format("Authentication error on host %s: %s", address, message)); - this.address = address; - } - - private AuthenticationException(InetSocketAddress address, String message, Throwable cause) { - super(message, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public DriverException copy() { - return new AuthenticationException(address, getMessage(), this); - } + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public AuthenticationException(EndPoint endPoint, String message) { + super(String.format("Authentication error on host %s: %s", endPoint, message)); + this.endPoint = endPoint; + } + + // Preserve a constructor with InetSocketAddress for backward compatibility, because legacy + // authenticators might use it + public AuthenticationException(InetSocketAddress address, String message) { + this(new WrappingEndPoint(address), message); + } + + private AuthenticationException(EndPoint endPoint, String message, Throwable cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public DriverException copy() { + return new AuthenticationException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java index 124cff8558a..4f6663a2816 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BootstrappingException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,49 +17,48 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates that the contacted host was bootstrapping when it received a read query. - */ -public class BootstrappingException extends QueryExecutionException implements CoordinatorException { - - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public BootstrappingException(InetSocketAddress address, String message) { - super(String.format("Queried host (%s) was bootstrapping: %s", address, message)); - this.address = address; - } - - /** - * Private constructor used solely when copying exceptions. - */ - private BootstrappingException(InetSocketAddress address, String message, BootstrappingException cause) { - super(message, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public BootstrappingException copy() { - return new BootstrappingException(address, getMessage(), this); - } +/** Indicates that the contacted host was bootstrapping when it received a read query. */ +public class BootstrappingException extends QueryExecutionException + implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public BootstrappingException(EndPoint endPoint, String message) { + super(String.format("Queried host (%s) was bootstrapping: %s", endPoint, message)); + this.endPoint = endPoint; + } + + /** Private constructor used solely when copying exceptions. */ + private BootstrappingException(EndPoint endPoint, String message, BootstrappingException cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public BootstrappingException copy() { + return new BootstrappingException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyConnectionException.java index 723c8b59442..d5b51c9abac 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyConnectionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyConnectionException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,41 +17,46 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates that a connection has run out of stream IDs. - */ +/** Indicates that a connection has run out of stream IDs. */ public class BusyConnectionException extends DriverException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public BusyConnectionException(InetSocketAddress address) { - super(String.format("[%s] Connection has run out of stream IDs", address.getAddress())); - this.address = address; - } - - public BusyConnectionException(InetSocketAddress address, Throwable cause) { - super(String.format("[%s] Connection has run out of stream IDs", address.getAddress()), cause); - this.address = address; - } - - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public BusyConnectionException copy() { - return new BusyConnectionException(address, this); - } - + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public BusyConnectionException(EndPoint endPoint) { + super(String.format("[%s] Connection has run out of stream IDs", endPoint)); + this.endPoint = endPoint; + } + + public BusyConnectionException(EndPoint endPoint, Throwable cause) { + super(String.format("[%s] Connection has run out of stream IDs", endPoint), cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public BusyConnectionException copy() { + return new BusyConnectionException(endPoint, this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyPoolException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyPoolException.java index 8bcd755e94e..e98f7f75da4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyPoolException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/BusyPoolException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,75 +17,85 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Statement; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.concurrent.TimeUnit; /** * Indicates that a connection pool has run out of available connections. - *

- * This happens if the pool has no connections (for example if it's currently reconnecting to its host), or if all - * connections have reached their maximum number of in flight queries. The query will be retried on the next host in the - * {@link com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement) query plan}. - *

- * This exception is a symptom that the driver is experiencing a high workload. If it happens regularly on all hosts, - * you should consider tuning one (or a combination of) the following pooling options: + * + *

This happens if the pool has no connections (for example if it's currently reconnecting to its + * host), or if all connections have reached their maximum number of in flight queries. The query + * will be retried on the next host in the {@link + * com.datastax.driver.core.policies.LoadBalancingPolicy#newQueryPlan(String, Statement) query + * plan}. + * + *

This exception is a symptom that the driver is experiencing a high workload. If it happens + * regularly on all hosts, you should consider tuning one (or a combination of) the following + * pooling options: + * *

- *   • {@link com.datastax.driver.core.PoolingOptions#setMaxRequestsPerConnection(HostDistance, int)}: maximum number of
- *     requests per connection;
- *   • {@link com.datastax.driver.core.PoolingOptions#setMaxConnectionsPerHost(HostDistance, int)}: maximum number of
- *     connections in the pool;
- *   • {@link com.datastax.driver.core.PoolingOptions#setMaxQueueSize(int)}: maximum number of enqueued requests before
- *     this exception is thrown.
+ *   • {@link com.datastax.driver.core.PoolingOptions#setMaxRequestsPerConnection(HostDistance,
+ *     int)}: maximum number of requests per connection;
+ *   • {@link com.datastax.driver.core.PoolingOptions#setMaxConnectionsPerHost(HostDistance,
+ *     int)}: maximum number of connections in the pool;
+ *   • {@link com.datastax.driver.core.PoolingOptions#setMaxQueueSize(int)}: maximum number of
+ *     enqueued requests before this exception is thrown.
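A hedged sketch of what such tuning can look like at cluster-construction time; the numbers and the contact point below are placeholders rather than recommendations, and the calls are the `PoolingOptions` setters listed above (`HostDistance.LOCAL` targets nodes in the local datacenter).

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;

public class PoolingTuning {
  public static void main(String[] args) {
    PoolingOptions poolingOptions =
        new PoolingOptions()
            // Allow more in-flight requests on each connection to local nodes.
            .setMaxRequestsPerConnection(HostDistance.LOCAL, 2048)
            // Allow the pool to grow to more connections per local host.
            .setMaxConnectionsPerHost(HostDistance.LOCAL, 4)
            // Queue more requests before BusyPoolException is thrown.
            .setMaxQueueSize(512);

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withPoolingOptions(poolingOptions)
            .build();
    cluster.close();
  }
}
```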
*/ public class BusyPoolException extends DriverException implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - private final InetSocketAddress address; + private final EndPoint endPoint; - public BusyPoolException(InetSocketAddress address, int queueSize) { - this(address, buildMessage(address, queueSize), null); - } + public BusyPoolException(EndPoint endPoint, int queueSize) { + this(endPoint, buildMessage(endPoint, queueSize), null); + } - public BusyPoolException(InetSocketAddress address, long timeout, TimeUnit unit) { - this(address, buildMessage(address, timeout, unit), null); - } + public BusyPoolException(EndPoint endPoint, long timeout, TimeUnit unit) { + this(endPoint, buildMessage(endPoint, timeout, unit), null); + } - private BusyPoolException(InetSocketAddress address, String message, Throwable cause) { - super(message, cause); - this.address = address; - } + private BusyPoolException(EndPoint endPoint, String message, Throwable cause) { + super(message, cause); + this.endPoint = endPoint; + } - private static String buildMessage(InetSocketAddress address, int queueSize) { - return String.format("[%s] Pool is busy (no available connection and the queue has reached its max size %d)", - address.getAddress(), - queueSize); - } + private static String buildMessage(EndPoint endPoint, int queueSize) { + return String.format( + "[%s] Pool is busy (no available connection and the queue has reached its max size %d)", + endPoint, queueSize); + } - private static String buildMessage(InetSocketAddress address, long timeout, TimeUnit unit) { - return String.format("[%s] Pool is busy (no available connection and timed out after %d %s)", - address.getAddress(), - timeout, unit); - } + private static String buildMessage(EndPoint endPoint, long timeout, TimeUnit unit) { + return String.format( + "[%s] Pool is busy (no available connection and timed out after %d %s)", + endPoint, timeout, unit); + } - @Override - public InetAddress getHost() { - return address.getAddress(); - } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - @Override - public BusyPoolException copy() { - return new BusyPoolException(address, getMessage(), this); - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + @Override + public BusyPoolException copy() { + return new BusyPoolException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java new file mode 100644 index 00000000000..07261fd6fce --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.EndPoint; + +public class CASWriteUnknownException extends QueryConsistencyException { + + private static final long serialVersionUID = 0; + + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public CASWriteUnknownException(ConsistencyLevel consistency, int received, int required) { + this(null, consistency, received, required); + } + + public CASWriteUnknownException( + EndPoint endPoint, ConsistencyLevel consistency, int received, int required) { + super( + endPoint, + String.format( + "CAS operation result is unknown - proposal was not accepted by a quorum. (%d / %d)", + received, required), + consistency, + received, + required); + } + + private CASWriteUnknownException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + int received, + int required) { + super(endPoint, msg, cause, consistency, received, required); + } + + @Override + public CASWriteUnknownException copy() { + return new CASWriteUnknownException( + getEndPoint(), + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } + + /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *
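Since the outcome of the compare-and-swap is genuinely unknown when this exception surfaces, a common pattern is to re-read the row before deciding what to do next. The sketch below is hypothetical (the table, column and helper names are made up) and only uses `Session.execute` and `ResultSet.wasApplied()`.

```java
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.CASWriteUnknownException;

public class CasInsert {
  /** Tries a conditional insert; on an unknown outcome, checks the current state instead. */
  static boolean insertIfNotExists(Session session, String id) {
    try {
      ResultSet rs = session.execute("INSERT INTO ks.users (id) VALUES (?) IF NOT EXISTS", id);
      return rs.wasApplied();
    } catch (CASWriteUnknownException e) {
      // The proposal may or may not have been committed; re-reading tells us whether the
      // row exists now (it does not tell us whether *this* insert created it).
      ResultSet check = session.execute("SELECT id FROM ks.users WHERE id = ?", id);
      return check.one() != null;
    }
  }
}
```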

This method is mainly intended for internal use by the driver and exists mainly because:
+ *
+ *   1. the original exception was decoded from a response frame and at that time, the
+ *      coordinator address was not available; and
+ *   2. the newly-created exception will refer to the current thread in its stack trace, which
+ *      generally yields a more user-friendly stack trace than the original one.
+ * + * @param endPoint The full address of the host that caused this exception to be thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public CASWriteUnknownException copy(EndPoint endPoint) { + return new CASWriteUnknownException( + endPoint, + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java new file mode 100644 index 00000000000..93170eb733e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.EndPoint; +import java.net.InetAddress; +import java.net.InetSocketAddress; + +/** An error occurred when trying to write a CDC mutation to the commitlog * */ +public class CDCWriteException extends QueryExecutionException implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public CDCWriteException(EndPoint endPoint, String message) { + super(message); + this.endPoint = endPoint; + } + + /** Private constructor used solely when copying exceptions. */ + private CDCWriteException(EndPoint endPoint, String message, CDCWriteException cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public CDCWriteException copy() { + return new CDCWriteException(endPoint, getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CodecNotFoundException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CodecNotFoundException.java index 1a1f711c107..93e4fd07bcc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CodecNotFoundException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CodecNotFoundException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,40 +21,41 @@ import com.google.common.reflect.TypeToken; /** - * Thrown when a suitable {@link com.datastax.driver.core.TypeCodec} cannot be found by - * {@link com.datastax.driver.core.CodecRegistry} instances. + * Thrown when a suitable {@link com.datastax.driver.core.TypeCodec} cannot be found by {@link + * com.datastax.driver.core.CodecRegistry} instances. */ @SuppressWarnings("serial") public class CodecNotFoundException extends DriverException { - private final DataType cqlType; + private final DataType cqlType; - private final TypeToken javaType; + private final TypeToken javaType; - public CodecNotFoundException(String msg, DataType cqlType, TypeToken javaType) { - this(msg, null, cqlType, javaType); - } + public CodecNotFoundException(String msg, DataType cqlType, TypeToken javaType) { + this(msg, null, cqlType, javaType); + } - public CodecNotFoundException(Throwable cause, DataType cqlType, TypeToken javaType) { - this(null, cause, cqlType, javaType); - } + public CodecNotFoundException(Throwable cause, DataType cqlType, TypeToken javaType) { + this(null, cause, cqlType, javaType); + } - private CodecNotFoundException(String msg, Throwable cause, DataType cqlType, TypeTokenjavaType) { - super(msg, cause); - this.cqlType = cqlType; - this.javaType = javaType; - } + private CodecNotFoundException( + String msg, Throwable cause, DataType cqlType, TypeToken javaType) { + super(msg, cause); + this.cqlType = cqlType; + this.javaType = javaType; + } - public DataType getCqlType() { - return cqlType; - } + public DataType getCqlType() { + return cqlType; + } - public TypeToken getJavaType() { - return javaType; - } + public TypeToken getJavaType() { + return javaType; + } - @Override - public CodecNotFoundException copy() { - return new CodecNotFoundException(getMessage(), getCause(), getCqlType(), getJavaType()); - } + @Override + public CodecNotFoundException copy() { + return new CodecNotFoundException(getMessage(), getCause(), getCqlType(), getJavaType()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ConnectionException.java index 10c379aa416..4fc36b830cf 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ConnectionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ConnectionException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,51 +17,55 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates that a connection to a host has encountered a problem - * and that it should be closed. - */ +/** Indicates that a connection to a host has encountered a problem and that it should be closed. */ public class ConnectionException extends DriverException implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public final InetSocketAddress address; + private final EndPoint endPoint; - public ConnectionException(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } + public ConnectionException(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } - public ConnectionException(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } + public ConnectionException(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } - @Override - public InetAddress getHost() { - return address == null ? null : address.getAddress(); - } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - @Override - public String getMessage() { - return address == null ? getRawMessage() : String.format("[%s] %s", address, getRawMessage()); - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } - @Override - public ConnectionException copy() { - return new ConnectionException(address, getRawMessage(), this); - } + @Override + public String getMessage() { + return endPoint == null ? getRawMessage() : String.format("[%s] %s", endPoint, getRawMessage()); + } - String getRawMessage() { - return super.getMessage(); - } + @Override + public ConnectionException copy() { + return new ConnectionException(endPoint, getRawMessage(), this); + } + String getRawMessage() { + return super.getMessage(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CoordinatorException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CoordinatorException.java index 23432d75064..ce2474999a9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CoordinatorException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CoordinatorException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,30 +17,40 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** - * An interface for exceptions that are able to report the address of the coordinator host - * that was contacted. + * An interface for exceptions that are able to report the address of the coordinator host that was + * contacted. */ public interface CoordinatorException { - /** - * The coordinator host that was contacted. - *
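A short, hedged sketch of how calling code might use this interface for error reporting (the query and contact point are placeholders); it prefers the new `getEndPoint()` accessor over the deprecated socket-address shortcuts.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.CoordinatorException;
import com.datastax.driver.core.exceptions.DriverException;

public class CoordinatorLogging {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); // placeholder
    try {
      Session session = cluster.connect();
      session.execute("SELECT release_version FROM system.local");
    } catch (DriverException e) {
      if (e instanceof CoordinatorException) {
        // May be null if the coordinator is not known.
        System.err.println("Failed on " + ((CoordinatorException) e).getEndPoint());
      }
      throw e;
    } finally {
      cluster.close();
    }
  }
}
```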

- * This is a shortcut for {@link InetSocketAddress#getAddress() getAddress().getAddress()}. - * - * @return The coordinator host that was contacted; - * may be {@code null} if the coordinator is not known. - */ - InetAddress getHost(); + /** + * The connection information of the coordinator host that was contacted. May be {@code null} if + * the coordinator is not known. + */ + EndPoint getEndPoint(); + + /** + * The coordinator host that was contacted; may be {@code null} if the coordinator is not known. + * + * @deprecated {@link #getEndPoint()} provides more accurate information if the connection + * information consists of more than a socket address. This method is a shortcut for {@code + * getEndPoint().resolve().getAddress()}. + */ + @Deprecated + InetAddress getHost(); - /** - * The full address of the coordinator host that was contacted. - * - * @return the full address of the coordinator host that was contacted; - * may be {@code null} if the coordinator is not known. - */ - InetSocketAddress getAddress(); + /** + * The full address of the coordinator host that was contacted; may be {@code null} if the + * coordinator is not known. + * + * @deprecated {@link #getEndPoint()} provides more accurate information if the connection + * information consists of more than a socket address. This method is a shortcut for {@code + * getEndPoint().resolve()}. + */ + @Deprecated + InetSocketAddress getAddress(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CrcMismatchException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CrcMismatchException.java new file mode 100644 index 00000000000..22ccf21db60 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CrcMismatchException.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +/** + * Thrown when the checksums in a server response don't match (protocol v5 or above). + * + *

This indicates a data corruption issue, either due to a hardware issue on the client, or on + * the network between the server and the client. It is not recoverable: the driver will drop the + * connection. + */ +public class CrcMismatchException extends DriverException { + + private static final long serialVersionUID = 0; + + public CrcMismatchException(String message) { + super(message); + } + + public CrcMismatchException(String message, Throwable cause) { + super(message, cause); + } + + @Override + public CrcMismatchException copy() { + return new CrcMismatchException(getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java index 7c8684053f7..1e806a57cc9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,37 +17,34 @@ */ package com.datastax.driver.core.exceptions; -/** - * Top level class for exceptions thrown by the driver. - */ +/** Top level class for exceptions thrown by the driver. */ public class DriverException extends RuntimeException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public DriverException(String message) { - super(message); - } + public DriverException(String message) { + super(message); + } - public DriverException(Throwable cause) { - super(cause); - } + public DriverException(Throwable cause) { + super(cause); + } - public DriverException(String message, Throwable cause) { - super(message, cause); - } + public DriverException(String message, Throwable cause) { + super(message, cause); + } - /** - * Copy the exception. - *

- * This returns a new exception, equivalent to the original one, except that - * because a new object is created in the current thread, the top-most - * element in the stacktrace of the exception will refer to the current - * thread (this is mainly intended for internal use by the driver). The cause of - * the copied exception will be the original exception. - * - * @return a copy/clone of this exception. - */ - public DriverException copy() { - return new DriverException(getMessage(), this); - } + /** + * Copy the exception. + * + *

This returns a new exception, equivalent to the original one, except that because a new + * object is created in the current thread, the top-most element in the stacktrace of the + * exception will refer to the current thread (this is mainly intended for internal use by the + * driver). The cause of the copied exception will be the original exception. + * + * @return a copy/clone of this exception. + */ + public DriverException copy() { + return new DriverException(getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java index dba39851ecc..d408c968490 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +19,27 @@ /** * An unexpected error happened internally. - *

- * This should never be raised and indicates a bug (either in the driver or in - * Cassandra). + * + *

This should never be raised and indicates a bug (either in the driver or in Cassandra). */ public class DriverInternalError extends DriverException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public DriverInternalError(String message) { - super(message); - } + public DriverInternalError(String message) { + super(message); + } - public DriverInternalError(Throwable cause) { - super(cause); - } + public DriverInternalError(Throwable cause) { + super(cause); + } - public DriverInternalError(String message, Throwable cause) { - super(message, cause); - } + public DriverInternalError(String message, Throwable cause) { + super(message, cause); + } - @Override - public DriverInternalError copy() { - return new DriverInternalError(getMessage(), this); - } + @Override + public DriverInternalError copy() { + return new DriverInternalError(getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/FrameTooLongException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/FrameTooLongException.java index 7b3aced1768..9d15ee8aa6e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/FrameTooLongException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/FrameTooLongException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,35 +18,33 @@ package com.datastax.driver.core.exceptions; /** - * Indicates that the response frame for a request exceeded - * {@link com.datastax.driver.core.Frame.Decoder.DecoderForStreamIdSize#MAX_FRAME_LENGTH} - * (default: 256MB, configurable via com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB - * system property) and thus was not parsed. + * Indicates that the response frame for a request exceeded {@link + * com.datastax.driver.core.Frame.Decoder.DecoderForStreamIdSize#MAX_FRAME_LENGTH} (default: 256MB, + * configurable via com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB system property) and + * thus was not parsed. 
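For illustration only (not part of this patch), a minimal sketch of how the limit and stream id described in the FrameTooLongException javadoc above might be used. The property name and getStreamId() come from the text and code in this diff; the query, the class name, and the assumption that the property must be set before the driver classes load are mine.

import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.exceptions.FrameTooLongException;

class FrameSizeSketch {
  static {
    // Assumption: must be set before any Cluster/Session is created, since the limit
    // is read when the frame decoder is initialized. Value is in MB.
    System.setProperty("com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB", "512");
  }

  void fetch(Session session) {
    try {
      // Hypothetical wide-row query that could produce an oversized response frame.
      session.execute(new SimpleStatement("SELECT payload FROM ks.wide_rows"));
    } catch (FrameTooLongException e) {
      // The response was dropped by the driver; the stream id identifies the request slot.
      System.err.println("Response frame too large on stream " + e.getStreamId());
    }
  }
}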
*/ public class FrameTooLongException extends DriverException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - private final int streamId; + private final int streamId; - public FrameTooLongException(int streamId) { - this(streamId, null); - } + public FrameTooLongException(int streamId) { + this(streamId, null); + } - private FrameTooLongException(int streamId, Throwable cause) { - super("Response frame exceeded maximum allowed length", cause); - this.streamId = streamId; - } + private FrameTooLongException(int streamId, Throwable cause) { + super("Response frame exceeded maximum allowed length", cause); + this.streamId = streamId; + } - /** - * @return The stream id associated with the frame that caused this exception. - */ - public int getStreamId() { - return streamId; - } + /** @return The stream id associated with the frame that caused this exception. */ + public int getStreamId() { + return streamId; + } - @Override - public FrameTooLongException copy() { - return new FrameTooLongException(streamId, this); - } + @Override + public FrameTooLongException copy() { + return new FrameTooLongException(streamId, this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/FunctionExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/FunctionExecutionException.java index d4564dc7718..81f2b62fe23 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/FunctionExecutionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/FunctionExecutionException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,47 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Error during the execution of a function. 
- */ -public class FunctionExecutionException extends QueryExecutionException implements CoordinatorException { - - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public FunctionExecutionException(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } - - private FunctionExecutionException(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public DriverException copy() { - return new FunctionExecutionException(address, getMessage(), this); - } +/** Error during the execution of a function. */ +public class FunctionExecutionException extends QueryExecutionException + implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public FunctionExecutionException(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } + + private FunctionExecutionException(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public DriverException copy() { + return new FunctionExecutionException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java index 8b64291e561..4bdb1b604e8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +17,26 @@ */ package com.datastax.driver.core.exceptions; -import java.net.InetSocketAddress; +import com.datastax.driver.core.EndPoint; /** - * A specific invalid query exception that indicates that the query is invalid - * because of some configuration problem. - *

- * This is generally throw by query that manipulate the schema (CREATE and - * ALTER) when the required configuration options are invalid. + * A specific invalid query exception that indicates that the query is invalid because of some + * configuration problem. + * + *

This is generally throw by query that manipulate the schema (CREATE and ALTER) when the + * required configuration options are invalid. */ -public class InvalidConfigurationInQueryException extends InvalidQueryException implements CoordinatorException { +public class InvalidConfigurationInQueryException extends InvalidQueryException + implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public InvalidConfigurationInQueryException(InetSocketAddress address, String msg) { - super(address, msg); - } + public InvalidConfigurationInQueryException(EndPoint endPoint, String msg) { + super(endPoint, msg); + } - @Override - public InvalidConfigurationInQueryException copy() { - return new InvalidConfigurationInQueryException(getAddress(), getMessage()); - } + @Override + public InvalidConfigurationInQueryException copy() { + return new InvalidConfigurationInQueryException(getEndPoint(), getMessage()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java index 5bb8892afe5..5bdc6451350 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,48 +17,55 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates a syntactically correct but invalid query. - */ -public class InvalidQueryException extends QueryValidationException implements CoordinatorException { +/** Indicates a syntactically correct but invalid query. 
*/ +public class InvalidQueryException extends QueryValidationException + implements CoordinatorException { + + private static final long serialVersionUID = 0; - private static final long serialVersionUID = 0; + private final EndPoint endPoint; - private final InetSocketAddress address; + public InvalidQueryException(String msg) { + this(null, msg); + } - public InvalidQueryException(String msg) { - this(null, msg); - } + public InvalidQueryException(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } - public InvalidQueryException(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } + public InvalidQueryException(String msg, Throwable cause) { + this(null, msg, cause); + } - public InvalidQueryException(String msg, Throwable cause) { - this(null, msg, cause); - } + public InvalidQueryException(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } - public InvalidQueryException(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } + @Override + public DriverException copy() { + return new InvalidQueryException(getEndPoint(), getMessage(), this); + } - @Override - public DriverException copy() { - return new InvalidQueryException(getAddress(), getMessage(), this); - } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - @Override - public InetAddress getHost() { - return address != null ? address.getAddress() : null; - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java index 249e6a3cdca..e6829dcfd78 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,26 +18,24 @@ package com.datastax.driver.core.exceptions; /** - * Thrown when a {@link com.datastax.driver.core.TypeCodec} - * is unable to perform the requested operation (serialization, - * deserialization, parsing or formatting) because the - * object or the byte buffer content being processed does not - * comply with the expected Java and/or CQL type. 
+ * Thrown when a {@link com.datastax.driver.core.TypeCodec} is unable to perform the requested + * operation (serialization, deserialization, parsing or formatting) because the object or the byte + * buffer content being processed does not comply with the expected Java and/or CQL type. */ public class InvalidTypeException extends DriverException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public InvalidTypeException(String msg) { - super(msg); - } + public InvalidTypeException(String msg) { + super(msg); + } - public InvalidTypeException(String msg, Throwable cause) { - super(msg, cause); - } + public InvalidTypeException(String msg, Throwable cause) { + super(msg, cause); + } - @Override - public InvalidTypeException copy() { - return new InvalidTypeException(getMessage(), this); - } + @Override + public InvalidTypeException copy() { + return new InvalidTypeException(getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java index e3dd6f49001..06c20f88d77 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,111 +17,114 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.io.PrintWriter; import java.io.StringWriter; -import java.net.InetSocketAddress; import java.util.HashMap; import java.util.Map; /** - * Exception thrown when a query cannot be performed because no host is - * available. - *

- * This exception is thrown if either: + * Exception thrown when a query cannot be performed because no host is available. + * + *

This exception is thrown if either: + * *

- * <li>there is no host live in the cluster at the moment of the query;</li>
- * <li>all hosts that have been tried have failed.</li>
+ *   <li>there is no host live in the cluster at the moment of the query;
+ *   <li>all hosts that have been tried have failed.
  * </ul>
- * <p/>

- * For debugging purposes, the list of hosts that have been tried along with the - * failure cause can be retrieved using the {@link #getErrors()} method. + * + *

For debugging purposes, the list of hosts that have been tried along with the failure cause + * can be retrieved using the {@link #getErrors()} method. */ public class NoHostAvailableException extends DriverException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - private static final int MAX_ERRORS_IN_DEFAULT_MESSAGE = 3; + private static final int MAX_ERRORS_IN_DEFAULT_MESSAGE = 3; - private final Map errors; + private final Map errors; - public NoHostAvailableException(Map errors) { - super(makeMessage(errors, MAX_ERRORS_IN_DEFAULT_MESSAGE, false, false)); - this.errors = errors; - } + public NoHostAvailableException(Map errors) { + super(makeMessage(errors, MAX_ERRORS_IN_DEFAULT_MESSAGE, false, false)); + this.errors = errors; + } - private NoHostAvailableException(String message, Throwable cause, Map errors) { - super(message, cause); - this.errors = errors; - } + private NoHostAvailableException( + String message, Throwable cause, Map errors) { + super(message, cause); + this.errors = errors; + } - /** - * Return the hosts tried along with the error encountered while trying - * them. - * - * @return a map containing for each tried host the error triggered when - * trying it. - */ - public Map getErrors() { - return new HashMap(errors); - } + /** + * Return the hosts tried along with the error encountered while trying them. + * + * @return a map containing for each tried host the error triggered when trying it. + */ + public Map getErrors() { + return new HashMap(errors); + } - /** - * Builds a custom message for this exception. - * - * @param maxErrors the maximum number of errors displayed (useful to limit the size of the message for big clusters). Beyond this limit, - * host names are still displayed, but not the associated errors. Set to {@code Integer.MAX_VALUE} to display all hosts. - * @param formatted whether to format the output (line break between each host). - * @param includeStackTraces whether to include the full stacktrace of each host error. Note that this automatically implies - * {@code formatted}. - * @return the message. - */ - public String getCustomMessage(int maxErrors, boolean formatted, boolean includeStackTraces) { - if (includeStackTraces) - formatted = true; - return makeMessage(errors, maxErrors, formatted, includeStackTraces); - } + /** + * Builds a custom message for this exception. + * + * @param maxErrors the maximum number of errors displayed (useful to limit the size of the + * message for big clusters). Beyond this limit, host names are still displayed, but not the + * associated errors. Set to {@code Integer.MAX_VALUE} to display all hosts. + * @param formatted whether to format the output (line break between each host). + * @param includeStackTraces whether to include the full stacktrace of each host error. Note that + * this automatically implies {@code formatted}. + * @return the message. 
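As a hedged usage sketch of the two diagnostics described above (not part of the patch): it assumes that, after this change, getErrors() is keyed by EndPoint (the generics are not visible in this rendering), and the CQL string is arbitrary.

import com.datastax.driver.core.EndPoint;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.NoHostAvailableException;
import java.util.Map;

class NoHostDiagnostics {
  void query(Session session) {
    try {
      session.execute("SELECT release_version FROM system.local");
    } catch (NoHostAvailableException e) {
      // The default message truncates after a few hosts; request all of them,
      // formatted one per line, with each host's stack trace.
      System.err.println(e.getCustomMessage(Integer.MAX_VALUE, true, true));
      for (Map.Entry<EndPoint, Throwable> entry : e.getErrors().entrySet()) {
        System.err.println(entry.getKey() + " -> " + entry.getValue());
      }
    }
  }
}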
+ */ + public String getCustomMessage(int maxErrors, boolean formatted, boolean includeStackTraces) { + if (includeStackTraces) formatted = true; + return makeMessage(errors, maxErrors, formatted, includeStackTraces); + } - @Override - public NoHostAvailableException copy() { - return new NoHostAvailableException(getMessage(), this, errors); - } + @Override + public NoHostAvailableException copy() { + return new NoHostAvailableException(getMessage(), this, errors); + } - private static String makeMessage(Map errors, int maxErrorsInMessage, boolean formatted, boolean includeStackTraces) { - if (errors.size() == 0) - return "All host(s) tried for query failed (no host was tried)"; + private static String makeMessage( + Map errors, + int maxErrorsInMessage, + boolean formatted, + boolean includeStackTraces) { + if (errors.size() == 0) return "All host(s) tried for query failed (no host was tried)"; - StringWriter stringWriter = new StringWriter(); - PrintWriter out = new PrintWriter(stringWriter); + StringWriter stringWriter = new StringWriter(); + PrintWriter out = new PrintWriter(stringWriter); - out.print("All host(s) tried for query failed (tried:"); - out.print(formatted ? "\n" : " "); + out.print("All host(s) tried for query failed (tried:"); + out.print(formatted ? "\n" : " "); - int n = 0; - boolean truncated = false; - for (Map.Entry entry : errors.entrySet()) { - if (n > 0) out.print(formatted ? "\n" : ", "); - out.print(entry.getKey()); - if (n < maxErrorsInMessage) { - if (includeStackTraces) { - out.print("\n"); - entry.getValue().printStackTrace(out); - out.print("\n"); - } else { - out.printf(" (%s)", entry.getValue()); - } - } else { - truncated = true; - } - n += 1; + int n = 0; + boolean truncated = false; + for (Map.Entry entry : errors.entrySet()) { + if (n > 0) out.print(formatted ? "\n" : ", "); + out.print(entry.getKey()); + if (n < maxErrorsInMessage) { + if (includeStackTraces) { + out.print("\n"); + entry.getValue().printStackTrace(out); + out.print("\n"); + } else { + out.printf(" (%s)", entry.getValue()); } - if (truncated) { - out.print(formatted ? "\n" : " "); - out.printf("[only showing errors of first %d hosts, use getErrors() for more details]", maxErrorsInMessage); - } - if (formatted && !includeStackTraces) - out.print("\n"); - out.print(")"); - out.close(); - return stringWriter.toString(); + } else { + truncated = true; + } + n += 1; + } + if (truncated) { + out.print(formatted ? "\n" : " "); + out.printf( + "[only showing errors of first %d hosts, use getErrors() for more details]", + maxErrorsInMessage); } + if (formatted && !includeStackTraces) out.print("\n"); + out.print(")"); + out.close(); + return stringWriter.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/OperationTimedOutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OperationTimedOutException.java index 93f8f778d59..4c7b8b93923 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/OperationTimedOutException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OperationTimedOutException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,33 +17,31 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.SocketOptions; -import java.net.InetSocketAddress; - /** * Thrown on a client-side timeout, i.e. when the client didn't hear back from the server within * {@link SocketOptions#getReadTimeoutMillis()}. */ public class OperationTimedOutException extends ConnectionException { - private static final long serialVersionUID = 0; - - public OperationTimedOutException(InetSocketAddress address) { - super(address, "Operation timed out"); - } + private static final long serialVersionUID = 0; - public OperationTimedOutException(InetSocketAddress address, String msg) { - super(address, msg); - } + public OperationTimedOutException(EndPoint endPoint) { + super(endPoint, "Operation timed out"); + } - public OperationTimedOutException(InetSocketAddress address, String msg, Throwable cause) { - super(address, msg, cause); - } + public OperationTimedOutException(EndPoint endPoint, String msg) { + super(endPoint, msg); + } - @Override - public OperationTimedOutException copy() { - return new OperationTimedOutException(address, getRawMessage(), this); - } + public OperationTimedOutException(EndPoint endPoint, String msg, Throwable cause) { + super(endPoint, msg, cause); + } + @Override + public OperationTimedOutException copy() { + return new OperationTimedOutException(getEndPoint(), getRawMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java index fdbd658f900..42bc0fbb79d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/OverloadedException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,49 +17,47 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates that the contacted host reported itself being overloaded. - */ +/** Indicates that the contacted host reported itself being overloaded. */ public class OverloadedException extends QueryExecutionException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public OverloadedException(InetSocketAddress address, String message) { - super(String.format("Queried host (%s) was overloaded: %s", address, message)); - this.address = address; - } - - /** - * Private constructor used solely when copying exceptions. - */ - private OverloadedException(InetSocketAddress address, String message, OverloadedException cause) { - super(message, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public OverloadedException copy() { - return new OverloadedException(address, getMessage(), this); - } + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public OverloadedException(EndPoint endPoint, String message) { + super(String.format("Queried host (%s) was overloaded: %s", endPoint, message)); + this.endPoint = endPoint; + } + + /** Private constructor used solely when copying exceptions. */ + private OverloadedException(EndPoint endPoint, String message, OverloadedException cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public OverloadedException copy() { + return new OverloadedException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java index 8736fcd2674..0e05f0d9b43 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/PagingStateException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,22 +18,21 @@ package com.datastax.driver.core.exceptions; /** - * Indicates an error while deserializing a (previously serialized) - * {@link com.datastax.driver.core.PagingState} object, - * or when a paging state does not match the statement being executed. + * Indicates an error while deserializing a (previously serialized) {@link + * com.datastax.driver.core.PagingState} object, or when a paging state does not match the statement + * being executed. * * @see com.datastax.driver.core.PagingState */ public class PagingStateException extends DriverException { - private static final long serialVersionUID = 0; - - public PagingStateException(String msg) { - super(msg); - } + private static final long serialVersionUID = 0; - public PagingStateException(String msg, Throwable cause) { - super(msg, cause); - } + public PagingStateException(String msg) { + super(msg); + } + public PagingStateException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ProtocolError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ProtocolError.java index 3f5cc6a32b7..6628fb0fb90 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ProtocolError.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ProtocolError.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,53 +17,54 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** - * Indicates that the contacted host reported a protocol error. - * Protocol errors indicate that the client triggered a protocol - * violation (for instance, a QUERY message is sent before a STARTUP one has been sent). - * Protocol errors should be considered as a bug in the driver and reported as such. + * Indicates that the contacted host reported a protocol error. Protocol errors indicate that the + * client triggered a protocol violation (for instance, a QUERY message is sent before a STARTUP one + * has been sent). Protocol errors should be considered as a bug in the driver and reported as such. 
*/ public class ProtocolError extends DriverInternalError implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - private final InetSocketAddress address; + private final EndPoint endPoint; - public ProtocolError(InetSocketAddress address, String message) { - super(String.format("An unexpected protocol error occurred on host %s. This is a bug in this library, please report: %s", address, message)); - this.address = address; - } + public ProtocolError(EndPoint endPoint, String message) { + super( + String.format( + "An unexpected protocol error occurred on host %s. This is a bug in this library, please report: %s", + endPoint, message)); + this.endPoint = endPoint; + } - /** - * Private constructor used solely when copying exceptions. - */ - private ProtocolError(InetSocketAddress address, String message, ProtocolError cause) { - super(message, cause); - this.address = address; - } + /** Private constructor used solely when copying exceptions. */ + private ProtocolError(EndPoint endPoint, String message, ProtocolError cause) { + super(message, cause); + this.endPoint = endPoint; + } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } - @Override - public ProtocolError copy() { - return new ProtocolError(address, getMessage(), this); - } + @Override + public ProtocolError copy() { + return new ProtocolError(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryConsistencyException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryConsistencyException.java index 018e41de652..e82306270e3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryConsistencyException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryConsistencyException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,96 +18,105 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; - +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** * A failure to reach the required consistency level during the execution of a query. - *

- * Such an exception is returned when the query has been tried by Cassandra but - * cannot be achieved with the requested consistency level because either: + * + *

Such an exception is returned when the query has been tried by Cassandra but cannot be + * achieved with the requested consistency level because either: + * *

- * <li>the coordinator did not receive enough replica responses within the rpc timeout
- * set for Cassandra;</li>
- * <li>some replicas replied with an error.</li>
- * </ul>.
+ *   <li>the coordinator did not receive enough replica responses within the rpc timeout set for
+ *       Cassandra;
  • some replicas replied with an error. + * + * + * . */ @SuppressWarnings("serial") -public abstract class QueryConsistencyException extends QueryExecutionException implements CoordinatorException { +public abstract class QueryConsistencyException extends QueryExecutionException + implements CoordinatorException { + + private final EndPoint endPoint; + private final ConsistencyLevel consistency; + private final int received; + private final int required; - private final InetSocketAddress address; - private final ConsistencyLevel consistency; - private final int received; - private final int required; + protected QueryConsistencyException( + EndPoint endPoint, String msg, ConsistencyLevel consistency, int received, int required) { + super(msg); + this.endPoint = endPoint; + this.consistency = consistency; + this.received = received; + this.required = required; + } - protected QueryConsistencyException(InetSocketAddress address, String msg, ConsistencyLevel consistency, int received, int required) { - super(msg); - this.address = address; - this.consistency = consistency; - this.received = received; - this.required = required; - } + protected QueryConsistencyException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + int received, + int required) { + super(msg, cause); + this.endPoint = endPoint; + this.consistency = consistency; + this.received = received; + this.required = required; + } - protected QueryConsistencyException(InetSocketAddress address, String msg, Throwable cause, ConsistencyLevel consistency, int received, int required) { - super(msg, cause); - this.address = address; - this.consistency = consistency; - this.received = received; - this.required = required; - } + /** + * The consistency level of the operation that failed. + * + * @return the consistency level of the operation that failed. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } - /** - * The consistency level of the operation that failed. - * - * @return the consistency level of the operation that failed. - */ - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } + /** + * The number of replicas that had acknowledged/responded to the operation before it failed. + * + * @return the number of replica that had acknowledged/responded the operation before it failed. + */ + public int getReceivedAcknowledgements() { + return received; + } - /** - * The number of replica that had acknowledged/responded to the operation - * before it failed. - * - * @return the number of replica that had acknowledged/responded the - * operation before it failed. - */ - public int getReceivedAcknowledgements() { - return received; - } + /** + * The minimum number of replica acknowledgements/responses that were required to fulfill the + * operation. + * + * @return The minimum number of replica acknowledgements/response that were required to fulfill + * the operation. + */ + public int getRequiredAcknowledgements() { + return required; + } - /** - * The minimum number of replica acknowledgements/responses that were - * required to fulfill the operation. - * - * @return The minimum number of replica acknowledgements/response that - * were required to fulfill the operation. - */ - public int getRequiredAcknowledgements() { - return required; - } + /** + * {@inheritDoc} + * + *

    Note that this is the information of the host that coordinated the query, not the + * one that timed out. + */ + @Override + public EndPoint getEndPoint() { + return endPoint; + } - /** - * The coordinator host that caused this exception to be thrown. - * Note that this is the query coordinator host, not the host which timed out. - * - * @return The coordinator host that caused this exception to be thrown, or {@code null} if this exception has been generated driver-side. - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - /** - * The full address of the coordinator host that caused this exception to be thrown. - * Note that this is the query coordinator host, not the host which timed out. - * - * @return the full address of the coordinator host that caused this exception to be thrown, - * or {@code null} if this exception has been generated driver-side. - */ - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java index 4f38e49948f..67b8a2747a7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,18 +19,18 @@ /** * Exception related to the execution of a query. - *

    - * This corresponds to the exception that Cassandra throws when a (valid) query - * cannot be executed (TimeoutException, UnavailableException, ...). + * + *

    This corresponds to the exception that Cassandra throws when a (valid) query cannot be + * executed (TimeoutException, UnavailableException, ...). */ @SuppressWarnings("serial") public abstract class QueryExecutionException extends DriverException { - protected QueryExecutionException(String msg) { - super(msg); - } + protected QueryExecutionException(String msg) { + super(msg); + } - protected QueryExecutionException(String msg, Throwable cause) { - super(msg, cause); - } + protected QueryExecutionException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java index 8e6572f7e0d..68707128814 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,17 +18,17 @@ package com.datastax.driver.core.exceptions; /** - * An exception indicating that a query cannot be executed because it is - * syntactically incorrect, invalid, unauthorized or any other reason. + * An exception indicating that a query cannot be executed because it is syntactically incorrect, + * invalid, unauthorized or any other reason. */ @SuppressWarnings("serial") public abstract class QueryValidationException extends DriverException { - protected QueryValidationException(String msg) { - super(msg); - } + protected QueryValidationException(String msg) { + super(msg); + } - protected QueryValidationException(String msg, Throwable cause) { - super(msg, cause); - } + protected QueryValidationException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadFailureException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadFailureException.java index 43db5ef7aa9..b1e76b1e739 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadFailureException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadFailureException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,130 +18,180 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; - +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; -import java.net.InetSocketAddress; import java.util.Collections; import java.util.Map; /** * A non-timeout error during a read query. - *

    - * This happens when some of the replicas that were contacted by the coordinator replied with an error. + * + *

    This happens when some of the replicas that were contacted by the coordinator replied with an + * error. */ @SuppressWarnings("serial") public class ReadFailureException extends QueryConsistencyException { - private final int failed; - private final boolean dataPresent; - private final Map failuresMap; + private final int failed; + private final boolean dataPresent; + private final Map failuresMap; - /** - * This constructor should only be used internally by the driver - * when decoding error responses. - */ - public ReadFailureException(ConsistencyLevel consistency, int received, int required, int failed, Map failuresMap, boolean dataPresent) { - this(null, consistency, received, required, failed, failuresMap, dataPresent); - } + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public ReadFailureException( + ConsistencyLevel consistency, + int received, + int required, + int failed, + Map failuresMap, + boolean dataPresent) { + this(null, consistency, received, required, failed, failuresMap, dataPresent); + } - /** - * @deprecated Legacy constructor for backward compatibility. - */ - @Deprecated - public ReadFailureException(ConsistencyLevel consistency, int received, int required, int failed, boolean dataPresent) { - this(null, consistency, received, required, failed, Collections.emptyMap(), dataPresent); - } + /** @deprecated Legacy constructor for backward compatibility. */ + @Deprecated + public ReadFailureException( + ConsistencyLevel consistency, int received, int required, int failed, boolean dataPresent) { + this( + null, + consistency, + received, + required, + failed, + Collections.emptyMap(), + dataPresent); + } - public ReadFailureException(InetSocketAddress address, ConsistencyLevel consistency, int received, int required, int failed, Map failuresMap, boolean dataPresent) { - super(address, String.format("Cassandra failure during read query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistency, required, received, failed), - consistency, - received, - required); - this.failed = failed; - this.failuresMap = failuresMap; - this.dataPresent = dataPresent; - } + public ReadFailureException( + EndPoint endPoint, + ConsistencyLevel consistency, + int received, + int required, + int failed, + Map failuresMap, + boolean dataPresent) { + super( + endPoint, + String.format( + "Cassandra failure during read query at consistency %s " + + "(%d responses were required but only %d replica responded, %d failed)", + consistency, required, received, failed), + consistency, + received, + required); + this.failed = failed; + this.failuresMap = failuresMap; + this.dataPresent = dataPresent; + } - /** - * @deprecated Legacy constructor for backward compatibility. - */ - @Deprecated - public ReadFailureException(InetSocketAddress address, ConsistencyLevel consistency, int received, int required, int failed, boolean dataPresent) { - this(address, consistency, received, required, failed, Collections.emptyMap(), dataPresent); - } + /** @deprecated Legacy constructor for backward compatibility. 
*/ + @Deprecated + public ReadFailureException( + EndPoint endPoint, + ConsistencyLevel consistency, + int received, + int required, + int failed, + boolean dataPresent) { + this( + endPoint, + consistency, + received, + required, + failed, + Collections.emptyMap(), + dataPresent); + } - private ReadFailureException(InetSocketAddress address, String msg, Throwable cause, ConsistencyLevel consistency, int received, int required, int failed, Map failuresMap, boolean dataPresent) { - super(address, msg, cause, consistency, received, required); - this.failed = failed; - this.failuresMap = failuresMap; - this.dataPresent = dataPresent; - } + private ReadFailureException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + int received, + int required, + int failed, + Map failuresMap, + boolean dataPresent) { + super(endPoint, msg, cause, consistency, received, required); + this.failed = failed; + this.failuresMap = failuresMap; + this.dataPresent = dataPresent; + } - /** - * Returns the number of replicas that experienced a failure while executing the request. - * - * @return the number of failures. - */ - public int getFailures() { - return failed; - } + /** + * Returns the number of replicas that experienced a failure while executing the request. + * + * @return the number of failures. + */ + public int getFailures() { + return failed; + } - /** - * Returns the a failure reason code for each node that failed. - *

- * At the time of writing, the existing reason codes are:
- * <ul>
- * <li>{@code 0x0000}: the error does not have a specific code assigned yet, or the cause is - * unknown.</li>
- * <li>{@code 0x0001}: The read operation scanned too many tombstones (as defined by - * {@code tombstone_failure_threshold} in {@code cassandra.yaml}, causing a - * {@code TombstoneOverwhelmingException}.</li>
- * </ul>
- * (please refer to the Cassandra documentation for your version for the most up-to-date list - * of errors)
- *
- * This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - * - * @return a map of IP addresses to failure codes. - */ - public Map getFailuresMap() { - return failuresMap; - } + /** + * Returns a failure reason code for each node that failed. + * + *

<p>At the time of writing, the existing reason codes are:
+ *
+ * <ul>
+ *   <li>{@code 0x0000}: the error does not have a specific code assigned yet, or the cause is + * unknown.
+ *   <li>{@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code + * tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code + * TombstoneOverwhelmingException}.
+ * </ul>
+ *
+ * (please refer to the Cassandra documentation for your version for the most up-to-date list of + * errors)
+ *
+ * <p>This feature is available for protocol v5 or above only. With lower protocol versions, the + * map will always be empty. + * + * @return a map of IP addresses to failure codes. + */ + public Map getFailuresMap() { + return failuresMap; + } - /** - * Whether the actual data was amongst the received replica responses. - *
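Illustrative aside (not part of the patch): caller-side handling of the failure map described above might look roughly like this sketch. It assumes the 3.x signature `Map<InetAddress, Integer>` for `getFailuresMap()` (the generics are elided in the hunk above); the session, keyspace and query are placeholders.

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.ReadFailureException;
import java.net.InetAddress;
import java.util.Map;

public class ReadFailureHandling {
  // Sketch only: inspect per-replica failure codes after a failed read.
  static void readWithDiagnostics(Session session) {
    try {
      session.execute("SELECT * FROM ks.tbl WHERE pk = 1"); // placeholder query
    } catch (ReadFailureException e) {
      // Per-replica reason codes; empty unless the connection uses protocol v5 or above.
      for (Map.Entry<InetAddress, Integer> replica : e.getFailuresMap().entrySet()) {
        System.err.printf("replica %s failed with code 0x%04x%n", replica.getKey(), replica.getValue());
      }
      System.err.println("data present in responses: " + e.wasDataRetrieved());
    }
  }
}
```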

    - * During reads, Cassandra doesn't request data from every replica to - * minimize internal network traffic. Instead, some replicas are only asked - * for a checksum of the data. A read timeout may occurred even if enough - * replicas have responded to fulfill the consistency level if only checksum - * responses have been received. This method allows to detect that case. - * - * @return whether the data was amongst the received replica responses. - */ - public boolean wasDataRetrieved() { - return dataPresent; - } + /** + * Whether the actual data was amongst the received replica responses. + * + *

    During reads, Cassandra doesn't request data from every replica to minimize internal network + * traffic. Instead, some replicas are only asked for a checksum of the data. A read timeout may + * occurred even if enough replicas have responded to fulfill the consistency level if only + * checksum responses have been received. This method allows to detect that case. + * + * @return whether the data was amongst the received replica responses. + */ + public boolean wasDataRetrieved() { + return dataPresent; + } - @Override - public ReadFailureException copy() { - return new ReadFailureException(getAddress(), getMessage(), this, getConsistencyLevel(), getReceivedAcknowledgements(), - getRequiredAcknowledgements(), getFailures(), getFailuresMap(), wasDataRetrieved()); - } + @Override + public ReadFailureException copy() { + return new ReadFailureException( + getEndPoint(), + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + getFailures(), + getFailuresMap(), + wasDataRetrieved()); + } - public ReadFailureException copy(InetSocketAddress address) { - return new ReadFailureException( - address, - getMessage(), - this, - getConsistencyLevel(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements(), - failed, - getFailuresMap(), - dataPresent); - } + public ReadFailureException copy(EndPoint endPoint) { + return new ReadFailureException( + endPoint, + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + failed, + getFailuresMap(), + dataPresent); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java index e1da94e098e..c9803ccd43f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,104 +18,112 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.EndPoint; -import java.net.InetSocketAddress; - -/** - * A Cassandra timeout during a read query. - */ +/** A Cassandra timeout during a read query. */ public class ReadTimeoutException extends QueryConsistencyException { - private static final long serialVersionUID = 0; - - private final boolean dataPresent; + private static final long serialVersionUID = 0; - /** - * This constructor should only be used internally by the driver - * when decoding error responses. 
- */ - public ReadTimeoutException(ConsistencyLevel consistency, int received, int required, boolean dataPresent) { - this(null, consistency, received, required, dataPresent); - } + private final boolean dataPresent; - public ReadTimeoutException(InetSocketAddress address, ConsistencyLevel consistency, int received, int required, boolean dataPresent) { - super( - address, - String.format("Cassandra timeout during read query at consistency %s (%s)", consistency, formatDetails(received, required, dataPresent)), - consistency, - received, - required - ); - this.dataPresent = dataPresent; - } + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public ReadTimeoutException( + ConsistencyLevel consistency, int received, int required, boolean dataPresent) { + this(null, consistency, received, required, dataPresent); + } - private ReadTimeoutException(InetSocketAddress address, String msg, Throwable cause, ConsistencyLevel consistency, int received, int required, boolean dataPresent) { - super(address, msg, cause, consistency, received, required); - this.dataPresent = dataPresent; - } + public ReadTimeoutException( + EndPoint endPoint, + ConsistencyLevel consistency, + int received, + int required, + boolean dataPresent) { + super( + endPoint, + String.format( + "Cassandra timeout during read query at consistency %s (%s). " + + "In case this was generated during read repair, the consistency level is not representative of the actual consistency.", + consistency, formatDetails(received, required, dataPresent)), + consistency, + received, + required); + this.dataPresent = dataPresent; + } - private static String formatDetails(int received, int required, boolean dataPresent) { - if (received < required) - return String.format("%d responses were required but only %d replica responded", required, received); - else if (!dataPresent) - return "the replica queried for data didn't respond"; - else - return "timeout while waiting for repair of inconsistent replica"; - } + private ReadTimeoutException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + int received, + int required, + boolean dataPresent) { + super(endPoint, msg, cause, consistency, received, required); + this.dataPresent = dataPresent; + } - /** - * Whether the actual data was amongst the received replica responses. - *

    - * During reads, Cassandra doesn't request data from every replica to - * minimize internal network traffic. Instead, some replicas are only asked - * for a checksum of the data. A read timeout may occurred even if enough - * replicas have responded to fulfill the consistency level if only checksum - * responses have been received. This method allows to detect that case. - * - * @return whether the data was amongst the received replica responses. - */ - public boolean wasDataRetrieved() { - return dataPresent; - } + private static String formatDetails(int received, int required, boolean dataPresent) { + if (received < required) + return String.format( + "%d responses were required but only %d replica responded", required, received); + else if (!dataPresent) return "the replica queried for data didn't respond"; + else return "timeout while waiting for repair of inconsistent replica"; + } - @Override - public ReadTimeoutException copy() { - return new ReadTimeoutException( - getAddress(), - getMessage(), - this, - getConsistencyLevel(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements(), - wasDataRetrieved() - ); - } + /** + * Whether the actual data was amongst the received replica responses. + * + *

    During reads, Cassandra doesn't request data from every replica to minimize internal network + * traffic. Instead, some replicas are only asked for a checksum of the data. A read timeout may + * have occurred even if enough replicas have responded to fulfill the consistency level, if only + * checksum responses have been received. This method allows to detect that case. + * + * @return whether the data was amongst the received replica responses. + */ + public boolean wasDataRetrieved() { + return dataPresent; + } - /** - * Create a copy of this exception with a nicer stack trace, and including the coordinator - * address that caused this exception to be raised. - *

- * This method is mainly intended for internal use by the driver and exists mainly because:
- * <ol>
- * <li>the original exception was decoded from a response frame - * and at that time, the coordinator address was not available; and</li>
- * <li>the newly-created exception will refer to the current thread in its stack trace, - * which generally yields a more user-friendly stack trace that the original one.</li>
- * </ol>
- *
- * @param address The full address of the host that caused this exception to be thrown. - * @return a copy/clone of this exception, but with the given host address instead of the original one. - */ - public ReadTimeoutException copy(InetSocketAddress address) { - return new ReadTimeoutException( - address, - getMessage(), - this, - getConsistencyLevel(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements(), - wasDataRetrieved() - ); - }
+ /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *

<p>This method is mainly intended for internal use by the driver and exists mainly because:
+ *
+ * <ol>
+ *   <li>the original exception was decoded from a response frame and at that time, the + * coordinator address was not available; and
+ *   <li>the newly-created exception will refer to the current thread in its stack trace, which + * generally yields a more user-friendly stack trace than the original one.
+ * </ol>
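Illustrative only (not part of the diff): a minimal sketch of the copy-and-rethrow pattern this javadoc describes. The helper and its name are hypothetical; the point is that the decoded exception carries no coordinator and a stack trace from the thread that decoded the response frame, while `copy(EndPoint)` clones it with the coordinator attached and a stack trace taken from the current (caller) thread.

```java
import com.datastax.driver.core.EndPoint;
import com.datastax.driver.core.exceptions.ReadTimeoutException;

public class CoordinatorRethrow {
  // Hypothetical helper: re-create the decoded timeout once the coordinator is known.
  static ReadTimeoutException withCoordinator(ReadTimeoutException decoded, EndPoint coordinator) {
    return decoded.copy(coordinator);
  }
}
```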
    + * + * @param endPoint The full address of the host that caused this exception to be thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public ReadTimeoutException copy(EndPoint endPoint) { + return new ReadTimeoutException( + endPoint, + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + wasDataRetrieved()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ServerError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ServerError.java index 87a503c18e8..f616cb74d11 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ServerError.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ServerError.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,51 +17,50 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** - * Indicates that the contacted host reported an internal error. - * This should be considered as a bug in Cassandra and reported as such. + * Indicates that the contacted host reported an internal error. This should be considered as a bug + * in Cassandra and reported as such. */ public class ServerError extends DriverInternalError implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - private final InetSocketAddress address; + private final EndPoint endPoint; - public ServerError(InetSocketAddress address, String message) { - super(String.format("An unexpected error occurred server side on %s: %s", address, message)); - this.address = address; - } + public ServerError(EndPoint endPoint, String message) { + super(String.format("An unexpected error occurred server side on %s: %s", endPoint, message)); + this.endPoint = endPoint; + } - /** - * Private constructor used solely when copying exceptions. - */ - private ServerError(InetSocketAddress address, String message, ServerError cause) { - super(message, cause); - this.address = address; - } + /** Private constructor used solely when copying exceptions. */ + private ServerError(EndPoint endPoint, String message, ServerError cause) { + super(message, cause); + this.endPoint = endPoint; + } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? 
null : endPoint.resolve(); + } - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } - @Override - public ServerError copy() { - return new ServerError(address, getMessage(), this); - } + @Override + public ServerError copy() { + return new ServerError(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java index 51593cb06aa..f9c6d1272e7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,46 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates a syntax error in a query. - */ +/** Indicates a syntax error in a query. */ public class SyntaxError extends QueryValidationException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public SyntaxError(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } - - private SyntaxError(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public SyntaxError copy() { - return new SyntaxError(getAddress(), getMessage(), this); - } + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public SyntaxError(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } + + private SyntaxError(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? 
null : endPoint.resolve().getAddress(); + } + + @Override + public SyntaxError copy() { + return new SyntaxError(getEndPoint(), getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java index 23a48824f52..10b77d8fc60 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TraceRetrievalException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,18 +24,18 @@ */ public class TraceRetrievalException extends DriverException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; - public TraceRetrievalException(String message) { - super(message); - } + public TraceRetrievalException(String message) { + super(message); + } - public TraceRetrievalException(String message, Throwable cause) { - super(message, cause); - } + public TraceRetrievalException(String message, Throwable cause) { + super(message, cause); + } - @Override - public TraceRetrievalException copy() { - return new TraceRetrievalException(getMessage(), this); - } + @Override + public TraceRetrievalException copy() { + return new TraceRetrievalException(getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TransportException.java index 02ca568271d..dab3868e76e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TransportException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TransportException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,26 @@ */ package com.datastax.driver.core.exceptions; -import java.net.InetSocketAddress; +import com.datastax.driver.core.EndPoint; /** - * A connection exception that has to do with the transport itself, i.e. that - * suggests the node is down. + * A connection exception that has to do with the transport itself, i.e. that suggests the node is + * down. */ public class TransportException extends ConnectionException { - private static final long serialVersionUID = 0; - - public TransportException(InetSocketAddress address, String msg, Throwable cause) { - super(address, msg, cause); - } + private static final long serialVersionUID = 0; - public TransportException(InetSocketAddress address, String msg) { - super(address, msg); - } + public TransportException(EndPoint endPoint, String msg, Throwable cause) { + super(endPoint, msg, cause); + } - @Override - public TransportException copy() { - return new TransportException(address, getRawMessage(), this); - } + public TransportException(EndPoint endPoint, String msg) { + super(endPoint, msg); + } + @Override + public TransportException copy() { + return new TransportException(getEndPoint(), getRawMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java index db7735504e4..76b412df87e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,46 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Error during a truncation operation. - */ +/** Error during a truncation operation. 
*/ public class TruncateException extends QueryExecutionException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public TruncateException(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } - - private TruncateException(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public TruncateException copy() { - return new TruncateException(getAddress(), getMessage(), this); - } + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public TruncateException(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } + + private TruncateException(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public TruncateException copy() { + return new TruncateException(getEndPoint(), getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java index 846a2f49f07..e967d260c96 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,47 +17,50 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** - * Indicates that a query cannot be performed due to the authorization - * restrictions of the logged user. + * Indicates that a query cannot be performed due to the authorization restrictions of the logged + * user. 
*/ -public class UnauthorizedException extends QueryValidationException implements CoordinatorException { - - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public UnauthorizedException(InetSocketAddress address, String msg) { - super(msg); - this.address = address; - } - - private UnauthorizedException(InetSocketAddress address, String msg, Throwable cause) { - super(msg, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public UnauthorizedException copy() { - return new UnauthorizedException(getAddress(), getMessage(), this); - } +public class UnauthorizedException extends QueryValidationException + implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public UnauthorizedException(EndPoint endPoint, String msg) { + super(msg); + this.endPoint = endPoint; + } + + private UnauthorizedException(EndPoint endPoint, String msg, Throwable cause) { + super(msg, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public UnauthorizedException copy() { + return new UnauthorizedException(getEndPoint(), getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java index edaa8a20f9d..6b5c78b85a3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,115 +18,128 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; - +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; /** - * Exception thrown when the coordinator knows there is not enough replicas - * alive to perform a query with the requested consistency level. 
+ * Exception thrown when the coordinator knows there is not enough replicas alive to perform a query + * with the requested consistency level. */ public class UnavailableException extends QueryExecutionException implements CoordinatorException { - private static final long serialVersionUID = 0; + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + private final ConsistencyLevel consistency; + private final int required; + private final int alive; - private final InetSocketAddress address; - private final ConsistencyLevel consistency; - private final int required; - private final int alive; + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public UnavailableException(ConsistencyLevel consistency, int required, int alive) { + this(null, consistency, required, alive); + } - /** - * This constructor should only be used internally by the driver - * when decoding error responses. - */ - public UnavailableException(ConsistencyLevel consistency, int required, int alive) { - this(null, consistency, required, alive); - } + public UnavailableException( + EndPoint endPoint, ConsistencyLevel consistency, int required, int alive) { + super( + String.format( + "Not enough replicas available for query at consistency %s (%d required but only %d alive)", + consistency, required, alive)); + this.endPoint = endPoint; + this.consistency = consistency; + this.required = required; + this.alive = alive; + } - public UnavailableException(InetSocketAddress address, ConsistencyLevel consistency, int required, int alive) { - super(String.format("Not enough replicas available for query at consistency %s (%d required but only %d alive)", consistency, required, alive)); - this.address = address; - this.consistency = consistency; - this.required = required; - this.alive = alive; - } + private UnavailableException( + EndPoint endPoint, + String message, + Throwable cause, + ConsistencyLevel consistency, + int required, + int alive) { + super(message, cause); + this.endPoint = endPoint; + this.consistency = consistency; + this.required = required; + this.alive = alive; + } - private UnavailableException(InetSocketAddress address, String message, Throwable cause, ConsistencyLevel consistency, int required, int alive) { - super(message, cause); - this.address = address; - this.consistency = consistency; - this.required = required; - this.alive = alive; - } + /** + * The consistency level of the operation triggering this unavailable exception. + * + * @return the consistency level of the operation triggering this unavailable exception. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } - /** - * The consistency level of the operation triggering this unavailable exception. - * - * @return the consistency level of the operation triggering this unavailable exception. - */ - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } + /** + * The number of replica acknowledgements/responses required to perform the operation (with its + * required consistency level). + * + * @return the number of replica acknowledgements/responses required to perform the operation. + */ + public int getRequiredReplicas() { + return required; + } - /** - * The number of replica acknowledgements/responses required to perform the - * operation (with its required consistency level). - * - * @return the number of replica acknowledgements/responses required to perform the - * operation. 
- */ - public int getRequiredReplicas() { - return required; - } + /** + * The number of replicas that were known to be alive by the Cassandra coordinator node when it + * tried to execute the operation. + * + * @return The number of replicas that were known to be alive by the Cassandra coordinator node + * when it tried to execute the operation. + */ + public int getAliveReplicas() { + return alive; + } - /** - * The number of replicas that were known to be alive by the Cassandra - * coordinator node when it tried to execute the operation. - * - * @return The number of replicas that were known to be alive by the Cassandra - * coordinator node when it tried to execute the operation. - */ - public int getAliveReplicas() { - return alive; - } + @Override + public EndPoint getEndPoint() { + return endPoint; + } - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } - @Override - public UnavailableException copy() { - return new UnavailableException(getAddress(), getMessage(), this, consistency, required, alive); - } + @Override + public UnavailableException copy() { + return new UnavailableException( + getEndPoint(), getMessage(), this, consistency, required, alive); + } - /** - * Create a copy of this exception with a nicer stack trace, and including the coordinator - * address that caused this exception to be raised. - *

- * This method is mainly intended for internal use by the driver and exists mainly because:
- * <ol>
- * <li>the original exception was decoded from a response frame - * and at that time, the coordinator address was not available; and</li>
- * <li>the newly-created exception will refer to the current thread in its stack trace, - * which generally yields a more user-friendly stack trace that the original one.</li>
- * </ol>
- *
- * @param address The full address of the host that caused this exception to be thrown. - * @return a copy/clone of this exception, but with the given host address instead of the original one. - */ - public UnavailableException copy(InetSocketAddress address) { - return new UnavailableException(address, getMessage(), this, consistency, required, alive); - }
+ /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *

<p>This method is mainly intended for internal use by the driver and exists mainly because:
+ *
+ * <ol>
+ *   <li>the original exception was decoded from a response frame and at that time, the + * coordinator address was not available; and
+ *   <li>the newly-created exception will refer to the current thread in its stack trace, which + * generally yields a more user-friendly stack trace than the original one.
+ * </ol>
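As a hedged illustration of the accessor change running through these classes (not part of the patch): application code that previously called the now-deprecated `getAddress()`/`getHost()` can move to `getEndPoint()`, resolving a socket address only when one is actually needed. The reporting below is a placeholder; the accessors used are the ones introduced in this diff.

```java
import com.datastax.driver.core.exceptions.UnavailableException;
import java.net.InetSocketAddress;

public class UnavailableReporting {
  static void report(UnavailableException e) {
    // The endpoint (and therefore the resolved address) may be null when the
    // exception was decoded before the coordinator was known.
    InetSocketAddress coordinator =
        (e.getEndPoint() == null) ? null : e.getEndPoint().resolve();
    System.err.println(
        "coordinator=" + coordinator
            + ", consistency=" + e.getConsistencyLevel()
            + ", required=" + e.getRequiredReplicas()
            + ", alive=" + e.getAliveReplicas());
  }
}
```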
    + * + * @param endPoint The full connection information of the host that caused this exception to be + * thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public UnavailableException copy(EndPoint endPoint) { + return new UnavailableException(endPoint, getMessage(), this, consistency, required, alive); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java index 95ecdb32dd1..6a951433079 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnpreparedException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,49 +17,50 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import java.net.InetAddress; import java.net.InetSocketAddress; -/** - * Indicates that the contacted host replied with an UNPREPARED error code. - */ +/** Indicates that the contacted host replied with an UNPREPARED error code. */ public class UnpreparedException extends QueryValidationException implements CoordinatorException { - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - public UnpreparedException(InetSocketAddress address, String message) { - super(String.format("A prepared query was submitted on %s but was not known of that node: %s", address, message)); - this.address = address; - } - - /** - * Private constructor used solely when copying exceptions. - */ - private UnpreparedException(InetSocketAddress address, String message, UnpreparedException cause) { - super(message, cause); - this.address = address; - } - - /** - * {@inheritDoc} - */ - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - /** - * {@inheritDoc} - */ - @Override - public InetSocketAddress getAddress() { - return address; - } - - @Override - public UnpreparedException copy() { - return new UnpreparedException(address, getMessage(), this); - } + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public UnpreparedException(EndPoint endPoint, String message) { + super( + String.format( + "A prepared query was submitted on %s but was not known of that node: %s", + endPoint, message)); + this.endPoint = endPoint; + } + + /** Private constructor used solely when copying exceptions. 
*/ + private UnpreparedException(EndPoint endPoint, String message, UnpreparedException cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public UnpreparedException copy() { + return new UnpreparedException(endPoint, getMessage(), this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnresolvedUserTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnresolvedUserTypeException.java index 752587ec46c..94ff48a126d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnresolvedUserTypeException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnresolvedUserTypeException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,43 +19,40 @@ /** * Thrown when a user type cannot be resolved. - *

    - * This exception can be raised when the driver is rebuilding - * its schema metadata, and a user-defined type cannot be completely - * constructed due to some missing information. - * It should only appear in the driver logs, never in client code. - * It shouldn't be considered as a severe error as long as it only - * appears occasionally. + * + *

    This exception can be raised when the driver is rebuilding its schema metadata, and a + * user-defined type cannot be completely constructed due to some missing information. It should + * only appear in the driver logs, never in client code. It shouldn't be considered as a severe + * error as long as it only appears occasionally. */ public class UnresolvedUserTypeException extends DriverException { - private final String keyspaceName; - - private final String name; + private final String keyspaceName; - public UnresolvedUserTypeException(String keyspaceName, String name) { - super(String.format("Cannot resolve user type %s.%s", keyspaceName, name)); - this.keyspaceName = keyspaceName; - this.name = name; - } + private final String name; - private UnresolvedUserTypeException(String keyspaceName, String name, Throwable cause) { - super(String.format("Cannot resolve user type %s.%s", keyspaceName, name), cause); - this.keyspaceName = keyspaceName; - this.name = name; - } + public UnresolvedUserTypeException(String keyspaceName, String name) { + super(String.format("Cannot resolve user type %s.%s", keyspaceName, name)); + this.keyspaceName = keyspaceName; + this.name = name; + } - public String getKeyspaceName() { - return keyspaceName; - } + private UnresolvedUserTypeException(String keyspaceName, String name, Throwable cause) { + super(String.format("Cannot resolve user type %s.%s", keyspaceName, name), cause); + this.keyspaceName = keyspaceName; + this.name = name; + } - public String getName() { - return name; - } + public String getKeyspaceName() { + return keyspaceName; + } - @Override - public UnresolvedUserTypeException copy() { - return new UnresolvedUserTypeException(keyspaceName, name, this); - } + public String getName() { + return name; + } + @Override + public UnresolvedUserTypeException copy() { + return new UnresolvedUserTypeException(keyspaceName, name, this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java index ec32cad3c18..b45df6ebda9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedFeatureException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,23 +19,23 @@ import com.datastax.driver.core.ProtocolVersion; -/** - * Exception thrown when a feature is not supported by the native protocol - * currently in use. - */ +/** Exception thrown when a feature is not supported by the native protocol currently in use. 
*/ public class UnsupportedFeatureException extends DriverException { - private static final long serialVersionUID = 0; - - private final ProtocolVersion currentVersion; + private static final long serialVersionUID = 0; - public UnsupportedFeatureException(ProtocolVersion currentVersion, String msg) { - super("Unsupported feature with the native protocol " + currentVersion + " (which is currently in use): " + msg); - this.currentVersion = currentVersion; - } + private final ProtocolVersion currentVersion; - public ProtocolVersion getCurrentVersion() { - return currentVersion; - } + public UnsupportedFeatureException(ProtocolVersion currentVersion, String msg) { + super( + "Unsupported feature with the native protocol " + + currentVersion + + " (which is currently in use): " + + msg); + this.currentVersion = currentVersion; + } + public ProtocolVersion getCurrentVersion() { + return currentVersion; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedProtocolVersionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedProtocolVersionException.java index f1c8931d986..7c6b381bcfe 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedProtocolVersionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnsupportedProtocolVersionException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,83 +17,97 @@ */ package com.datastax.driver.core.exceptions; +import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.ProtocolVersion; - import java.net.InetAddress; import java.net.InetSocketAddress; /** - * Indicates that we've attempted to connect to a Cassandra node with a protocol version - * that it cannot handle (e.g., connecting to a C* 1.2 node with protocol version 2). + * Indicates that we've attempted to connect to a Cassandra node with a protocol version that it + * cannot handle (e.g., connecting to a C* 1.2 node with protocol version 2). 
*/ -public class UnsupportedProtocolVersionException extends DriverException implements CoordinatorException { - - private static final long serialVersionUID = 0; - - private final InetSocketAddress address; - - private final ProtocolVersion unsupportedVersion; - - private final ProtocolVersion serverVersion; - - public UnsupportedProtocolVersionException(InetSocketAddress address, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion) { - super(makeErrorMessage(address, unsupportedVersion, serverVersion)); - this.address = address; - this.unsupportedVersion = unsupportedVersion; - this.serverVersion = serverVersion; - } - - public UnsupportedProtocolVersionException(InetSocketAddress address, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion, Throwable cause) { - super(makeErrorMessage(address, unsupportedVersion, serverVersion), cause); - this.address = address; - this.unsupportedVersion = unsupportedVersion; - this.serverVersion = serverVersion; - } - - private static String makeErrorMessage(InetSocketAddress address, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion) { - return unsupportedVersion == serverVersion - ? String.format("[%s] Host does not support protocol version %s", address, unsupportedVersion) - : String.format("[%s] Host does not support protocol version %s but %s", address, unsupportedVersion, serverVersion); - } - - @Override - public InetAddress getHost() { - return address.getAddress(); - } - - @Override - public InetSocketAddress getAddress() { - return address; - } - - /** - * The version with which the server replied. - *

    - * Note that this version is not necessarily a supported version. - * While this is usually the case, in rare situations, - * the server might respond with an unsupported version, - * to ensure that the client can decode its response properly. - * See CASSANDRA-11464 for more details. - * - * @return The version with which the server replied. - */ - public ProtocolVersion getServerVersion() { - return serverVersion; - } - - /** - * The version with which the client sent its request. - * - * @return The version with which the client sent its request. - */ - public ProtocolVersion getUnsupportedVersion() { - return unsupportedVersion; - } - - @Override - public UnsupportedProtocolVersionException copy() { - return new UnsupportedProtocolVersionException(address, unsupportedVersion, serverVersion, this); - } - - +public class UnsupportedProtocolVersionException extends DriverException + implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + private final ProtocolVersion unsupportedVersion; + + private final ProtocolVersion serverVersion; + + public UnsupportedProtocolVersionException( + EndPoint endPoint, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion) { + super(makeErrorMessage(endPoint, unsupportedVersion, serverVersion)); + this.endPoint = endPoint; + this.unsupportedVersion = unsupportedVersion; + this.serverVersion = serverVersion; + } + + public UnsupportedProtocolVersionException( + EndPoint endPoint, + ProtocolVersion unsupportedVersion, + ProtocolVersion serverVersion, + Throwable cause) { + super(makeErrorMessage(endPoint, unsupportedVersion, serverVersion), cause); + this.endPoint = endPoint; + this.unsupportedVersion = unsupportedVersion; + this.serverVersion = serverVersion; + } + + private static String makeErrorMessage( + EndPoint endPoint, ProtocolVersion unsupportedVersion, ProtocolVersion serverVersion) { + return unsupportedVersion == serverVersion + ? String.format( + "[%s] Host does not support protocol version %s", endPoint, unsupportedVersion) + : String.format( + "[%s] Host does not support protocol version %s but %s", + endPoint, unsupportedVersion, serverVersion); + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + /** + * The version with which the server replied. + * + *

    Note that this version is not necessarily a supported version. While this is usually the + * case, in rare situations, the server might respond with an unsupported version, to ensure that + * the client can decode its response properly. See CASSANDRA-11464 for more details. + * + * @return The version with which the server replied. + */ + public ProtocolVersion getServerVersion() { + return serverVersion; + } + + /** + * The version with which the client sent its request. + * + * @return The version with which the client sent its request. + */ + public ProtocolVersion getUnsupportedVersion() { + return unsupportedVersion; + } + + @Override + public UnsupportedProtocolVersionException copy() { + return new UnsupportedProtocolVersionException( + endPoint, unsupportedVersion, serverVersion, this); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WrappingEndPoint.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WrappingEndPoint.java new file mode 100644 index 00000000000..5572788311f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WrappingEndPoint.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.EndPoint; +import java.net.InetSocketAddress; + +// The sole purpose of this class is to allow some exception types to preserve a constructor that +// takes an InetSocketAddress (for backward compatibility). +class WrappingEndPoint implements EndPoint { + private final InetSocketAddress address; + + WrappingEndPoint(InetSocketAddress address) { + this.address = address; + } + + @Override + public InetSocketAddress resolve() { + return address; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteFailureException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteFailureException.java index 60d8d9acb89..a66c25fa59a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteFailureException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteFailureException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,125 +18,175 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.WriteType; - import java.net.InetAddress; -import java.net.InetSocketAddress; import java.util.Collections; import java.util.Map; /** * A non-timeout error during a write query. - *

    - * This happens when some of the replicas that were contacted by the coordinator replied with an error. + * + *

    This happens when some of the replicas that were contacted by the coordinator replied with an + * error. */ @SuppressWarnings("serial") public class WriteFailureException extends QueryConsistencyException { - private final WriteType writeType; - private final int failed; - private final Map failuresMap; + private final WriteType writeType; + private final int failed; + private final Map failuresMap; - /** - * This constructor should only be used internally by the driver - * when decoding error responses. - */ - public WriteFailureException(ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed, Map failuresMap) { - this(null, consistency, writeType, received, required, failed, failuresMap); - } + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public WriteFailureException( + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required, + int failed, + Map failuresMap) { + this(null, consistency, writeType, received, required, failed, failuresMap); + } - /** - * @deprecated Legacy constructor for backward compatibility. - */ - @Deprecated - public WriteFailureException(ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed) { - this(null, consistency, writeType, received, required, failed, Collections.emptyMap()); - } + /** @deprecated Legacy constructor for backward compatibility. */ + @Deprecated + public WriteFailureException( + ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed) { + this( + null, + consistency, + writeType, + received, + required, + failed, + Collections.emptyMap()); + } - public WriteFailureException(InetSocketAddress address, ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed, Map failuresMap) { - super(address, String.format("Cassandra failure during write query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistency, required, received, failed), - consistency, - received, - required); - this.writeType = writeType; - this.failed = failed; - this.failuresMap = failuresMap; - } + public WriteFailureException( + EndPoint endPoint, + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required, + int failed, + Map failuresMap) { + super( + endPoint, + String.format( + "Cassandra failure during write query at consistency %s " + + "(%d responses were required but only %d replica responded, %d failed)", + consistency, required, received, failed), + consistency, + received, + required); + this.writeType = writeType; + this.failed = failed; + this.failuresMap = failuresMap; + } - /** - * @deprecated Legacy constructor for backward compatibility. - */ - @Deprecated - public WriteFailureException(InetSocketAddress address, ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed) { - this(address, consistency, writeType, received, required, failed, Collections.emptyMap()); - } + /** @deprecated Legacy constructor for backward compatibility. 
*/ + @Deprecated + public WriteFailureException( + EndPoint endPoint, + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required, + int failed) { + this( + endPoint, + consistency, + writeType, + received, + required, + failed, + Collections.emptyMap()); + } - private WriteFailureException(InetSocketAddress address, String msg, Throwable cause, - ConsistencyLevel consistency, WriteType writeType, int received, int required, int failed, Map failuresMap) { - super(address, msg, cause, consistency, received, required); - this.writeType = writeType; - this.failed = failed; - this.failuresMap = failuresMap; - } + private WriteFailureException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required, + int failed, + Map failuresMap) { + super(endPoint, msg, cause, consistency, received, required); + this.writeType = writeType; + this.failed = failed; + this.failuresMap = failuresMap; + } - /** - * The type of the write for which a timeout was raised. - * - * @return the type of the write for which a timeout was raised. - */ - public WriteType getWriteType() { - return writeType; - } + /** + * The type of the write for which a timeout was raised. + * + * @return the type of the write for which a timeout was raised. + */ + public WriteType getWriteType() { + return writeType; + } - /** - * Returns the number of replicas that experienced a failure while executing the request. - * - * @return the number of failures. - */ - public int getFailures() { - return failed; - } + /** + * Returns the number of replicas that experienced a failure while executing the request. + * + * @return the number of failures. + */ + public int getFailures() { + return failed; + } - /** - * Returns the a failure reason code for each node that failed. - *

- * At the time of writing, the existing reason codes are:
- *
- *   • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is
- *     unknown.
- *   • {@code 0x0001}: The read operation scanned too many tombstones (as defined by
- *     {@code tombstone_failure_threshold} in {@code cassandra.yaml}, causing a
- *     {@code TombstoneOverwhelmingException}.
- *
- * (please refer to the Cassandra documentation for your version for the most up-to-date list
- * of errors)
- *

    - * This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - * - * @return a map of IP addresses to failure codes. - */ - public Map getFailuresMap() { - return failuresMap; - } + /** + * Returns the a failure reason code for each node that failed. + * + *

At the time of writing, the existing reason codes are:
+ *
+ *   • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is
+ *     unknown.
+ *   • {@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code
+ *     tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code
+ *     TombstoneOverwhelmingException}.
    + * + * (please refer to the Cassandra documentation for your version for the most up-to-date list of + * errors) + * + *

    This feature is available for protocol v5 or above only. With lower protocol versions, the + * map will always be empty. + * + * @return a map of IP addresses to failure codes. + */ + public Map getFailuresMap() { + return failuresMap; + } - @Override - public WriteFailureException copy() { - return new WriteFailureException(getAddress(), getMessage(), this, getConsistencyLevel(), getWriteType(), - getReceivedAcknowledgements(), getRequiredAcknowledgements(), getFailures(), failuresMap); - } + @Override + public WriteFailureException copy() { + return new WriteFailureException( + getEndPoint(), + getMessage(), + this, + getConsistencyLevel(), + getWriteType(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + getFailures(), + failuresMap); + } - public WriteFailureException copy(InetSocketAddress address) { - return new WriteFailureException( - address, - getMessage(), - this, - getConsistencyLevel(), - getWriteType(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements(), - failed, - failuresMap); - } + public WriteFailureException copy(EndPoint endPoint) { + return new WriteFailureException( + endPoint, + getMessage(), + this, + getConsistencyLevel(), + getWriteType(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements(), + failed, + failuresMap); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java index df0c80916cc..b72c7a93a7e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,89 +18,100 @@ package com.datastax.driver.core.exceptions; import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.WriteType; -import java.net.InetSocketAddress; - -/** - * A Cassandra timeout during a write query. - */ +/** A Cassandra timeout during a write query. */ public class WriteTimeoutException extends QueryConsistencyException { - private static final long serialVersionUID = 0; - - private final WriteType writeType; + private static final long serialVersionUID = 0; - /** - * This constructor should only be used internally by the driver - * when decoding error responses. 
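As a hedged sketch of how client code might consume the failure details documented above (keyspace, table and the statement itself are placeholders; the per-replica map is only populated on protocol v5 or later):

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.exceptions.WriteFailureException;
import java.net.InetAddress;
import java.util.Map;

public class WriteFailureHandling {
  static void insert(Session session) {
    try {
      session.execute(new SimpleStatement("INSERT INTO ks.tbl (id, value) VALUES (1, 'v')"));
    } catch (WriteFailureException e) {
      // Non-timeout failure reported by some replicas; summarize what the coordinator saw.
      System.err.printf(
          "%d replica(s) failed at consistency %s (%d/%d acks)%n",
          e.getFailures(),
          e.getConsistencyLevel(),
          e.getReceivedAcknowledgements(),
          e.getRequiredAcknowledgements());
      // Keyed by replica IP address; empty with protocol versions below v5.
      for (Map.Entry<InetAddress, Integer> entry : e.getFailuresMap().entrySet()) {
        System.err.printf("  %s -> error code 0x%04x%n", entry.getKey(), entry.getValue());
      }
    }
  }
}
```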
- */ - public WriteTimeoutException(ConsistencyLevel consistency, WriteType writeType, int received, int required) { - this(null, consistency, writeType, received, required); - } + private final WriteType writeType; - public WriteTimeoutException(InetSocketAddress address, ConsistencyLevel consistency, WriteType writeType, int received, int required) { - super( - address, - String.format("Cassandra timeout during write query at consistency %s (%d replica were required but only %d acknowledged the write)", consistency, required, received), - consistency, - received, - required); - this.writeType = writeType; - } + /** + * This constructor should only be used internally by the driver when decoding error responses. + */ + public WriteTimeoutException( + ConsistencyLevel consistency, WriteType writeType, int received, int required) { + this(null, consistency, writeType, received, required); + } - private WriteTimeoutException(InetSocketAddress address, String msg, Throwable cause, ConsistencyLevel consistency, WriteType writeType, int received, int required) { - super(address, msg, cause, consistency, received, required); - this.writeType = writeType; - } + public WriteTimeoutException( + EndPoint endPoint, + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required) { + super( + endPoint, + String.format( + "Cassandra timeout during %s write query at consistency %s " + + "(%d replica were required but only %d acknowledged the write)", + writeType, consistency, required, received), + consistency, + received, + required); + this.writeType = writeType; + } - /** - * The type of the write for which a timeout was raised. - * - * @return the type of the write for which a timeout was raised. - */ - public WriteType getWriteType() { - return writeType; - } + private WriteTimeoutException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + WriteType writeType, + int received, + int required) { + super(endPoint, msg, cause, consistency, received, required); + this.writeType = writeType; + } - @Override - public WriteTimeoutException copy() { - return new WriteTimeoutException( - getAddress(), - getMessage(), - this, - getConsistencyLevel(), - getWriteType(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements() - ); - } + /** + * The type of the write for which a timeout was raised. + * + * @return the type of the write for which a timeout was raised. + */ + public WriteType getWriteType() { + return writeType; + } - /** - * Create a copy of this exception with a nicer stack trace, and including the coordinator - * address that caused this exception to be raised. - *

- * This method is mainly intended for internal use by the driver and exists mainly because:
- *
- *   1. the original exception was decoded from a response frame
- *      and at that time, the coordinator address was not available; and
- *   2. the newly-created exception will refer to the current thread in its stack trace,
- *      which generally yields a more user-friendly stack trace than the original one.
- *
    - * - * @param address The full address of the host that caused this exception to be thrown. - * @return a copy/clone of this exception, but with the given host address instead of the original one. - */ - public WriteTimeoutException copy(InetSocketAddress address) { - return new WriteTimeoutException( - address, - getMessage(), - this, - getConsistencyLevel(), - getWriteType(), - getReceivedAcknowledgements(), - getRequiredAcknowledgements() - ); - } + @Override + public WriteTimeoutException copy() { + return new WriteTimeoutException( + getEndPoint(), + getMessage(), + this, + getConsistencyLevel(), + getWriteType(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } + /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *

This method is mainly intended for internal use by the driver and exists mainly because:
+ *
+ *   1. the original exception was decoded from a response frame and at that time, the
+ *      coordinator address was not available; and
+ *   2. the newly-created exception will refer to the current thread in its stack trace, which
+ *      generally yields a more user-friendly stack trace than the original one.
    + * + * @param address The full address of the host that caused this exception to be thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public WriteTimeoutException copy(EndPoint address) { + return new WriteTimeoutException( + address, + getMessage(), + this, + getConsistencyLevel(), + getWriteType(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java index c76e4bae728..e4205946ebd 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,5 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/** - * Exceptions thrown by the DataStax Java driver for Cassandra. - */ +/** Exceptions thrown by the Java Driver for Cassandra. */ package com.datastax.driver.core.exceptions; diff --git a/driver-core/src/main/java/com/datastax/driver/core/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/package-info.java index f5003da2160..ae82e0e5544 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/package-info.java +++ b/driver-core/src/main/java/com/datastax/driver/core/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,8 +16,8 @@ * limitations under the License. */ /** - * The main package for the DataStax Java driver for Cassandra. - *
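To illustrate `getWriteType()` above: whether a timed-out write is worth retrying usually depends on the write type; the driver's default retry policy, for instance, only retries `BATCH_LOG` writes. A minimal, hypothetical helper along the same lines:

```java
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.WriteType;
import com.datastax.driver.core.exceptions.WriteTimeoutException;

public class WriteTimeoutHandling {
  /** Retries once on a write timeout, but only for batch-log writes. */
  static void executeWithSingleRetry(Session session, Statement statement) {
    try {
      session.execute(statement);
    } catch (WriteTimeoutException e) {
      if (e.getWriteType() == WriteType.BATCH_LOG) {
        // Writing to the batch log again is safe; anything else is left to the caller.
        session.execute(statement);
      } else {
        throw e;
      }
    }
  }
}
```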

    - * The main entry for this package is the {@link com.datastax.driver.core.Cluster} class. + * The main package for the Java Driver for Cassandra. + * + *

    The main entry for this package is the {@link com.datastax.driver.core.Cluster} class. */ package com.datastax.driver.core; diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslator.java b/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslator.java index 1555b6c3aec..981b3b5748a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/AddressTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,56 +18,49 @@ package com.datastax.driver.core.policies; import com.datastax.driver.core.Cluster; - import java.net.InetSocketAddress; /** - * Translates IP addresses received from Cassandra nodes into locally queriable - * addresses. - *

    - * The driver auto-detect new Cassandra nodes added to the cluster through server - * side pushed notifications and through checking the system tables. For each - * node, the address the driver will receive will correspond to the address set as - * {@code rpc_address} in the node yaml file. In most case, this is the correct - * address to use by the driver and that is what is used by default. However, - * sometimes the addresses received through this mechanism will either not be - * reachable directly by the driver or should not be the preferred address to use - * to reach the node (for instance, the {@code rpc_address} set on Cassandra nodes - * might be a private IP, but some clients may have to use a public IP, or - * pass by a router to reach that node). This interface allows to deal with - * such cases, by allowing to translate an address as sent by a Cassandra node - * to another address to be used by the driver for connection. - *

    - * Please note that the contact points addresses provided while creating the - * {@link Cluster} instance are not "translated", only IP address retrieved from or sent - * by Cassandra nodes to the driver are. + * Translates IP addresses received from Cassandra nodes into locally queriable addresses. + * + *

    The driver auto-detect new Cassandra nodes added to the cluster through server side pushed + * notifications and through checking the system tables. For each node, the address the driver will + * receive will correspond to the address set as {@code rpc_address} in the node yaml file. In most + * case, this is the correct address to use by the driver and that is what is used by default. + * However, sometimes the addresses received through this mechanism will either not be reachable + * directly by the driver or should not be the preferred address to use to reach the node (for + * instance, the {@code rpc_address} set on Cassandra nodes might be a private IP, but some clients + * may have to use a public IP, or pass by a router to reach that node). This interface allows to + * deal with such cases, by allowing to translate an address as sent by a Cassandra node to another + * address to be used by the driver for connection. + * + *

    Please note that the contact points addresses provided while creating the {@link Cluster} + * instance are not "translated", only IP address retrieved from or sent by Cassandra nodes to the + * driver are. */ public interface AddressTranslator { - /** - * Initializes this address translator. - * - * @param cluster the {@code Cluster} instance for which the translator is created. - */ - void init(Cluster cluster); + /** + * Initializes this address translator. + * + * @param cluster the {@code Cluster} instance for which the translator is created. + */ + void init(Cluster cluster); - /** - * Translates a Cassandra {@code rpc_address} to another address if necessary. - * - * @param address the address of a node as returned by Cassandra. Note that - * if the {@code rpc_address} of a node has been configured to {@code 0.0.0.0} - * server side, then the provided address will be the node {@code listen_address}, - * *not* {@code 0.0.0.0}. Also note that the port for {@code InetSocketAddress} - * will always be the one set at Cluster construction time (9042 by default). - * @return the address the driver should actually use to connect to the node - * designated by {@code address}. If the return is {@code null}, then {@code - * address} will be used by the driver (it is thus equivalent to returning - * {@code address} directly) - */ - InetSocketAddress translate(InetSocketAddress address); + /** + * Translates a Cassandra {@code rpc_address} to another address if necessary. + * + * @param address the address of a node as returned by Cassandra. Note that if the {@code + * rpc_address} of a node has been configured to {@code 0.0.0.0} server side, then the + * provided address will be the node {@code listen_address}, *not* {@code 0.0.0.0}. Also note + * that the port for {@code InetSocketAddress} will always be the one set at Cluster + * construction time (9042 by default). + * @return the address the driver should actually use to connect to the node designated by {@code + * address}. If the return is {@code null}, then {@code address} will be used by the driver + * (it is thus equivalent to returning {@code address} directly) + */ + InetSocketAddress translate(InetSocketAddress address); - /** - * Called at {@link Cluster} shutdown. - */ - void close(); + /** Called at {@link Cluster} shutdown. */ + void close(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java index a9d2f1e56c7..8995d558352 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ChainableLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
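A minimal sketch of the `AddressTranslator` contract above, assuming a NAT-style setup where every node is reached through one public host name; the host name and contact point are placeholders, and registering the translator through the `Cluster` builder is shown for completeness.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.AddressTranslator;
import java.net.InetSocketAddress;

public class ProxyAddressTranslator implements AddressTranslator {

  @Override
  public void init(Cluster cluster) {
    // No state to initialize in this sketch.
  }

  @Override
  public InetSocketAddress translate(InetSocketAddress address) {
    // Simplistic sketch: route every node through one public host, keeping the native port.
    // A real translator would map each private rpc_address to its own reachable address.
    return new InetSocketAddress("public-proxy.example.com", address.getPort());
  }

  @Override
  public void close() {
    // Nothing to release.
  }

  public static Cluster buildCluster() {
    return Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder contact point
        .withAddressTranslator(new ProxyAddressTranslator())
        .build();
  }
}
```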
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +17,12 @@ */ package com.datastax.driver.core.policies; -/** - * A load balancing policy that wraps another policy. - */ +/** A load balancing policy that wraps another policy. */ public interface ChainableLoadBalancingPolicy extends LoadBalancingPolicy { - /** - * Returns the child policy. - * - * @return the child policy. - */ - LoadBalancingPolicy getChildPolicy(); + /** + * Returns the child policy. + * + * @return the child policy. + */ + LoadBalancingPolicy getChildPolicy(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Clock.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Clock.java index a382332e4ac..00c32dbcc11 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/Clock.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Clock.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +17,11 @@ */ package com.datastax.driver.core.policies; -/** - * Wraps System.nanoTime() to make it easy to mock in tests. - */ +/** Wraps System.nanoTime() to make it easy to mock in tests. */ class Clock { - static final Clock DEFAULT = new Clock(); + static final Clock DEFAULT = new Clock(); - long nanoTime() { - return System.nanoTime(); - } + long nanoTime() { + return System.nanoTime(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java index e50d7eb0fe7..b5d90f19437 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
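A short sketch of the chaining idea behind `ChainableLoadBalancingPolicy`: `TokenAwarePolicy` wraps another policy and exposes it again through `getChildPolicy()`. The contact point and datacenter name are placeholders.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

public class ChainedPolicyExample {
  public static void main(String[] args) {
    TokenAwarePolicy policy =
        new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().withLocalDc("DC1").build());

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();

    // The wrapped policy remains reachable through the chainable contract.
    LoadBalancingPolicy child = policy.getChildPolicy();
    System.out.println("Child policy: " + child.getClass().getSimpleName());

    cluster.close();
  }
}
```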
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,61 +19,60 @@ import com.datastax.driver.core.Cluster; -/** - * A reconnection policy that waits a constant time between each reconnection attempt. - */ +/** A reconnection policy that waits a constant time between each reconnection attempt. */ public class ConstantReconnectionPolicy implements ReconnectionPolicy { - private final long delayMs; + private final long delayMs; - /** - * Creates a reconnection policy that creates with the provided constant wait - * time between reconnection attempts. - * - * @param constantDelayMs the constant delay in milliseconds to use. - */ - public ConstantReconnectionPolicy(long constantDelayMs) { - if (constantDelayMs < 0) - throw new IllegalArgumentException(String.format("Invalid negative delay (got %d)", constantDelayMs)); + /** + * Creates a reconnection policy that creates with the provided constant wait time between + * reconnection attempts. + * + * @param constantDelayMs the constant delay in milliseconds to use. + */ + public ConstantReconnectionPolicy(long constantDelayMs) { + if (constantDelayMs < 0) + throw new IllegalArgumentException( + String.format("Invalid negative delay (got %d)", constantDelayMs)); - this.delayMs = constantDelayMs; - } + this.delayMs = constantDelayMs; + } - /** - * The constant delay used by this reconnection policy. - * - * @return the constant delay used by this reconnection policy. - */ - public long getConstantDelayMs() { - return delayMs; - } + /** + * The constant delay used by this reconnection policy. + * + * @return the constant delay used by this reconnection policy. + */ + public long getConstantDelayMs() { + return delayMs; + } - /** - * A new schedule that uses a constant {@code getConstantDelayMs()} delay - * between reconnection attempt. - * - * @return the newly created schedule. - */ - @Override - public ReconnectionSchedule newSchedule() { - return new ConstantSchedule(); - } + /** + * A new schedule that uses a constant {@code getConstantDelayMs()} delay between reconnection + * attempt. + * + * @return the newly created schedule. + */ + @Override + public ReconnectionSchedule newSchedule() { + return new ConstantSchedule(); + } - private class ConstantSchedule implements ReconnectionSchedule { - - @Override - public long nextDelayMs() { - return delayMs; - } - } + private class ConstantSchedule implements ReconnectionSchedule { @Override - public void init(Cluster cluster) { - // nothing to do + public long nextDelayMs() { + return delayMs; } + } - @Override - public void close() { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } + + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java index af22fcae30e..bcb69f7b4ca 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
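Configuring the constant-delay reconnection policy above is a one-liner on the `Cluster` builder; the contact point and the 5-second delay are example values only.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ConstantReconnectionPolicy;

public class ReconnectionConfigExample {
  public static Cluster build() {
    return Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder contact point
        // Retry a downed node every 5 seconds until it comes back.
        .withReconnectionPolicy(new ConstantReconnectionPolicy(5000))
        .build();
  }
}
```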
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,51 +21,58 @@ import com.datastax.driver.core.Host; import com.datastax.driver.core.Statement; import com.google.common.base.Preconditions; - import java.util.concurrent.atomic.AtomicInteger; /** - * A {@link SpeculativeExecutionPolicy} that schedules a given number of speculative executions, separated by a fixed delay. + * A {@link SpeculativeExecutionPolicy} that schedules a given number of speculative executions, + * separated by a fixed delay. */ public class ConstantSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - private final int maxSpeculativeExecutions; - private final long constantDelayMillis; + private final int maxSpeculativeExecutions; + private final long constantDelayMillis; - /** - * Builds a new instance. - * - * @param constantDelayMillis the delay between each speculative execution. Must be strictly positive. - * @param maxSpeculativeExecutions the number of speculative executions. Must be strictly positive. - * @throws IllegalArgumentException if one of the arguments does not respect the preconditions above. - */ - public ConstantSpeculativeExecutionPolicy(final long constantDelayMillis, final int maxSpeculativeExecutions) { - Preconditions.checkArgument(constantDelayMillis > 0, - "delay must be strictly positive (was %d)", constantDelayMillis); - Preconditions.checkArgument(maxSpeculativeExecutions > 0, - "number of speculative executions must be strictly positive (was %d)", maxSpeculativeExecutions); - this.constantDelayMillis = constantDelayMillis; - this.maxSpeculativeExecutions = maxSpeculativeExecutions; - } + /** + * Builds a new instance. + * + * @param constantDelayMillis the delay between each speculative execution. Must be >= 0. A zero + * delay means it should immediately send `maxSpeculativeExecutions` requests along with the + * original request. + * @param maxSpeculativeExecutions the number of speculative executions. Must be strictly + * positive. + * @throws IllegalArgumentException if one of the arguments does not respect the preconditions + * above. 
+ */ + public ConstantSpeculativeExecutionPolicy( + final long constantDelayMillis, final int maxSpeculativeExecutions) { + Preconditions.checkArgument( + constantDelayMillis >= 0, "delay must be >= 0 (was %d)", constantDelayMillis); + Preconditions.checkArgument( + maxSpeculativeExecutions > 0, + "number of speculative executions must be strictly positive (was %d)", + maxSpeculativeExecutions); + this.constantDelayMillis = constantDelayMillis; + this.maxSpeculativeExecutions = maxSpeculativeExecutions; + } - @Override - public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { - return new SpeculativeExecutionPlan() { - private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return new SpeculativeExecutionPlan() { + private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); - @Override - public long nextExecution(Host lastQueried) { - return (remaining.getAndDecrement() > 0) ? constantDelayMillis : -1; - } - }; - } + @Override + public long nextExecution(Host lastQueried) { + return (remaining.getAndDecrement() > 0) ? constantDelayMillis : -1; + } + }; + } - @Override - public void init(Cluster cluster) { - // do nothing - } + @Override + public void init(Cluster cluster) { + // do nothing + } - @Override - public void close() { - // do nothing - } + @Override + public void close() { + // do nothing + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java index 598e1ba7076..a05d5087e4b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
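The speculative execution policy above only pays off for statements the driver may safely resend, so it is usually paired with an explicit idempotence flag. The delay, execution count, query and contact point below are example values only.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.policies.ConstantSpeculativeExecutionPolicy;

public class SpeculativeExecutionExample {
  public static void main(String[] args) {
    try (Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            // Up to 2 extra executions, each started 500 ms after the previous one.
            .withSpeculativeExecutionPolicy(new ConstantSpeculativeExecutionPolicy(500, 2))
            .build()) {
      Session session = cluster.connect();
      Statement statement =
          new SimpleStatement("SELECT release_version FROM system.local")
              // Speculative executions are skipped for statements not marked idempotent.
              .setIdempotent(true);
      session.execute(statement);
    }
  }
}
```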
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,352 +17,381 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Configuration; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Statement; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.AbstractIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A data-center aware Round-robin load balancing policy. - *

    - * This policy provides round-robin queries over the node of the local - * data center. It also includes in the query plans returned a configurable - * number of hosts in the remote data centers, but those are always tried - * after the local nodes. In other words, this policy guarantees that no - * host in a remote data center will be queried unless no host in the local - * data center can be reached. - *

    - * If used with a single data center, this policy is equivalent to the - * {@link RoundRobinPolicy}, but its DC awareness incurs a slight overhead - * so the latter should be preferred to this policy in that case. + * + *

    This policy provides round-robin queries over the node of the local data center. It also + * includes in the query plans returned a configurable number of hosts in the remote data centers, + * but those are always tried after the local nodes. In other words, this policy guarantees that no + * host in a remote data center will be queried unless no host in the local data center can be + * reached. + * + *

    If used with a single data center, this policy is equivalent to the {@link RoundRobinPolicy}, + * but its DC awareness incurs a slight overhead so the latter should be preferred to this policy in + * that case. */ public class DCAwareRoundRobinPolicy implements LoadBalancingPolicy { - private static final Logger logger = LoggerFactory.getLogger(DCAwareRoundRobinPolicy.class); - - /** - * Returns a builder to create a new instance. - * - * @return the builder. - */ - public static Builder builder() { - return new Builder(); + private static final Logger logger = LoggerFactory.getLogger(DCAwareRoundRobinPolicy.class); + + /** + * Returns a builder to create a new instance. + * + * @return the builder. + */ + public static Builder builder() { + return new Builder(); + } + + private static final String UNSET = ""; + + private final ConcurrentMap> perDcLiveHosts = + new ConcurrentHashMap>(); + private final AtomicInteger index = new AtomicInteger(); + + @VisibleForTesting volatile String localDc; + + private final int usedHostsPerRemoteDc; + private final boolean dontHopForLocalCL; + + private volatile Configuration configuration; + + private DCAwareRoundRobinPolicy( + String localDc, + int usedHostsPerRemoteDc, + boolean allowRemoteDCsForLocalConsistencyLevel, + boolean allowEmptyLocalDc) { + if (!allowEmptyLocalDc && Strings.isNullOrEmpty(localDc)) + throw new IllegalArgumentException("Null or empty data center specified for DC-aware policy"); + this.localDc = localDc == null ? UNSET : localDc; + this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; + this.dontHopForLocalCL = !allowRemoteDCsForLocalConsistencyLevel; + } + + @Override + public void init(Cluster cluster, Collection hosts) { + if (localDc != UNSET) + logger.info("Using provided data-center name '{}' for DCAwareRoundRobinPolicy", localDc); + + this.configuration = cluster.getConfiguration(); + + ArrayList notInLocalDC = new ArrayList(); + + for (Host host : hosts) { + String dc = dc(host); + + // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, + // use it. + if (localDc == UNSET && dc != UNSET) { + logger.info( + "Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", + dc); + localDc = dc; + } else if (!dc.equals(localDc)) + notInLocalDC.add(String.format("%s (%s)", host.toString(), dc)); + + CopyOnWriteArrayList prev = perDcLiveHosts.get(dc); + if (prev == null) + perDcLiveHosts.put(dc, new CopyOnWriteArrayList(Collections.singletonList(host))); + else prev.addIfAbsent(host); } - private static final String UNSET = ""; - - private final ConcurrentMap> perDcLiveHosts = new ConcurrentHashMap>(); - private final AtomicInteger index = new AtomicInteger(); - - @VisibleForTesting - volatile String localDc; - - private final int usedHostsPerRemoteDc; - private final boolean dontHopForLocalCL; - - private volatile Configuration configuration; - - private DCAwareRoundRobinPolicy(String localDc, int usedHostsPerRemoteDc, boolean allowRemoteDCsForLocalConsistencyLevel, boolean allowEmptyLocalDc) { - if (!allowEmptyLocalDc && Strings.isNullOrEmpty(localDc)) - throw new IllegalArgumentException("Null or empty data center specified for DC-aware policy"); - this.localDc = localDc == null ? 
UNSET : localDc; - this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; - this.dontHopForLocalCL = !allowRemoteDCsForLocalConsistencyLevel; + if (notInLocalDC.size() > 0) { + String nonLocalHosts = Joiner.on(",").join(notInLocalDC); + logger.warn( + "Some contact points don't match local data center. Local DC = {}. Non-conforming contact points: {}", + localDc, + nonLocalHosts); } - @Override - public void init(Cluster cluster, Collection hosts) { - if (localDc != UNSET) - logger.info("Using provided data-center name '{}' for DCAwareRoundRobinPolicy", localDc); - - this.configuration = cluster.getConfiguration(); - - ArrayList notInLocalDC = new ArrayList(); - - for (Host host : hosts) { - String dc = dc(host); - - // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, use it. - if (localDc == UNSET && dc != UNSET) { - logger.info("Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", dc); - localDc = dc; - } else if (!dc.equals(localDc)) - notInLocalDC.add(String.format("%s (%s)", host.toString(), dc)); - - CopyOnWriteArrayList prev = perDcLiveHosts.get(dc); - if (prev == null) - perDcLiveHosts.put(dc, new CopyOnWriteArrayList(Collections.singletonList(host))); - else - prev.addIfAbsent(host); - } - - if (notInLocalDC.size() > 0) { - String nonLocalHosts = Joiner.on(",").join(notInLocalDC); - logger.warn("Some contact points don't match local data center. Local DC = {}. Non-conforming contact points: {}", localDc, nonLocalHosts); + this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + } + + private String dc(Host host) { + String dc = host.getDatacenter(); + return dc == null ? localDc : dc; + } + + @SuppressWarnings("unchecked") + private static CopyOnWriteArrayList cloneList(CopyOnWriteArrayList list) { + return (CopyOnWriteArrayList) list.clone(); + } + + /** + * Return the HostDistance for the provided host. + * + *

    This policy consider nodes in the local datacenter as {@code LOCAL}. For each remote + * datacenter, it considers a configurable number of hosts as {@code REMOTE} and the rest is + * {@code IGNORED}. + * + *

    To configure how many hosts in each remote datacenter should be considered, see {@link + * Builder#withUsedHostsPerRemoteDc(int)}. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. + */ + @Override + public HostDistance distance(Host host) { + String dc = dc(host); + if (dc == UNSET || dc.equals(localDc)) return HostDistance.LOCAL; + + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); + if (dcHosts == null || usedHostsPerRemoteDc == 0) return HostDistance.IGNORED; + + // We need to clone, otherwise our subList call is not thread safe + dcHosts = cloneList(dcHosts); + return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host) + ? HostDistance.REMOTE + : HostDistance.IGNORED; + } + + /** + * Returns the hosts to use for a new query. + * + *

    The returned plan will always try each known host in the local datacenter first, and then, + * if none of the local host is reachable, will try up to a configurable number of other host per + * remote datacenter. The order of the local node in the returned query plan will follow a + * Round-robin algorithm. + * + * @param loggedKeyspace the keyspace currently logged in on for this query. + * @param statement the query for which to build the plan. + * @return a new query plan, i.e. an iterator indicating which host to try first for querying, + * which one to use as failover, etc... + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, final Statement statement) { + + CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); + final List hosts = + localLiveHosts == null ? Collections.emptyList() : cloneList(localLiveHosts); + final int startIdx = index.getAndIncrement(); + + return new AbstractIterator() { + + private int idx = startIdx; + private int remainingLocal = hosts.size(); + + // For remote Dcs + private Iterator remoteDcs; + private List currentDcHosts; + private int currentDcRemaining; + + @Override + protected Host computeNext() { + while (true) { + if (remainingLocal > 0) { + remainingLocal--; + int c = idx++ % hosts.size(); + if (c < 0) { + c += hosts.size(); + } + return hosts.get(c); + } + + if (currentDcHosts != null && currentDcRemaining > 0) { + currentDcRemaining--; + int c = idx++ % currentDcHosts.size(); + if (c < 0) { + c += currentDcHosts.size(); + } + return currentDcHosts.get(c); + } + + ConsistencyLevel cl = + statement.getConsistencyLevel() == null + ? configuration.getQueryOptions().getConsistencyLevel() + : statement.getConsistencyLevel(); + + if (dontHopForLocalCL && cl.isDCLocal()) return endOfData(); + + if (remoteDcs == null) { + Set copy = new HashSet(perDcLiveHosts.keySet()); + copy.remove(localDc); + remoteDcs = copy.iterator(); + } + + if (!remoteDcs.hasNext()) break; + + String nextRemoteDc = remoteDcs.next(); + CopyOnWriteArrayList nextDcHosts = perDcLiveHosts.get(nextRemoteDc); + if (nextDcHosts != null) { + // Clone for thread safety + List dcHosts = cloneList(nextDcHosts); + currentDcHosts = dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)); + currentDcRemaining = currentDcHosts.size(); + } } - - this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); - + return endOfData(); + } + }; + } + + @Override + public void onUp(Host host) { + String dc = dc(host); + + // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, + // use it. + if (localDc == UNSET && dc != UNSET) { + logger.info( + "Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", + dc); + localDc = dc; } - private String dc(Host host) { - String dc = host.getDatacenter(); - return dc == null ? 
localDc : dc; - } - - @SuppressWarnings("unchecked") - private static CopyOnWriteArrayList cloneList(CopyOnWriteArrayList list) { - return (CopyOnWriteArrayList) list.clone(); + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); + if (dcHosts == null) { + CopyOnWriteArrayList newMap = + new CopyOnWriteArrayList(Collections.singletonList(host)); + dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap); + // If we've successfully put our new host, we're good, otherwise we've been beaten so continue + if (dcHosts == null) return; } + dcHosts.addIfAbsent(host); + } + + @Override + public void onDown(Host host) { + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc(host)); + if (dcHosts != null) dcHosts.remove(host); + } + + @Override + public void onAdd(Host host) { + onUp(host); + } + + @Override + public void onRemove(Host host) { + onDown(host); + } + + @Override + public void close() { + // nothing to do + } + + /** Helper class to build the policy. */ + public static class Builder { + private String localDc; + private int usedHostsPerRemoteDc; + private boolean allowRemoteDCsForLocalConsistencyLevel; /** - * Return the HostDistance for the provided host. - *

    - * This policy consider nodes in the local datacenter as {@code LOCAL}. - * For each remote datacenter, it considers a configurable number of - * hosts as {@code REMOTE} and the rest is {@code IGNORED}. - *

    - * To configure how many hosts in each remote datacenter should be considered, - * see {@link Builder#withUsedHostsPerRemoteDc(int)}. + * Sets the name of the datacenter that will be considered "local" by the policy. + * + *

    This must be the name as known by Cassandra (in other words, the name in that appears in + * {@code system.peers}, or in the output of admin tools like nodetool). * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host}. + *

    If this method isn't called, the policy will default to the datacenter of the first node + * connected to. This will always be ok if all the contact points use at {@code Cluster} + * creation are in the local data-center. Otherwise, you should provide the name yourself with + * this method. + * + * @param localDc the name of the datacenter. It should not be {@code null}. + * @return this builder. */ - @Override - public HostDistance distance(Host host) { - String dc = dc(host); - if (dc == UNSET || dc.equals(localDc)) - return HostDistance.LOCAL; - - CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); - if (dcHosts == null || usedHostsPerRemoteDc == 0) - return HostDistance.IGNORED; - - // We need to clone, otherwise our subList call is not thread safe - dcHosts = cloneList(dcHosts); - return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host) - ? HostDistance.REMOTE - : HostDistance.IGNORED; + public Builder withLocalDc(String localDc) { + Preconditions.checkArgument( + !Strings.isNullOrEmpty(localDc), + "localDc name can't be null or empty. If you want to let the policy autodetect the datacenter, don't call Builder.withLocalDC"); + this.localDc = localDc; + return this; } /** - * Returns the hosts to use for a new query. - *

    - * The returned plan will always try each known host in the local - * datacenter first, and then, if none of the local host is reachable, - * will try up to a configurable number of other host per remote datacenter. - * The order of the local node in the returned query plan will follow a - * Round-robin algorithm. + * Sets the number of hosts per remote datacenter that the policy should consider. + * + *

    The policy's {@code distance()} method will return a {@code HostDistance.REMOTE} distance + * for only {@code usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts of the remote + * datacenters will be ignored (and thus no connections to them will be maintained). + * + *

    If {@code usedHostsPerRemoteDc > 0}, then if for a query no host in the local datacenter + * can be reached and if the consistency level of the query is not {@code LOCAL_ONE} or {@code + * LOCAL_QUORUM}, then up to {@code usedHostsPerRemoteDc} hosts per remote datacenter will be + * tried by the policy as a fallback. By default, no remote host will be used for {@code + * LOCAL_ONE} and {@code LOCAL_QUORUM}, since this would change the meaning of the consistency + * level, somewhat breaking the consistency contract (this can be overridden with {@link + * #allowRemoteDCsForLocalConsistencyLevel()}). + * + *

    If this method isn't called, the policy will default to 0. * - * @param loggedKeyspace the keyspace currently logged in on for this - * query. - * @param statement the query for which to build the plan. - * @return a new query plan, i.e. an iterator indicating which host to - * try first for querying, which one to use as failover, etc... + * @param usedHostsPerRemoteDc the number. + * @return this builder. + * @deprecated This functionality will be removed in the next major release of the driver. DC + * failover shouldn't be done in the driver, which does not have the necessary context to + * know what makes sense considering application semantics. */ - @Override - public Iterator newQueryPlan(String loggedKeyspace, final Statement statement) { - - CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); - final List hosts = localLiveHosts == null ? Collections.emptyList() : cloneList(localLiveHosts); - final int startIdx = index.getAndIncrement(); - - return new AbstractIterator() { - - private int idx = startIdx; - private int remainingLocal = hosts.size(); - - // For remote Dcs - private Iterator remoteDcs; - private List currentDcHosts; - private int currentDcRemaining; - - @Override - protected Host computeNext() { - while (true) { - if (remainingLocal > 0) { - remainingLocal--; - int c = idx++ % hosts.size(); - if (c < 0) { - c += hosts.size(); - } - return hosts.get(c); - } - - if (currentDcHosts != null && currentDcRemaining > 0) { - currentDcRemaining--; - int c = idx++ % currentDcHosts.size(); - if (c < 0) { - c += currentDcHosts.size(); - } - return currentDcHosts.get(c); - } - - ConsistencyLevel cl = statement.getConsistencyLevel() == null - ? configuration.getQueryOptions().getConsistencyLevel() - : statement.getConsistencyLevel(); - - if (dontHopForLocalCL && cl.isDCLocal()) - return endOfData(); - - if (remoteDcs == null) { - Set copy = new HashSet(perDcLiveHosts.keySet()); - copy.remove(localDc); - remoteDcs = copy.iterator(); - } - - if (!remoteDcs.hasNext()) - break; - - String nextRemoteDc = remoteDcs.next(); - CopyOnWriteArrayList nextDcHosts = perDcLiveHosts.get(nextRemoteDc); - if (nextDcHosts != null) { - // Clone for thread safety - List dcHosts = cloneList(nextDcHosts); - currentDcHosts = dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)); - currentDcRemaining = currentDcHosts.size(); - } - } - return endOfData(); - } - }; - } - - @Override - public void onUp(Host host) { - String dc = dc(host); - - // If the localDC was in "auto-discover" mode and it's the first host for which we have a DC, use it. 
- if (localDc == UNSET && dc != UNSET) { - logger.info("Using data-center name '{}' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)", dc); - localDc = dc; - } - - CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); - if (dcHosts == null) { - CopyOnWriteArrayList newMap = new CopyOnWriteArrayList(Collections.singletonList(host)); - dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap); - // If we've successfully put our new host, we're good, otherwise we've been beaten so continue - if (dcHosts == null) - return; - } - dcHosts.addIfAbsent(host); - } - - @Override - public void onDown(Host host) { - CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc(host)); - if (dcHosts != null) - dcHosts.remove(host); - } - - @Override - public void onAdd(Host host) { - onUp(host); + @Deprecated + public Builder withUsedHostsPerRemoteDc(int usedHostsPerRemoteDc) { + Preconditions.checkArgument( + usedHostsPerRemoteDc >= 0, "usedHostsPerRemoteDc must be equal or greater than 0"); + this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; + return this; } - @Override - public void onRemove(Host host) { - onDown(host); - } - - @Override - public void close() { - // nothing to do + /** + * Allows the policy to return remote hosts when building query plans for queries having + * consistency level {@code LOCAL_ONE} or {@code LOCAL_QUORUM}. + * + *

    When used in conjunction with {@link #withUsedHostsPerRemoteDc(int) usedHostsPerRemoteDc} + * > 0, this overrides the policy of never using remote datacenter nodes for {@code LOCAL_ONE} + * and {@code LOCAL_QUORUM} queries. It is however inadvisable to do so in almost all cases, as + * this would potentially break consistency guarantees and if you are fine with that, it's + * probably better to use a weaker consistency like {@code ONE}, {@code TWO} or {@code THREE}. + * As such, this method should generally be avoided; use it only if you know and understand what + * you do. + * + * @return this builder. + * @deprecated This functionality will be removed in the next major release of the driver. DC + * failover shouldn't be done in the driver, which does not have the necessary context to + * know what makes sense considering application semantics. + */ + @Deprecated + public Builder allowRemoteDCsForLocalConsistencyLevel() { + this.allowRemoteDCsForLocalConsistencyLevel = true; + return this; } /** - * Helper class to build the policy. + * Builds the policy configured by this builder. + * + * @return the policy. */ - public static class Builder { - private String localDc; - private int usedHostsPerRemoteDc; - private boolean allowRemoteDCsForLocalConsistencyLevel; - - /** - * Sets the name of the datacenter that will be considered "local" by the policy. - *

    - * This must be the name as known by Cassandra (in other words, the name in that appears in - * {@code system.peers}, or in the output of admin tools like nodetool). - *

    - * If this method isn't called, the policy will default to the datacenter of the first node - * connected to. This will always be ok if all the contact points use at {@code Cluster} - * creation are in the local data-center. Otherwise, you should provide the name yourself - * with this method. - * - * @param localDc the name of the datacenter. It should not be {@code null}. - * @return this builder. - */ - public Builder withLocalDc(String localDc) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(localDc), - "localDc name can't be null or empty. If you want to let the policy autodetect the datacenter, don't call Builder.withLocalDC"); - this.localDc = localDc; - return this; - } - - /** - * Sets the number of hosts per remote datacenter that the policy should consider. - *

    - * The policy's {@code distance()} method will return a {@code HostDistance.REMOTE} distance for only {@code usedHostsPerRemoteDc} - * hosts per remote datacenter. Other hosts of the remote datacenters will be ignored (and thus no connections to them will be - * maintained). - *

    - * If {@code usedHostsPerRemoteDc > 0}, then if for a query no host in the local datacenter can be reached and if the consistency - * level of the query is not {@code LOCAL_ONE} or {@code LOCAL_QUORUM}, then up to {@code usedHostsPerRemoteDc} hosts per remote - * datacenter will be tried by the policy as a fallback. By default, no remote host will be used for {@code LOCAL_ONE} and - * {@code LOCAL_QUORUM}, since this would change the meaning of the consistency level, somewhat breaking the consistency contract - * (this can be overridden with {@link #allowRemoteDCsForLocalConsistencyLevel()}). - *

    - * If this method isn't called, the policy will default to 0. - * - * @param usedHostsPerRemoteDc the number. - * @return this builder. - */ - public Builder withUsedHostsPerRemoteDc(int usedHostsPerRemoteDc) { - Preconditions.checkArgument(usedHostsPerRemoteDc >= 0, - "usedHostsPerRemoteDc must be equal or greater than 0"); - this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; - return this; - } - - /** - * Allows the policy to return remote hosts when building query plans for queries having consistency level {@code LOCAL_ONE} - * or {@code LOCAL_QUORUM}. - *

    - * When used in conjunction with {@link #withUsedHostsPerRemoteDc(int) usedHostsPerRemoteDc} > 0, this overrides the policy of - * never using remote datacenter nodes for {@code LOCAL_ONE} and {@code LOCAL_QUORUM} queries. It is however inadvisable to do - * so in almost all cases, as this would potentially break consistency guarantees and if you are fine with that, it's probably - * better to use a weaker consistency like {@code ONE}, {@code TWO} or {@code THREE}. As such, this method should generally be - * avoided; use it only if you know and understand what you do. - * - * @return this builder. - */ - public Builder allowRemoteDCsForLocalConsistencyLevel() { - this.allowRemoteDCsForLocalConsistencyLevel = true; - return this; - } - - /** - * Builds the policy configured by this builder. - * - * @return the policy. - */ - public DCAwareRoundRobinPolicy build() { - if (usedHostsPerRemoteDc == 0 && allowRemoteDCsForLocalConsistencyLevel) { - logger.warn("Setting allowRemoteDCsForLocalConsistencyLevel has no effect if usedHostsPerRemoteDc = 0. " - + "This setting will be ignored"); - } - return new DCAwareRoundRobinPolicy(localDc, usedHostsPerRemoteDc, allowRemoteDCsForLocalConsistencyLevel, true); - } + public DCAwareRoundRobinPolicy build() { + if (usedHostsPerRemoteDc == 0 && allowRemoteDCsForLocalConsistencyLevel) { + logger.warn( + "Setting allowRemoteDCsForLocalConsistencyLevel has no effect if usedHostsPerRemoteDc = 0. " + + "This setting will be ignored"); + } + return new DCAwareRoundRobinPolicy( + localDc, usedHostsPerRemoteDc, allowRemoteDCsForLocalConsistencyLevel, true); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java index 237f3595a77..f59f0f67565 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,113 +22,141 @@ import com.datastax.driver.core.Statement; import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.WriteFailureException; /** * The default retry policy. - *

    - * This policy retries queries in only two cases: + * + *

    This policy retries queries in only two cases: + * *

      - *
    • On a read timeout, if enough replicas replied but data was not retrieved.
- *
    • On a write timeout, if we timeout while writing the distributed log used by batch statements.
+ *
    • On a read timeout, retries once on the same host if enough replicas replied but data was + * not retrieved. + *
    • On a write timeout, retries once on the same host if we timeout while writing the + * distributed log used by batch statements. + *
    • On an unavailable exception, retries once on the next host. + *
    • On a request error, such as a client timeout, the query is retried on the next host. Do not + * retry on read or write failures. *
    - *

    - * This retry policy is conservative in that it will never retry with a - * different consistency level than the one of the initial operation. - *

    - * In some cases, it may be convenient to use a more aggressive retry policy - * like {@link DowngradingConsistencyRetryPolicy}. + * + *

    This retry policy is conservative in that it will never retry with a different consistency + * level than the one of the initial operation. + * + *

    In some cases, it may be convenient to use a more aggressive retry policy like {@link + * DowngradingConsistencyRetryPolicy}. */ public class DefaultRetryPolicy implements RetryPolicy { - public static final DefaultRetryPolicy INSTANCE = new DefaultRetryPolicy(); + public static final DefaultRetryPolicy INSTANCE = new DefaultRetryPolicy(); - private DefaultRetryPolicy() { - } + private DefaultRetryPolicy() {} - /** - * {@inheritDoc} - *
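
For orientation (an editorial sketch, not part of the diff): this policy is already the driver's default, but it can also be set explicitly when building the `Cluster`; the contact point below is a placeholder assumption.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DefaultRetryPolicy;

public class DefaultRetryPolicyExample {
  public static void main(String[] args) {
    // Explicitly configuring the default retry policy; 127.0.0.1 is a placeholder contact point.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withRetryPolicy(DefaultRetryPolicy.INSTANCE)
            .build();

    // Print the retry policy actually registered in the cluster configuration.
    System.out.println(cluster.getConfiguration().getPolicies().getRetryPolicy());
    cluster.close();
  }
}
```
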

    - * This implementation triggers a maximum of one retry, and only if enough - * replicas had responded to the read request but data was not retrieved - * amongst those. Indeed, that case usually means that enough replica - * are alive to satisfy the consistency but the coordinator picked a - * dead one for data retrieval, not having detected that replica as dead - * yet. The reasoning for retrying then is that by the time we get the - * timeout the dead replica will likely have been detected as dead and - * the retry has a high chance of success. - * - * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and - * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise. - */ - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); + /** + * {@inheritDoc} + * + *

    This implementation triggers a maximum of one retry, and only if enough replicas had + * responded to the read request but data was not retrieved amongst those. Indeed, that case + * usually means that enough replica are alive to satisfy the consistency but the coordinator + * picked a dead one for data retrieval, not having detected that replica as dead yet. The + * reasoning for retrying then is that by the time we get the timeout the dead replica will likely + * have been detected as dead and the retry has a high chance of success. + * + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and {@code + * receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} + * otherwise. + */ + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); - return receivedResponses >= requiredResponses && !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); - } + return receivedResponses >= requiredResponses && !dataRetrieved + ? RetryDecision.retry(cl) + : RetryDecision.rethrow(); + } - /** - * {@inheritDoc} - *

    - * This implementation triggers a maximum of one retry, and only in the case of - * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in - * that case is that write to the distributed batch log is tried by the - * coordinator of the write against a small subset of all the nodes alive - * in the local datacenter. Hence, a timeout usually means that none of - * the nodes in that subset were alive but the coordinator hasn't - * detected them as dead. By the time we get the timeout the dead - * nodes will likely have been detected as dead and the retry has thus a - * high chance of success. - * - * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and - * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise. - */ - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); + /** + * {@inheritDoc} + * + *

    This implementation triggers a maximum of one retry, and only in the case of a {@code + * WriteType.BATCH_LOG} write. The reasoning for the retry in that case is that write to the + * distributed batch log is tried by the coordinator of the write against a small subset of all + * the nodes alive in the local datacenter. Hence, a timeout usually means that none of the nodes + * in that subset were alive but the coordinator hasn't detected them as dead. By the time we get + * the timeout the dead nodes will likely have been detected as dead and the retry has thus a high + * chance of success. + * + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and {@code + * writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise. + */ + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); - // If the batch log write failed, retry the operation as this might just be we were unlucky at picking candidates - // JAVA-764: testing the write type automatically filters out serial consistency levels as these have always WriteType.CAS. - return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow(); - } + // If the batch log write failed, retry the operation as this might just be we were unlucky at + // picking candidates + // JAVA-764: testing the write type automatically filters out serial consistency levels as these + // have always WriteType.CAS. + return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } - /** - * {@inheritDoc} - *

    - * This implementation does the following: - *

      - *
    • if this is the first retry ({@code nbRetry == 0}), it triggers a retry on the next host in the query plan - * with the same consistency level ({@link RetryPolicy.RetryDecision#tryNextHost(ConsistencyLevel) RetryDecision#tryNextHost(null)}. - * The rationale is that the first coordinator might have been network-isolated from all other nodes (thinking - * they're down), but still able to communicate with the client; in that case, retrying on the same host has almost - * no chance of success, but moving to the next host might solve the issue.
- *
• otherwise, the exception is rethrown.
- *
    - */ - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return (nbRetry == 0) - ? RetryDecision.tryNextHost(null) - : RetryDecision.rethrow(); - } + /** + * {@inheritDoc} + * + *

    This implementation does the following: + * + *

      + *
    • if this is the first retry ({@code nbRetry == 0}), it triggers a retry on the next host + * in the query plan with the same consistency level ({@link + * RetryPolicy.RetryDecision#tryNextHost(ConsistencyLevel) RetryDecision#tryNextHost(null)}. + * The rationale is that the first coordinator might have been network-isolated from all + * other nodes (thinking they're down), but still able to communicate with the client; in + * that case, retrying on the same host has almost no chance of success, but moving to the + * next host might solve the issue. + *
• otherwise, the exception is rethrown. + *
    + */ + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return (nbRetry == 0) ? RetryDecision.tryNextHost(null) : RetryDecision.rethrow(); + } - /** - * {@inheritDoc} - */ - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.tryNextHost(cl); + /** {@inheritDoc} */ + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + // do not retry these by default as they generally indicate a data problem or + // other issue that is unlikely to be resolved by a retry. + if (e instanceof WriteFailureException || e instanceof ReadFailureException) { + return RetryDecision.rethrow(); } + return RetryDecision.tryNextHost(cl); + } - @Override - public void init(Cluster cluster) { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } - @Override - public void close() { - // nothing to do - } + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java index 413e119de1a..f0e289331dc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,191 +22,218 @@ import com.datastax.driver.core.Statement; import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.WriteFailureException; /** - * A retry policy that sometimes retries with a lower consistency level than - * the one initially requested. - *

    - * BEWARE: this policy may retry queries using a lower consistency - * level than the one initially requested. By doing so, it may break - * consistency guarantees. In other words, if you use this retry policy, - * there are cases (documented below) where a read at {@code QUORUM} - * may not see a preceding write at {@code QUORUM}. Do not use this - * policy unless you have understood the cases where this can happen and - * are ok with that. It is also highly recommended to always wrap this - * policy into {@link LoggingRetryPolicy} to log the occurrences of - * such consistency breaks. - *

    - * This policy implements the same retries than the {@link DefaultRetryPolicy} - * policy. But on top of that, it also retries in the following cases: + * A retry policy that sometimes retries with a lower consistency level than the one initially + * requested. + * + *

    BEWARE: this policy may retry queries using a lower consistency level than the one + * initially requested. By doing so, it may break consistency guarantees. In other words, if you use + * this retry policy, there are cases (documented below) where a read at {@code QUORUM} may + * not see a preceding write at {@code QUORUM}. Do not use this policy unless you have + * understood the cases where this can happen and are ok with that. It is also highly recommended to + * always wrap this policy into {@link LoggingRetryPolicy} to log the occurrences of such + * consistency breaks. + * + *
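
Because the Javadoc above recommends wrapping this policy in {@link LoggingRetryPolicy}, a hedged usage sketch follows (the contact point is a placeholder, and a deprecation warning is expected once this policy is deprecated):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy;
import com.datastax.driver.core.policies.LoggingRetryPolicy;

public class DowngradingRetryExample {
  public static void main(String[] args) {
    // Wrap the downgrading policy so that every downgrade/ignore decision is logged.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withRetryPolicy(new LoggingRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE))
            .build();
    cluster.close();
  }
}
```
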

This policy implements the same retries as the {@link DefaultRetryPolicy} policy. But on top + * of that, it also retries in the following cases: + *

      - *
    • On a read timeout: if the number of replicas that responded is - * greater than one, but lower than is required by the requested - * consistency level, the operation is retried at a lower consistency - * level.
- *
    • On a write timeout: if the operation is a {@code - * WriteType.UNLOGGED_BATCH} and at least one replica acknowledged the - * write, the operation is retried at a lower consistency level. - * Furthermore, for other operations, if at least one replica acknowledged - * the write, the timeout is ignored.
- *
    • On an unavailable exception: if at least one replica is alive, the - * operation is retried at a lower consistency level.
+ *
    • On a read timeout: if the number of replicas that responded is greater than one, but lower + * than is required by the requested consistency level, the operation is retried at a lower + * consistency level. + *
    • On a write timeout: if the operation is a {@code WriteType.UNLOGGED_BATCH} and at least one + * replica acknowledged the write, the operation is retried at a lower consistency level. + * Furthermore, for other operations, if at least one replica acknowledged the write, the + * timeout is ignored. + *
    • On an unavailable exception: if at least one replica is alive, the operation is retried at + * a lower consistency level. *
    + * * The lower consistency level to use for retries is determined by the following rules: + * *
      - *
    • if more than 3 replicas responded, use {@code THREE}.
- *
    • if 1, 2 or 3 replicas responded, use the corresponding level {@code ONE}, {@code TWO} or {@code THREE}.
+ *
    • if more than 3 replicas responded, use {@code THREE}. + *
    • if 1, 2 or 3 replicas responded, use the corresponding level {@code ONE}, {@code TWO} or + * {@code THREE}. *
    - * Note that if the initial consistency level was {@code EACH_QUORUM}, Cassandra returns the number of live replicas - * in the datacenter that failed to reach consistency, not the overall number in the cluster. Therefore if this - * number is 0, we still retry at {@code ONE}, on the assumption that a host may still be up in another datacenter. - *

    - * The reasoning being this retry policy is the following one. If, based - * on the information the Cassandra coordinator node returns, retrying the - * operation with the initially requested consistency has a chance to - * succeed, do it. Otherwise, if based on this information we know the - * initially requested consistency level cannot be achieve currently, then: + * + * Note that if the initial consistency level was {@code EACH_QUORUM}, Cassandra returns the number + * of live replicas in the datacenter that failed to reach consistency, not the overall + * number in the cluster. Therefore if this number is 0, we still retry at {@code ONE}, on the + * assumption that a host may still be up in another datacenter. + * + *

The reasoning behind this retry policy is the following. If, based on the information the + * Cassandra coordinator node returns, retrying the operation with the initially requested + * consistency has a chance to succeed, do it. Otherwise, if based on this information we know + * the initially requested consistency level cannot be achieved currently, then: + *

      - *
    • For writes, ignore the exception (thus silently failing the - * consistency requirement) if we know the write has been persisted on at - * least one replica.
- *
    • For reads, try reading at a lower consistency level (thus silently - * failing the consistency requirement).
+ *
    • For writes, ignore the exception (thus silently failing the consistency requirement) if we + * know the write has been persisted on at least one replica. + *
    • For reads, try reading at a lower consistency level (thus silently failing the consistency + * requirement). *
    - * In other words, this policy implements the idea that if the requested - * consistency level cannot be achieved, the next best thing for writes is - * to make sure the data is persisted, and that reading something is better - * than reading nothing, even if there is a risk of reading stale data. + * + * In other words, this policy implements the idea that if the requested consistency level cannot be + * achieved, the next best thing for writes is to make sure the data is persisted, and that reading + * something is better than reading nothing, even if there is a risk of reading stale data. + * + * @deprecated as of version 3.5.0, this retry policy has been deprecated, and it will be removed in + * 4.0.0. See our upgrade + * guide to understand how to migrate existing applications that rely on this policy. */ +@Deprecated +@SuppressWarnings("DeprecatedIsStillUsed") public class DowngradingConsistencyRetryPolicy implements RetryPolicy { - public static final DowngradingConsistencyRetryPolicy INSTANCE = new DowngradingConsistencyRetryPolicy(); - - private DowngradingConsistencyRetryPolicy() { + public static final DowngradingConsistencyRetryPolicy INSTANCE = + new DowngradingConsistencyRetryPolicy(); + + private DowngradingConsistencyRetryPolicy() {} + + private RetryDecision maxLikelyToWorkCL(int knownOk, ConsistencyLevel currentCL) { + if (knownOk >= 3) return RetryDecision.retry(ConsistencyLevel.THREE); + + if (knownOk == 2) return RetryDecision.retry(ConsistencyLevel.TWO); + + // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas + // so even if we get 0 alive replicas, there might be + // a node up in some other datacenter + if (knownOk == 1 || currentCL == ConsistencyLevel.EACH_QUORUM) + return RetryDecision.retry(ConsistencyLevel.ONE); + + return RetryDecision.rethrow(); + } + + /** + * {@inheritDoc} + * + *

    This implementation triggers a maximum of one retry. If less replicas responded than + * required by the consistency level (but at least one replica did respond), the operation is + * retried at a lower consistency level. If enough replicas responded but data was not retrieved, + * the operation is retried with the initial consistency level. Otherwise, an exception is thrown. + */ + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); + + // CAS reads are not all that useful in terms of visibility of the writes since CAS write + // supports the + // normal consistency levels on the committing phase. So the main use case for CAS reads is + // probably for + // when you've timed out on a CAS write and want to make sure what happened. Downgrading in that + // case + // would be always wrong so we just special case to rethrow. + if (cl.isSerial()) return RetryDecision.rethrow(); + + if (receivedResponses < requiredResponses) { + // Tries the biggest CL that is expected to work + return maxLikelyToWorkCL(receivedResponses, cl); } - private RetryDecision maxLikelyToWorkCL(int knownOk, ConsistencyLevel currentCL) { - if (knownOk >= 3) - return RetryDecision.retry(ConsistencyLevel.THREE); - - if (knownOk == 2) - return RetryDecision.retry(ConsistencyLevel.TWO); - - // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas - // so even if we get 0 alive replicas, there might be - // a node up in some other datacenter - if (knownOk == 1 || currentCL == ConsistencyLevel.EACH_QUORUM) - return RetryDecision.retry(ConsistencyLevel.ONE); - - return RetryDecision.rethrow(); - } - - /** - * {@inheritDoc} - *

    - * This implementation triggers a maximum of one retry. If less replica - * responded than required by the consistency level (but at least one - * replica did respond), the operation is retried at a lower - * consistency level. If enough replica responded but data was not - * retrieve, the operation is retried with the initial consistency - * level. Otherwise, an exception is thrown. - */ - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - // CAS reads are not all that useful in terms of visibility of the writes since CAS write supports the - // normal consistency levels on the committing phase. So the main use case for CAS reads is probably for - // when you've timed out on a CAS write and want to make sure what happened. Downgrading in that case - // would be always wrong so we just special case to rethrow. - if (cl.isSerial()) - return RetryDecision.rethrow(); - - if (receivedResponses < requiredResponses) { - // Tries the biggest CL that is expected to work - return maxLikelyToWorkCL(receivedResponses, cl); - } - - return !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); - } - - /** - * {@inheritDoc} - *

    - * This implementation triggers a maximum of one retry. If {@code writeType == - * WriteType.BATCH_LOG}, the write is retried with the initial - * consistency level. If {@code writeType == WriteType.UNLOGGED_BATCH} - * and at least one replica acknowledged, the write is retried with a - * lower consistency level (with unlogged batch, a write timeout can - * always mean that part of the batch haven't been persisted at - * all, even if {@code receivedAcks > 0}). For other write types ({@code WriteType.SIMPLE} - * and {@code WriteType.BATCH}), if we know the write has been persisted on at - * least one replica, we ignore the exception. Otherwise, an exception is thrown. - */ - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - switch (writeType) { - case SIMPLE: - case BATCH: - // Since we provide atomicity there is no point in retrying - return receivedAcks > 0 ? RetryDecision.ignore() : RetryDecision.rethrow(); - case UNLOGGED_BATCH: - // Since only part of the batch could have been persisted, - // retry with whatever consistency should allow to persist all - return maxLikelyToWorkCL(receivedAcks, cl); - case BATCH_LOG: - return RetryDecision.retry(cl); - } - // We want to rethrow on COUNTER and CAS, because in those case "we don't know" and don't want to guess - return RetryDecision.rethrow(); - } - - /** - * {@inheritDoc} - *

    - * This implementation triggers a maximum of one retry. If at least one replica - * is know to be alive, the operation is retried at a lower consistency - * level. - */ - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - // JAVA-764: if the requested consistency level is serial, it means that the operation failed at the paxos phase of a LWT. - // Retry on the next host, on the assumption that the initial coordinator could be network-isolated. - if (cl.isSerial()) - return RetryDecision.tryNextHost(null); - - // Tries the biggest CL that is expected to work - return maxLikelyToWorkCL(aliveReplica, cl); - } - - /** - * {@inheritDoc} - *

    - * For historical reasons, this implementation triggers a retry on the next host in the query plan - * with the same consistency level, regardless of the statement's idempotence. - * Note that this breaks the general rule - * stated in {@link RetryPolicy#onRequestError(Statement, ConsistencyLevel, DriverException, int)}: - * "a retry should only be attempted if the request is known to be idempotent".` - */ - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.tryNextHost(cl); + return !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * {@inheritDoc} + * + *

    This implementation triggers a maximum of one retry. If {@code writeType == + * WriteType.BATCH_LOG}, the write is retried with the initial consistency level. If {@code + * writeType == WriteType.UNLOGGED_BATCH} and at least one replica acknowledged, the write is + * retried with a lower consistency level (with unlogged batch, a write timeout can always + * mean that part of the batch haven't been persisted at all, even if {@code receivedAcks > 0}). + * For other write types ({@code WriteType.SIMPLE} and {@code WriteType.BATCH}), if we know the + * write has been persisted on at least one replica, we ignore the exception. Otherwise, an + * exception is thrown. + */ + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); + + switch (writeType) { + case SIMPLE: + case BATCH: + // Since we provide atomicity there is no point in retrying + return receivedAcks > 0 ? RetryDecision.ignore() : RetryDecision.rethrow(); + case UNLOGGED_BATCH: + // Since only part of the batch could have been persisted, + // retry with whatever consistency should allow to persist all + return maxLikelyToWorkCL(receivedAcks, cl); + case BATCH_LOG: + return RetryDecision.retry(cl); } - - @Override - public void init(Cluster cluster) { - // nothing to do - } - - @Override - public void close() { - // nothing to do + // We want to rethrow on COUNTER and CAS, because in those case "we don't know" and don't want + // to guess + return RetryDecision.rethrow(); + } + + /** + * {@inheritDoc} + * + *

    This implementation triggers a maximum of one retry. If at least one replica is known to be + * alive, the operation is retried at a lower consistency level. + */ + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); + + // JAVA-764: if the requested consistency level is serial, it means that the operation failed at + // the paxos phase of a LWT. + // Retry on the next host, on the assumption that the initial coordinator could be + // network-isolated. + if (cl.isSerial()) return RetryDecision.tryNextHost(null); + + // Tries the biggest CL that is expected to work + return maxLikelyToWorkCL(aliveReplica, cl); + } + + /** + * {@inheritDoc} + * + *

    For historical reasons, this implementation triggers a retry on the next host in the query + * plan with the same consistency level, regardless of the statement's idempotence. Note that this + * breaks the general rule stated in {@link RetryPolicy#onRequestError(Statement, + * ConsistencyLevel, DriverException, int)}: "a retry should only be attempted if the request is + * known to be idempotent".` + */ + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + // do not retry these by default as they generally indicate a data problem or + // other issue that is unlikely to be resolved by a retry. + if (e instanceof WriteFailureException || e instanceof ReadFailureException) { + return RetryDecision.rethrow(); } + return RetryDecision.tryNextHost(cl); + } + + @Override + public void init(Cluster cluster) { + // nothing to do + } + + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.java b/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.java index ce92338c84c..6d5e865879b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,9 +20,10 @@ import com.datastax.driver.core.Cluster; import com.datastax.driver.core.exceptions.DriverException; import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Enumeration; +import java.util.Hashtable; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.NamingException; @@ -28,123 +31,129 @@ import javax.naming.directory.Attributes; import javax.naming.directory.DirContext; import javax.naming.directory.InitialDirContext; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Enumeration; -import java.util.Hashtable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * {@link AddressTranslator} implementation for a multi-region EC2 deployment where clients are also deployed in EC2. - *

    - * Its distinctive feature is that it translates addresses according to the location of the Cassandra host: + * {@link AddressTranslator} implementation for a multi-region EC2 deployment where clients are + * also deployed in EC2. + * + *

    Its distinctive feature is that it translates addresses according to the location of the + * Cassandra host: + * *

      - *
    • addresses in different EC2 regions (than the client) are unchanged;
- *
    • addresses in the same EC2 region are translated to private IPs.
+ *
    • addresses in different EC2 regions (than the client) are unchanged; + *
    • addresses in the same EC2 region are translated to private IPs. *
    + * * This optimizes network costs, because Amazon charges more for communication over public IPs. - *

    - *

    - * Implementation note: this class performs a reverse DNS lookup of the origin address, to find the domain name of the target - * instance. Then it performs a forward DNS lookup of the domain name; the EC2 DNS does the private/public switch automatically - * based on location. + * + *
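
To show how the translator is plugged in, an illustrative sketch (not part of the patch) follows; it assumes the Cluster builder's `withAddressTranslator` hook, and the contact point is a documentation-range placeholder:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.EC2MultiRegionAddressTranslator;

public class Ec2TranslatorExample {
  public static void main(String[] args) {
    // The translator rewrites same-region broadcast addresses to private IPs using the
    // reverse/forward DNS lookups described above. 203.0.113.10 is a placeholder address.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("203.0.113.10")
            .withAddressTranslator(new EC2MultiRegionAddressTranslator())
            .build();
    cluster.close();
  }
}
```
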

    + * + *

    Implementation note: this class performs a reverse DNS lookup of the origin address, to find + * the domain name of the target instance. Then it performs a forward DNS lookup of the domain name; + * the EC2 DNS does the private/public switch automatically based on location. */ public class EC2MultiRegionAddressTranslator implements AddressTranslator { - private static final Logger logger = LoggerFactory.getLogger(EC2MultiRegionAddressTranslator.class); + private static final Logger logger = + LoggerFactory.getLogger(EC2MultiRegionAddressTranslator.class); - // TODO when we switch to Netty 4.1, we can replace this with the Netty built-in DNS client - private final DirContext ctx; + // TODO when we switch to Netty 4.1, we can replace this with the Netty built-in DNS client + private final DirContext ctx; - public EC2MultiRegionAddressTranslator() { - Hashtable env = new Hashtable(); - env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); - try { - ctx = new InitialDirContext(env); - } catch (NamingException e) { - throw new DriverException("Could not create translator", e); - } + public EC2MultiRegionAddressTranslator() { + Hashtable env = new Hashtable(); + env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); + try { + ctx = new InitialDirContext(env); + } catch (NamingException e) { + throw new DriverException("Could not create translator", e); } + } - @VisibleForTesting - EC2MultiRegionAddressTranslator(DirContext ctx) { - this.ctx = ctx; - } + @VisibleForTesting + EC2MultiRegionAddressTranslator(DirContext ctx) { + this.ctx = ctx; + } - @Override - public void init(Cluster cluster) { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } - @Override - public InetSocketAddress translate(InetSocketAddress socketAddress) { - InetAddress address = socketAddress.getAddress(); - try { - // InetAddress#getHostName() is supposed to perform a reverse DNS lookup, but for some reason it doesn't work - // within the same EC2 region (it returns the IP address itself). - // We use an alternate implementation: - String domainName = lookupPtrRecord(reverse(address)); - if (domainName == null) { - logger.warn("Found no domain name for {}, returning it as-is", address); - return socketAddress; - } + @Override + public InetSocketAddress translate(InetSocketAddress socketAddress) { + InetAddress address = socketAddress.getAddress(); + try { + // InetAddress#getHostName() is supposed to perform a reverse DNS lookup, but for some reason + // it doesn't work + // within the same EC2 region (it returns the IP address itself). 
+ // We use an alternate implementation: + String domainName = lookupPtrRecord(reverse(address)); + if (domainName == null) { + logger.warn("Found no domain name for {}, returning it as-is", address); + return socketAddress; + } - InetAddress translatedAddress = InetAddress.getByName(domainName); - logger.debug("Resolved {} to {}", address, translatedAddress); - return new InetSocketAddress(translatedAddress, socketAddress.getPort()); - } catch (Exception e) { - logger.warn("Error resolving " + address + ", returning it as-is", e); - return socketAddress; - } + InetAddress translatedAddress = InetAddress.getByName(domainName); + logger.debug("Resolved {} to {}", address, translatedAddress); + return new InetSocketAddress(translatedAddress, socketAddress.getPort()); + } catch (Exception e) { + logger.warn("Error resolving " + address + ", returning it as-is", e); + return socketAddress; } + } - private String lookupPtrRecord(String reversedDomain) throws Exception { - Attributes attrs = ctx.getAttributes(reversedDomain, new String[]{"PTR"}); - for (NamingEnumeration ae = attrs.getAll(); ae.hasMoreElements(); ) { - Attribute attr = (Attribute) ae.next(); - for (Enumeration vals = attr.getAll(); vals.hasMoreElements(); ) - return vals.nextElement().toString(); - } - return null; + private String lookupPtrRecord(String reversedDomain) throws Exception { + Attributes attrs = ctx.getAttributes(reversedDomain, new String[] {"PTR"}); + for (NamingEnumeration ae = attrs.getAll(); ae.hasMoreElements(); ) { + Attribute attr = (Attribute) ae.next(); + for (Enumeration vals = attr.getAll(); vals.hasMoreElements(); ) + return vals.nextElement().toString(); } + return null; + } - @Override - public void close() { - try { - ctx.close(); - } catch (NamingException e) { - logger.warn("Error closing translator", e); - } + @Override + public void close() { + try { + ctx.close(); + } catch (NamingException e) { + logger.warn("Error closing translator", e); } + } - // Builds the "reversed" domain name in the ARPA domain to perform the reverse lookup - @VisibleForTesting - static String reverse(InetAddress address) { - byte[] bytes = address.getAddress(); - if (bytes.length == 4) - return reverseIpv4(bytes); - else - return reverseIpv6(bytes); - } + // Builds the "reversed" domain name in the ARPA domain to perform the reverse lookup + @VisibleForTesting + static String reverse(InetAddress address) { + byte[] bytes = address.getAddress(); + if (bytes.length == 4) return reverseIpv4(bytes); + else return reverseIpv6(bytes); + } - private static String reverseIpv4(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - builder.append(bytes[i] & 0xFF).append('.'); - } - builder.append("in-addr.arpa"); - return builder.toString(); + private static String reverseIpv4(byte[] bytes) { + StringBuilder builder = new StringBuilder(); + for (int i = bytes.length - 1; i >= 0; i--) { + builder.append(bytes[i] & 0xFF).append('.'); } + builder.append("in-addr.arpa"); + return builder.toString(); + } - private static String reverseIpv6(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - byte b = bytes[i]; - int lowNibble = b & 0x0F; - int highNibble = b >> 4 & 0x0F; - builder.append(Integer.toHexString(lowNibble)).append('.') - .append(Integer.toHexString(highNibble)).append('.'); - } - builder.append("ip6.arpa"); - return builder.toString(); + private static String reverseIpv6(byte[] bytes) { + StringBuilder 
builder = new StringBuilder(); + for (int i = bytes.length - 1; i >= 0; i--) { + byte b = bytes[i]; + int lowNibble = b & 0x0F; + int highNibble = b >> 4 & 0x0F; + builder + .append(Integer.toHexString(lowNibble)) + .append('.') + .append(Integer.toHexString(highNibble)) + .append('.'); } + builder.append("ip6.arpa"); + return builder.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ErrorAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ErrorAwarePolicy.java index adae2e1e933..e600a4e4005 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ErrorAwarePolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ErrorAwarePolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,351 +17,367 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.*; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.LatencyTracker; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.FunctionExecutionException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.QueryConsistencyException; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.UnavailableException; import com.google.common.annotations.Beta; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.AbstractIterator; import com.google.common.collect.ImmutableList; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; - -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.concurrent.TimeUnit.NANOSECONDS; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Chainable load balancing policy that filters out hosts based on their error rates. - *

    - * When creating a query plan, this policy gathers a list of candidate hosts from its child policy; for each candidate - * host, it then determines whether it should be included into or excluded from the final query plan, based on its - * current error rate (measured over the last minute, with a 5-second granularity). - *

    - * Note that the policy should not blindly count all errors in its measurements: some type of errors (e.g. CQL syntax - * errors) can originate from the client and occur on all hosts, therefore they should not count towards the exclusion - * threshold or all hosts could become excluded. You can provide your own {@link ErrorFilter} to customize that logic. - *

    - * The policy follows the builder pattern to be created, the {@link Builder} class can be created with - * {@link #builder} method. - *

    - * This policy is currently in BETA mode and its behavior might be changing throughout different driver versions. + * + *

    When creating a query plan, this policy gathers a list of candidate hosts from its child + * policy; for each candidate host, it then determines whether it should be included into or + * excluded from the final query plan, based on its current error rate (measured over the last + * minute, with a 5-second granularity). + * + *

    Note that the policy should not blindly count all errors in its measurements: some type of + * errors (e.g. CQL syntax errors) can originate from the client and occur on all hosts, therefore + * they should not count towards the exclusion threshold or all hosts could become excluded. You can + * provide your own {@link ErrorFilter} to customize that logic. + * + *

The policy is created through the builder pattern; a {@link Builder} instance can be obtained + * with the {@link #builder} method. + * + *
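
A hedged sketch of that builder usage (the thresholds and the choice of child policy are illustrative assumptions, not prescribed by this patch):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.ErrorAwarePolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import java.util.concurrent.TimeUnit;

public class ErrorAwarePolicyExample {
  public static void main(String[] args) {
    // Exclude a host for 5 minutes once it exceeds 3 errors in the sliding 1-minute window.
    LoadBalancingPolicy policy =
        ErrorAwarePolicy.builder(DCAwareRoundRobinPolicy.builder().build())
            .withMaxErrorsPerMinute(3)
            .withRetryPeriod(5, TimeUnit.MINUTES)
            .build();

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();
    cluster.close();
  }
}
```
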

    This policy is currently in BETA mode and its behavior might be changing throughout different + * driver versions. */ @Beta public class ErrorAwarePolicy implements ChainableLoadBalancingPolicy { - private static final Logger logger = LoggerFactory.getLogger(ErrorAwarePolicy.class); + private static final Logger logger = LoggerFactory.getLogger(ErrorAwarePolicy.class); - private final LoadBalancingPolicy childPolicy; + private final LoadBalancingPolicy childPolicy; - private final long retryPeriodNanos; + private final long retryPeriodNanos; - PerHostErrorTracker errorTracker; + PerHostErrorTracker errorTracker; - private ErrorAwarePolicy(Builder builder) { - this.childPolicy = builder.childPolicy; - this.retryPeriodNanos = builder.retryPeriodNanos; - this.errorTracker = new PerHostErrorTracker(builder.maxErrorsPerMinute, builder.errorFilter, builder.clock); - } + private ErrorAwarePolicy(Builder builder) { + this.childPolicy = builder.childPolicy; + this.retryPeriodNanos = builder.retryPeriodNanos; + this.errorTracker = + new PerHostErrorTracker(builder.maxErrorsPerMinute, builder.errorFilter, builder.clock); + } - @Override - public LoadBalancingPolicy getChildPolicy() { - return childPolicy; - } + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } - @Override - public void init(Cluster cluster, Collection hosts) { - childPolicy.init(cluster, hosts); - cluster.register(this.errorTracker); - } + @Override + public void init(Cluster cluster, Collection hosts) { + childPolicy.init(cluster, hosts); + cluster.register(this.errorTracker); + } - @Override - public HostDistance distance(Host host) { - return childPolicy.distance(host); - } + @Override + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - final Iterator childQueryPlan = childPolicy.newQueryPlan(loggedKeyspace, statement); - - return new AbstractIterator() { - - @Override - protected Host computeNext() { - while (childQueryPlan.hasNext()) { - Host host = childQueryPlan.next(); - if (!errorTracker.isExcluded(host)) { - return host; - } - } - return endOfData(); - } - }; - } + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + final Iterator childQueryPlan = childPolicy.newQueryPlan(loggedKeyspace, statement); - @Override - public void onAdd(Host host) { - childPolicy.onAdd(host); - } + return new AbstractIterator() { - @Override - public void onUp(Host host) { - childPolicy.onUp(host); + @Override + protected Host computeNext() { + while (childQueryPlan.hasNext()) { + Host host = childQueryPlan.next(); + if (!errorTracker.isExcluded(host)) { + return host; + } + } + return endOfData(); + } + }; + } + + @Override + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + @Override + public void onUp(Host host) { + childPolicy.onUp(host); + } + + @Override + public void onDown(Host host) { + childPolicy.onDown(host); + } + + @Override + public void onRemove(Host host) { + childPolicy.onRemove(host); + } + + /** + * Creates a new error aware policy builder given the child policy that the resulting policy + * should wrap. + * + * @param childPolicy the load balancing policy to wrap with error awareness. + * @return the created builder. 
+ */ + public static Builder builder(LoadBalancingPolicy childPolicy) { + return new Builder(childPolicy); + } + + @Override + public void close() { + childPolicy.close(); + } + + /** Utility class to create a {@link ErrorAwarePolicy}. */ + public static class Builder { + final LoadBalancingPolicy childPolicy; + + private int maxErrorsPerMinute = 1; + private long retryPeriodNanos = NANOSECONDS.convert(2, MINUTES); + private Clock clock = Clock.DEFAULT; + + private ErrorFilter errorFilter = new DefaultErrorFilter(); + + /** + * Creates a {@link Builder} instance. + * + * @param childPolicy the load balancing policy to wrap with error awareness. + */ + public Builder(LoadBalancingPolicy childPolicy) { + this.childPolicy = childPolicy; } - @Override - public void onDown(Host host) { - childPolicy.onDown(host); + /** + * Defines the maximum number of errors allowed per minute for each host. + * + *

    The policy keeps track of the number of errors on each host (filtered by {@link + * Builder#withErrorsFilter(com.datastax.driver.core.policies.ErrorAwarePolicy.ErrorFilter)}) + * over a sliding 1-minute window. If a host had more than this number of errors, it will be + * excluded from the query plan for the duration defined by {@link #withRetryPeriod(long, + * TimeUnit)}. + * + *

    Default value for the threshold is 1. + * + * @param maxErrorsPerMinute the number. + * @return this {@link Builder} instance, for method chaining. + */ + public Builder withMaxErrorsPerMinute(int maxErrorsPerMinute) { + this.maxErrorsPerMinute = maxErrorsPerMinute; + return this; } - @Override - public void onRemove(Host host) { - childPolicy.onRemove(host); + /** + * Defines the time during which a host is excluded by the policy once it has exceeded {@link + * #withMaxErrorsPerMinute(int)}. + * + *

    Default value for the retry period is 2 minutes. + * + * @param retryPeriod the period of exclusion for a host. + * @param retryPeriodTimeUnit the time unit for the retry period. + * @return this {@link Builder} instance, for method chaining. + */ + public Builder withRetryPeriod(long retryPeriod, TimeUnit retryPeriodTimeUnit) { + this.retryPeriodNanos = retryPeriodTimeUnit.toNanos(retryPeriod); + return this; } /** - * Creates a new error aware policy builder given the child policy - * that the resulting policy should wrap. + * Provides a filter that will decide which errors are counted towards {@link + * #withMaxErrorsPerMinute(int)}. + * + *

    The default implementation will exclude from the error counting, the following exception + * types: * - * @param childPolicy the load balancing policy to wrap with error - * awareness. - * @return the created builder. + *

      + *
    • {@link QueryConsistencyException} and {@link UnavailableException}: the assumption is + * that these errors are most often caused by other replicas being unavailable, not by + * something wrong on the coordinator; + *
    • {@link InvalidQueryException}, {@link AlreadyExistsException}, {@link SyntaxError}: + * these are likely caused by a bad query in client code, that will fail on all hosts. + * Excluding hosts could lead to complete loss of connectivity, rather the solution is to + * fix the query; + *
    • {@link FunctionExecutionException}: similarly, this is caused by a bad function + * definition and likely to fail on all hosts. + *
    + * + * @param errorFilter the filter class that the policy will use. + * @return this {@link Builder} instance, for method chaining. */ - public static Builder builder(LoadBalancingPolicy childPolicy) { - return new Builder(childPolicy); + public Builder withErrorsFilter(ErrorFilter errorFilter) { + this.errorFilter = errorFilter; + return this; } - @Override - public void close() { - childPolicy.close(); + @VisibleForTesting + Builder withClock(Clock clock) { + this.clock = clock; + return this; } /** - * Utility class to create a {@link ErrorAwarePolicy}. + * Creates the {@link ErrorAwarePolicy} instance. + * + * @return the newly created {@link ErrorAwarePolicy}. */ - public static class Builder { - final LoadBalancingPolicy childPolicy; - - private int maxErrorsPerMinute = 1; - private long retryPeriodNanos = NANOSECONDS.convert(2, MINUTES); - private Clock clock = Clock.DEFAULT; - - private ErrorFilter errorFilter = new DefaultErrorFilter(); - - /** - * Creates a {@link Builder} instance. - * - * @param childPolicy the load balancing policy to wrap with error - * awareness. - */ - public Builder(LoadBalancingPolicy childPolicy) { - this.childPolicy = childPolicy; - } - - /** - * Defines the maximum number of errors allowed per minute for each host. - *
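For completeness, a sketch of a custom filter plugged in through `withErrorsFilter` (illustrative only; it assumes one wants to ignore query-validation errors, mirroring part of the default behaviour described above):

```
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.exceptions.QueryValidationException;
import com.datastax.driver.core.policies.ErrorAwarePolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;

public class CustomErrorFilterExample {
  public static ErrorAwarePolicy buildPolicy() {
    return ErrorAwarePolicy.builder(new RoundRobinPolicy())
        .withErrorsFilter(
            new ErrorAwarePolicy.ErrorFilter() {
              @Override
              public boolean shouldConsiderError(Exception e, Host host, Statement statement) {
                // Invalid CQL fails on every host; do not hold it against the coordinator.
                return !(e instanceof QueryValidationException);
              }
            })
        .build();
  }
}
```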

    - * The policy keeps track of the number of errors on each host (filtered by - * {@link Builder#withErrorsFilter(com.datastax.driver.core.policies.ErrorAwarePolicy.ErrorFilter)}) - * over a sliding 1-minute window. If a host had more than this number - * of errors, it will be excluded from the query plan for the duration defined by - * {@link #withRetryPeriod(long, TimeUnit)}. - *

    - * Default value for the threshold is 1. - * - * @param maxErrorsPerMinute the number. - * @return this {@link Builder} instance, for method chaining. - */ - public Builder withMaxErrorsPerMinute(int maxErrorsPerMinute) { - this.maxErrorsPerMinute = maxErrorsPerMinute; - return this; - } - - /** - * Defines the time during which a host is excluded by the policy once it has exceeded - * {@link #withMaxErrorsPerMinute(int)}. - *

    - * Default value for the retry period is 2 minutes. - * - * @param retryPeriod the period of exclusion for a host. - * @param retryPeriodTimeUnit the time unit for the retry period. - * @return this {@link Builder} instance, for method chaining. - */ - public Builder withRetryPeriod(long retryPeriod, TimeUnit retryPeriodTimeUnit) { - this.retryPeriodNanos = retryPeriodTimeUnit.toNanos(retryPeriod); - return this; - } - - /** - * Provides a filter that will decide which errors are counted towards {@link #withMaxErrorsPerMinute(int)}. - *

    - * The default implementation will exclude from the error counting, the following exception types: - *

      - *
    • {@link QueryConsistencyException} and {@link UnavailableException}: the assumption is that these errors - * are most often caused by other replicas being unavailable, not by something wrong on the coordinator;
    - *
    • {@link InvalidQueryException}, {@link AlreadyExistsException}, {@link SyntaxError}: these are likely - * caused by a bad query in client code, that will fail on all hosts. Excluding hosts could lead to complete - * loss of connectivity, rather the solution is to fix the query;
    - *
    • {@link FunctionExecutionException}: similarly, this is caused by a bad function definition and likely to - * fail on all hosts.
    - *
    - * - * @param errorFilter the filter class that the policy will use. - * @return this {@link Builder} instance, for method chaining. - */ - public Builder withErrorsFilter(ErrorFilter errorFilter) { - this.errorFilter = errorFilter; - return this; - } - - @VisibleForTesting - Builder withClock(Clock clock) { - this.clock = clock; - return this; - } - - /** - * Creates the {@link ErrorAwarePolicy} instance. - * - * @return the newly created {@link ErrorAwarePolicy}. - */ - public ErrorAwarePolicy build() { - return new ErrorAwarePolicy(this); - } + public ErrorAwarePolicy build() { + return new ErrorAwarePolicy(this); } + } - class PerHostErrorTracker implements LatencyTracker { + class PerHostErrorTracker implements LatencyTracker { - private final int maxErrorsPerMinute; - private final ErrorFilter errorFilter; - private final Clock clock; - private final ConcurrentMap hostsCounts = new ConcurrentHashMap(); - private final ConcurrentMap exclusionTimes = new ConcurrentHashMap(); + private final int maxErrorsPerMinute; + private final ErrorFilter errorFilter; + private final Clock clock; + private final ConcurrentMap hostsCounts = + new ConcurrentHashMap(); + private final ConcurrentMap exclusionTimes = new ConcurrentHashMap(); - PerHostErrorTracker(int maxErrorsPerMinute, ErrorFilter errorFilter, Clock clock) { - this.maxErrorsPerMinute = maxErrorsPerMinute; - this.errorFilter = errorFilter; - this.clock = clock; - } + PerHostErrorTracker(int maxErrorsPerMinute, ErrorFilter errorFilter, Clock clock) { + this.maxErrorsPerMinute = maxErrorsPerMinute; + this.errorFilter = errorFilter; + this.clock = clock; + } - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - if (exception == null) { - return; - } - if (!errorFilter.shouldConsiderError(exception, host, statement)) { - return; - } - RollingCount hostCount = getOrCreateCount(host); - hostCount.increment(); - } + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + if (exception == null) { + return; + } + if (!errorFilter.shouldConsiderError(exception, host, statement)) { + return; + } + RollingCount hostCount = getOrCreateCount(host); + hostCount.increment(); + } - boolean isExcluded(Host host) { - Long excludedTime = exclusionTimes.get(host); - boolean expired = excludedTime != null && clock.nanoTime() - excludedTime >= retryPeriodNanos; - if (excludedTime == null || expired) { - if (maybeExcludeNow(host, excludedTime)) { - return true; - } - if (expired) { - // Cleanup, but make sure we don't overwrite if another thread just set it - exclusionTimes.remove(host, excludedTime); - } - return false; - } else { // host is already excluded - return true; - } + boolean isExcluded(Host host) { + Long excludedTime = exclusionTimes.get(host); + boolean expired = excludedTime != null && clock.nanoTime() - excludedTime >= retryPeriodNanos; + if (excludedTime == null || expired) { + if (maybeExcludeNow(host, excludedTime)) { + return true; } - - // Exclude if we're over the threshold - private boolean maybeExcludeNow(Host host, Long previousTime) { - RollingCount rollingCount = getOrCreateCount(host); - long count = rollingCount.get(); - if (count > maxErrorsPerMinute) { - excludeNow(host, count, previousTime); - return true; - } else { - return false; - } + if (expired) { + // Cleanup, but make sure we don't overwrite if another thread just set it + exclusionTimes.remove(host, excludedTime); } + return false; + } else { // host is 
already excluded + return true; + } + } - // Set the exclusion time to now, handling potential races - private void excludeNow(Host host, long count, Long previousTime) { - long now = clock.nanoTime(); - boolean didNotRace = (previousTime == null) - ? exclusionTimes.putIfAbsent(host, now) == null - : exclusionTimes.replace(host, previousTime, now); - - if (didNotRace && logger.isDebugEnabled()) { - logger.debug(String.format("Host %s encountered %d errors in the last minute, which is more " + - "than the maximum allowed (%d). It will be excluded from query plans for the " + - "next %d nanoseconds.", - host, count, maxErrorsPerMinute, retryPeriodNanos)); - } - } + // Exclude if we're over the threshold + private boolean maybeExcludeNow(Host host, Long previousTime) { + RollingCount rollingCount = getOrCreateCount(host); + long count = rollingCount.get(); + if (count > maxErrorsPerMinute) { + excludeNow(host, count, previousTime); + return true; + } else { + return false; + } + } - private RollingCount getOrCreateCount(Host host) { - RollingCount hostCount = hostsCounts.get(host); - if (hostCount == null) { - RollingCount tmp = new RollingCount(clock); - hostCount = hostsCounts.putIfAbsent(host, tmp); - if (hostCount == null) - hostCount = tmp; - } - return hostCount; - } + // Set the exclusion time to now, handling potential races + private void excludeNow(Host host, long count, Long previousTime) { + long now = clock.nanoTime(); + boolean didNotRace = + (previousTime == null) + ? exclusionTimes.putIfAbsent(host, now) == null + : exclusionTimes.replace(host, previousTime, now); + + if (didNotRace && logger.isDebugEnabled()) { + logger.debug( + String.format( + "Host %s encountered %d errors in the last minute, which is more " + + "than the maximum allowed (%d). It will be excluded from query plans for the " + + "next %d nanoseconds.", + host, count, maxErrorsPerMinute, retryPeriodNanos)); + } + } - @Override - public void onRegister(Cluster cluster) { - // nothing to do. - } + private RollingCount getOrCreateCount(Host host) { + RollingCount hostCount = hostsCounts.get(host); + if (hostCount == null) { + RollingCount tmp = new RollingCount(clock); + hostCount = hostsCounts.putIfAbsent(host, tmp); + if (hostCount == null) hostCount = tmp; + } + return hostCount; + } - @Override - public void onUnregister(Cluster cluster) { - // nothing to do. - } + @Override + public void onRegister(Cluster cluster) { + // nothing to do. } - static class DefaultErrorFilter implements ErrorFilter { - private static final List> IGNORED_EXCEPTIONS = - ImmutableList.>builder() - .add(FunctionExecutionException.class) - .add(QueryConsistencyException.class) - .add(UnavailableException.class) - .add(AlreadyExistsException.class) - .add(InvalidQueryException.class) - .add(SyntaxError.class) - .build(); - - @Override - public boolean shouldConsiderError(Exception e, Host host, Statement statement) { - for (Class ignoredException : IGNORED_EXCEPTIONS) { - if (ignoredException.isInstance(e)) - return false; - } - return true; - } + @Override + public void onUnregister(Cluster cluster) { + // nothing to do. 
} + } + + static class DefaultErrorFilter implements ErrorFilter { + private static final List> IGNORED_EXCEPTIONS = + ImmutableList.>builder() + .add(FunctionExecutionException.class) + .add(QueryConsistencyException.class) + .add(UnavailableException.class) + .add(AlreadyExistsException.class) + .add(InvalidQueryException.class) + .add(SyntaxError.class) + .build(); + @Override + public boolean shouldConsiderError(Exception e, Host host, Statement statement) { + for (Class ignoredException : IGNORED_EXCEPTIONS) { + if (ignoredException.isInstance(e)) return false; + } + return true; + } + } + + /** + * A filter for the errors considered by {@link ErrorAwarePolicy}. + * + *

    Only errors that indicate something wrong with a host should lead to its exclusion from + * query plans. + */ + public interface ErrorFilter { /** - * A filter for the errors considered by {@link ErrorAwarePolicy}. - *

    - * Only errors that indicate something wrong with a host should lead to its exclusion from query plans. + * Whether an error should be counted in the host's error rate. + * + * @param e the exception. + * @param host the host. + * @param statement the statement that caused the exception. + * @return {@code true} if the exception should be counted. */ - public interface ErrorFilter { - /** - * Whether an error should be counted in the host's error rate. - * - * @param e the exception. - * @param host the host. - * @param statement the statement that caused the exception. - * @return {@code true} if the exception should be counted. - */ - boolean shouldConsiderError(Exception e, Host host, Statement statement); - } + boolean shouldConsiderError(Exception e, Host host, Statement statement); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java index 9ae41755036..c206c530f56 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,93 +20,95 @@ import com.datastax.driver.core.Cluster; /** - * A reconnection policy that waits exponentially longer between each - * reconnection attempt (but keeps a constant delay once a maximum delay is - * reached). + * A reconnection policy that waits exponentially longer between each reconnection attempt (but + * keeps a constant delay once a maximum delay is reached). */ public class ExponentialReconnectionPolicy implements ReconnectionPolicy { - private final long baseDelayMs; - private final long maxDelayMs; - private final long maxAttempts; - - /** - * Creates a reconnection policy waiting exponentially longer for each new attempt. - * - * @param baseDelayMs the base delay in milliseconds to use for - * the schedules created by this policy. - * @param maxDelayMs the maximum delay to wait between two attempts. 
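To make the reconnection schedule concrete, a minimal configuration sketch (the delay values are arbitrary examples rather than driver defaults, and the contact point is a placeholder):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;

public class ReconnectionPolicyExample {
  public static void main(String[] args) {
    // Delays grow as min(2^(i-1) * baseDelayMs, maxDelayMs): 1s, 2s, 4s, 8s, ...
    // capped at 10 minutes once the exponential term exceeds the maximum.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000))
            .build();
    cluster.close();
  }
}
```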
- */ - public ExponentialReconnectionPolicy(long baseDelayMs, long maxDelayMs) { - if (baseDelayMs < 0 || maxDelayMs < 0) - throw new IllegalArgumentException("Invalid negative delay"); - if (baseDelayMs == 0) - throw new IllegalArgumentException("baseDelayMs must be strictly positive"); - if (maxDelayMs < baseDelayMs) - throw new IllegalArgumentException(String.format("maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", maxDelayMs, baseDelayMs)); - - this.baseDelayMs = baseDelayMs; - this.maxDelayMs = maxDelayMs; - - // Maximum number of attempts after which we overflow (which is kind of theoretical anyway, you'll - // die of old age before reaching that but hey ...) - int ceil = (baseDelayMs & (baseDelayMs - 1)) == 0 ? 0 : 1; - this.maxAttempts = 64 - Long.numberOfLeadingZeros(Long.MAX_VALUE / baseDelayMs) - ceil; - } - - /** - * The base delay in milliseconds for this policy (e.g. the delay before - * the first reconnection attempt). - * - * @return the base delay in milliseconds for this policy. - */ - public long getBaseDelayMs() { - return baseDelayMs; - } + private final long baseDelayMs; + private final long maxDelayMs; + private final long maxAttempts; + + /** + * Creates a reconnection policy waiting exponentially longer for each new attempt. + * + * @param baseDelayMs the base delay in milliseconds to use for the schedules created by this + * policy. + * @param maxDelayMs the maximum delay to wait between two attempts. + */ + public ExponentialReconnectionPolicy(long baseDelayMs, long maxDelayMs) { + if (baseDelayMs < 0 || maxDelayMs < 0) + throw new IllegalArgumentException("Invalid negative delay"); + if (baseDelayMs == 0) + throw new IllegalArgumentException("baseDelayMs must be strictly positive"); + if (maxDelayMs < baseDelayMs) + throw new IllegalArgumentException( + String.format( + "maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", + maxDelayMs, baseDelayMs)); + + this.baseDelayMs = baseDelayMs; + this.maxDelayMs = maxDelayMs; + + // Maximum number of attempts after which we overflow (which is kind of theoretical anyway, + // you'll + // die of old age before reaching that but hey ...) + int ceil = (baseDelayMs & (baseDelayMs - 1)) == 0 ? 0 : 1; + this.maxAttempts = 64 - Long.numberOfLeadingZeros(Long.MAX_VALUE / baseDelayMs) - ceil; + } + + /** + * The base delay in milliseconds for this policy (e.g. the delay before the first reconnection + * attempt). + * + * @return the base delay in milliseconds for this policy. + */ + public long getBaseDelayMs() { + return baseDelayMs; + } + + /** + * The maximum delay in milliseconds between reconnection attempts for this policy. + * + * @return the maximum delay in milliseconds between reconnection attempts for this policy. + */ + public long getMaxDelayMs() { + return maxDelayMs; + } + + /** + * A new schedule that used an exponentially growing delay between reconnection attempts. + * + *

    For this schedule, reconnection attempt {@code i} will be tried {@code Math.min(2^(i-1) * + * getBaseDelayMs(), getMaxDelayMs())} milliseconds after the previous one. + * + * @return the newly created schedule. + */ + @Override + public ReconnectionSchedule newSchedule() { + return new ExponentialSchedule(); + } + + private class ExponentialSchedule implements ReconnectionSchedule { + + private int attempts; - /** - * The maximum delay in milliseconds between reconnection attempts for this policy. - * - * @return the maximum delay in milliseconds between reconnection attempts for this policy. - */ - public long getMaxDelayMs() { - return maxDelayMs; - } - - /** - * A new schedule that used an exponentially growing delay between reconnection attempts. - *

    - * For this schedule, reconnection attempt {@code i} will be tried - * {@code Math.min(2^(i-1) * getBaseDelayMs(), getMaxDelayMs())} milliseconds after the previous one. - * - * @return the newly created schedule. - */ @Override - public ReconnectionSchedule newSchedule() { - return new ExponentialSchedule(); - } + public long nextDelayMs() { - private class ExponentialSchedule implements ReconnectionSchedule { + if (attempts > maxAttempts) return maxDelayMs; - private int attempts; - - @Override - public long nextDelayMs() { - - if (attempts > maxAttempts) - return maxDelayMs; - - return Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); - } + return Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); } + } - @Override - public void init(Cluster cluster) { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } - @Override - public void close() { - // nothing to do - } + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java index a7acb7feab1..610283255b0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,65 +25,82 @@ /** * A retry policy that never retries (nor ignores). - *

    - * All of the methods of this retry policy unconditionally return {@link RetryPolicy.RetryDecision#rethrow()}. - * If this policy is used, retry logic will have to be implemented in business code. + * + *

    All of the methods of this retry policy unconditionally return {@link + * RetryPolicy.RetryDecision#rethrow()}. If this policy is used, retry logic will have to be + * implemented in business code. */ public class FallthroughRetryPolicy implements RetryPolicy { - public static final FallthroughRetryPolicy INSTANCE = new FallthroughRetryPolicy(); - - private FallthroughRetryPolicy() { - } + public static final FallthroughRetryPolicy INSTANCE = new FallthroughRetryPolicy(); - /** - * {@inheritDoc} - *
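A short usage sketch (illustrative; the contact point is a placeholder): with this policy installed, any retry handling lives entirely in application code.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.FallthroughRetryPolicy;

public class FallthroughRetryPolicyExample {
  public static void main(String[] args) {
    // Every read/write timeout, unavailable error or request error is rethrown to the
    // caller unchanged; the driver itself never retries.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withRetryPolicy(FallthroughRetryPolicy.INSTANCE)
            .build();
    cluster.close();
  }
}
```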

    - * This implementation always returns {@code RetryDecision.rethrow()}. - */ - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryDecision.rethrow(); - } + private FallthroughRetryPolicy() {} - /** - * {@inheritDoc} - *

    - * This implementation always returns {@code RetryDecision.rethrow()}. - */ - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.rethrow(); - } + /** + * {@inheritDoc} + * + *

    This implementation always returns {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return RetryDecision.rethrow(); + } - /** - * {@inheritDoc} - *

    - * This implementation always returns {@code RetryDecision.rethrow()}. - */ - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return RetryDecision.rethrow(); - } + /** + * {@inheritDoc} + * + *

    This implementation always returns {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return RetryDecision.rethrow(); + } - /** - * {@inheritDoc} - *

    - * This implementation always returns {@code RetryDecision.rethrow()}. - */ - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.rethrow(); - } + /** + * {@inheritDoc} + * + *

    This implementation always returns {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return RetryDecision.rethrow(); + } + /** + * {@inheritDoc} + * + *

    This implementation always returns {@code RetryDecision.rethrow()}. + */ + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return RetryDecision.rethrow(); + } - @Override - public void init(Cluster cluster) { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } - @Override - public void close() { - // nothing to do - } + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/HostFilterPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/HostFilterPolicy.java index cacb4607d0f..4dddb87aef4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/HostFilterPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/HostFilterPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,151 +24,146 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; import com.google.common.collect.ImmutableSet; - import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; /** - * A load balancing policy wrapper that ensures that only hosts matching the predicate - * will ever be returned. - *

    - * This policy wraps another load balancing policy and will delegate the choice - * of hosts to the wrapped policy with the exception that only hosts matching - * the predicate provided when constructing this policy will ever be - * returned. Any host not matching the predicate will be considered {@code IGNORED} - * and thus will not be connected to. + * A load balancing policy wrapper that ensures that only hosts matching the predicate will ever be + * returned. + * + *

    This policy wraps another load balancing policy and will delegate the choice of hosts to the + * wrapped policy with the exception that only hosts matching the predicate provided when + * constructing this policy will ever be returned. Any host not matching the predicate will be + * considered {@code IGNORED} and thus will not be connected to. */ public class HostFilterPolicy implements ChainableLoadBalancingPolicy { - private final LoadBalancingPolicy childPolicy; - private final Predicate predicate; - - /** - * Create a new policy that wraps the provided child policy but only "allows" hosts - * matching the predicate. - * - * @param childPolicy the wrapped policy. - * @param predicate the host predicate. Only hosts matching this predicate may get connected - * to (whether they will get connected to or not depends on the child policy). - */ - public HostFilterPolicy(LoadBalancingPolicy childPolicy, Predicate predicate) { - this.childPolicy = childPolicy; - this.predicate = predicate; - } - - @Override - public LoadBalancingPolicy getChildPolicy() { - return childPolicy; - } - - /** - * {@inheritDoc} - * - * @throws IllegalArgumentException if none of the host in {@code hosts} - * (which will correspond to the contact points) matches the predicate. - */ - @Override - public void init(Cluster cluster, Collection hosts) { - List whiteHosts = new ArrayList(hosts.size()); - for (Host host : hosts) - if (predicate.apply(host)) - whiteHosts.add(host); - - if (whiteHosts.isEmpty()) - throw new IllegalArgumentException(String.format("Cannot use HostFilterPolicy where the filter allows none of the contacts points (%s)", hosts)); - - childPolicy.init(cluster, whiteHosts); - } - - /** - * {@inheritDoc} - * - * @return {@link HostDistance#IGNORED} if {@code host} is not matching the predicate, the HostDistance - * as returned by the wrapped policy otherwise. - */ - @Override - public HostDistance distance(Host host) { - return predicate.apply(host) - ? childPolicy.distance(host) - : HostDistance.IGNORED; - } - - /** - * {@inheritDoc} - *
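A usage sketch built on the `fromDCWhiteList` factory that appears further down in this file (illustrative; the datacenter name and contact point are placeholders):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.HostFilterPolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import java.util.Arrays;

public class HostFilterPolicyExample {
  public static void main(String[] args) {
    // Only hosts whose datacenter is "dc1" are eligible; all other hosts are IGNORED
    // and the driver will not open connections to them.
    HostFilterPolicy policy =
        HostFilterPolicy.fromDCWhiteList(new RoundRobinPolicy(), Arrays.asList("dc1"));
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();
    cluster.close();
  }
}
```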

    - * It is guaranteed that only hosts matching the predicate will be returned. - */ - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - // Just delegate to the child policy, since we filter the hosts not white - // listed upfront, the child policy will never see a host that is not white - // listed and thus can't return one. - return childPolicy.newQueryPlan(loggedKeyspace, statement); - } - - @Override - public void onUp(Host host) { - if (predicate.apply(host)) - childPolicy.onUp(host); - } - - @Override - public void onDown(Host host) { - if (predicate.apply(host)) - childPolicy.onDown(host); - } - - @Override - public void onAdd(Host host) { - if (predicate.apply(host)) - childPolicy.onAdd(host); - } - - @Override - public void onRemove(Host host) { - if (predicate.apply(host)) - childPolicy.onRemove(host); - } - - @Override - public void close() { - childPolicy.close(); - } - - /** - * Create a new policy that wraps the provided child policy but only "allows" hosts - * whose DC belongs to the provided list. - * - * @param childPolicy the wrapped policy. - * @param dcs the DCs. - * @return the policy. - */ - public static HostFilterPolicy fromDCWhiteList(LoadBalancingPolicy childPolicy, Iterable dcs) { - return new HostFilterPolicy(childPolicy, hostDCPredicate(dcs, true)); - } - - /** - * Create a new policy that wraps the provided child policy but only "forbids" hosts - * whose DC belongs to the provided list. - * - * @param childPolicy the wrapped policy. - * @param dcs the DCs. - * @return the policy. - */ - public static HostFilterPolicy fromDCBlackList(LoadBalancingPolicy childPolicy, Iterable dcs) { - return new HostFilterPolicy(childPolicy, Predicates.not(hostDCPredicate(dcs, false))); - } - - private static Predicate hostDCPredicate(Iterable dcs, final boolean includeNullDC) { - final ImmutableSet _dcs = ImmutableSet.copyOf(dcs); - return new Predicate() { - @Override - public boolean apply(Host host) { - String hdc = host.getDatacenter(); - return (hdc == null) ? includeNullDC : _dcs.contains(hdc); - } - }; - } - + private final LoadBalancingPolicy childPolicy; + private final Predicate predicate; + + /** + * Create a new policy that wraps the provided child policy but only "allows" hosts matching the + * predicate. + * + * @param childPolicy the wrapped policy. + * @param predicate the host predicate. Only hosts matching this predicate may get connected to + * (whether they will get connected to or not depends on the child policy). + */ + public HostFilterPolicy(LoadBalancingPolicy childPolicy, Predicate predicate) { + this.childPolicy = childPolicy; + this.predicate = predicate; + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + /** + * {@inheritDoc} + * + * @throws IllegalArgumentException if none of the host in {@code hosts} (which will correspond to + * the contact points) matches the predicate. 
+ */ + @Override + public void init(Cluster cluster, Collection hosts) { + List whiteHosts = new ArrayList(hosts.size()); + for (Host host : hosts) if (predicate.apply(host)) whiteHosts.add(host); + + if (whiteHosts.isEmpty()) + throw new IllegalArgumentException( + String.format( + "Cannot use HostFilterPolicy where the filter allows none of the contacts points (%s)", + hosts)); + + childPolicy.init(cluster, whiteHosts); + } + + /** + * {@inheritDoc} + * + * @return {@link HostDistance#IGNORED} if {@code host} is not matching the predicate, the + * HostDistance as returned by the wrapped policy otherwise. + */ + @Override + public HostDistance distance(Host host) { + return predicate.apply(host) ? childPolicy.distance(host) : HostDistance.IGNORED; + } + + /** + * {@inheritDoc} + * + *

    It is guaranteed that only hosts matching the predicate will be returned. + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + // Just delegate to the child policy, since we filter the hosts not white + // listed upfront, the child policy will never see a host that is not white + // listed and thus can't return one. + return childPolicy.newQueryPlan(loggedKeyspace, statement); + } + + @Override + public void onUp(Host host) { + if (predicate.apply(host)) childPolicy.onUp(host); + } + + @Override + public void onDown(Host host) { + if (predicate.apply(host)) childPolicy.onDown(host); + } + + @Override + public void onAdd(Host host) { + if (predicate.apply(host)) childPolicy.onAdd(host); + } + + @Override + public void onRemove(Host host) { + if (predicate.apply(host)) childPolicy.onRemove(host); + } + + @Override + public void close() { + childPolicy.close(); + } + + /** + * Create a new policy that wraps the provided child policy but only "allows" hosts whose DC + * belongs to the provided list. + * + * @param childPolicy the wrapped policy. + * @param dcs the DCs. + * @return the policy. + */ + public static HostFilterPolicy fromDCWhiteList( + LoadBalancingPolicy childPolicy, Iterable dcs) { + return new HostFilterPolicy(childPolicy, hostDCPredicate(dcs, true)); + } + + /** + * Create a new policy that wraps the provided child policy but only "forbids" hosts whose DC + * belongs to the provided list. + * + * @param childPolicy the wrapped policy. + * @param dcs the DCs. + * @return the policy. + */ + public static HostFilterPolicy fromDCBlackList( + LoadBalancingPolicy childPolicy, Iterable dcs) { + return new HostFilterPolicy(childPolicy, Predicates.not(hostDCPredicate(dcs, false))); + } + + private static Predicate hostDCPredicate( + Iterable dcs, final boolean includeNullDC) { + final ImmutableSet _dcs = ImmutableSet.copyOf(dcs); + return new Predicate() { + @Override + public boolean apply(Host host) { + String hdc = host.getDatacenter(); + return (hdc == null) ? includeNullDC : _dcs.contains(hdc); + } + }; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicy.java index c9777c79dfe..f1a1009bcb1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,94 +17,111 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.QueryOptions; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; /** * A retry policy that avoids retrying non-idempotent statements. - *

    - * In case of write timeouts or unexpected errors, this policy will always return {@link com.datastax.driver.core.policies.RetryPolicy.RetryDecision#rethrow()} - * if the statement is deemed non-idempotent (see {@link #isIdempotent(Statement)}). - *

    - * For all other cases, this policy delegates the decision to the child policy. * - * @deprecated As of version 3.1.0, the driver doesn't retry non-idempotent statements for write timeouts or unexpected - * errors anymore. It is no longer necessary to wrap your retry policies in this policy. + *

    In case of write timeouts or unexpected errors, this policy will always return {@link + * com.datastax.driver.core.policies.RetryPolicy.RetryDecision#rethrow()} if the statement is deemed + * non-idempotent (see {@link #isIdempotent(Statement)}). + * + *

    For all other cases, this policy delegates the decision to the child policy. + * + * @deprecated As of version 3.1.0, the driver doesn't retry non-idempotent statements for write + * timeouts or unexpected errors anymore. It is no longer necessary to wrap your retry policies + * in this policy. */ @Deprecated public class IdempotenceAwareRetryPolicy implements RetryPolicy { - private final RetryPolicy childPolicy; - - private QueryOptions queryOptions; + private final RetryPolicy childPolicy; - /** - * Creates a new instance. - * - * @param childPolicy the policy to wrap. - */ - public IdempotenceAwareRetryPolicy(RetryPolicy childPolicy) { - this.childPolicy = childPolicy; - } + private QueryOptions queryOptions; - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return childPolicy.onReadTimeout(statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); - } + /** + * Creates a new instance. + * + * @param childPolicy the policy to wrap. + */ + public IdempotenceAwareRetryPolicy(RetryPolicy childPolicy) { + this.childPolicy = childPolicy; + } - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - if (isIdempotent(statement)) - return childPolicy.onWriteTimeout(statement, cl, writeType, requiredAcks, receivedAcks, nbRetry); - else - return RetryDecision.rethrow(); - } + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return childPolicy.onReadTimeout( + statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); + } - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return childPolicy.onUnavailable(statement, cl, requiredReplica, aliveReplica, nbRetry); - } + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + if (isIdempotent(statement)) + return childPolicy.onWriteTimeout( + statement, cl, writeType, requiredAcks, receivedAcks, nbRetry); + else return RetryDecision.rethrow(); + } - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - if (isIdempotent(statement)) - return childPolicy.onRequestError(statement, cl, e, nbRetry); - else - return RetryDecision.rethrow(); - } + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return childPolicy.onUnavailable(statement, cl, requiredReplica, aliveReplica, nbRetry); + } - @Override - public void init(Cluster cluster) { - childPolicy.init(cluster); - queryOptions = cluster.getConfiguration().getQueryOptions(); - } + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + if (isIdempotent(statement)) return childPolicy.onRequestError(statement, cl, e, nbRetry); + else return RetryDecision.rethrow(); + } - @Override - public void close() { - childPolicy.close(); - } + @Override + public void init(Cluster cluster) { + childPolicy.init(cluster); + queryOptions = 
cluster.getConfiguration().getQueryOptions(); + } - /** - * Determines whether the given statement is idempotent or not. - *

    - * The current implementation inspects the statement's - * {@link Statement#isIdempotent() idempotent flag}; - * if this flag is not set, then it inspects - * {@link QueryOptions#getDefaultIdempotence()}. - *

    - * Subclasses may override if they have better knowledge of - * the statement being executed. - * - * @param statement The statement to execute. - * @return {@code true} if the given statement is idempotent, - * {@code false} otherwise - */ - protected boolean isIdempotent(Statement statement) { - Boolean myValue = statement.isIdempotent(); - if (myValue != null) - return myValue; - else - return queryOptions.getDefaultIdempotence(); - } + @Override + public void close() { + childPolicy.close(); + } + /** + * Determines whether the given statement is idempotent or not. + * + *

    The current implementation inspects the statement's {@link Statement#isIdempotent() + * idempotent flag}; if this flag is not set, then it inspects {@link + * QueryOptions#getDefaultIdempotence()}. + * + *
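To illustrate the two sources consulted here, a sketch (the CQL string and table are made up): the per-statement flag takes precedence, and the cluster-wide default from `QueryOptions` is only used when that flag was never set.

```
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class IdempotenceExample {
  public static void main(String[] args) {
    // Explicit per-statement flag: isIdempotent() returns Boolean.TRUE.
    Statement update =
        new SimpleStatement("UPDATE users SET name = 'x' WHERE id = 1").setIdempotent(true);

    // Cluster-wide fallback, consulted only when the statement flag is left unset (null).
    QueryOptions options = new QueryOptions().setDefaultIdempotence(false);

    System.out.println(update.isIdempotent()); // true
    System.out.println(options.getDefaultIdempotence()); // false
  }
}
```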

    Subclasses may override if they have better knowledge of the statement being executed. + * + * @param statement The statement to execute. + * @return {@code true} if the given statement is idempotent, {@code false} otherwise + */ + protected boolean isIdempotent(Statement statement) { + Boolean myValue = statement.isIdempotent(); + if (myValue != null) return myValue; + else return queryOptions.getDefaultIdempotence(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslator.java b/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslator.java index 2cdf935d96e..03ada4bfae2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/IdentityTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,35 +18,30 @@ package com.datastax.driver.core.policies; import com.datastax.driver.core.Cluster; - import java.net.InetSocketAddress; -/** - * The default {@link AddressTranslator} used by the driver that do no - * translation. - */ +/** The default {@link AddressTranslator} used by the driver that do no translation. */ public class IdentityTranslator implements AddressTranslator { - @Override - public void init(Cluster cluster) { - // Nothing to do - } + @Override + public void init(Cluster cluster) { + // Nothing to do + } - /** - * Translates a Cassandra {@code rpc_address} to another address if necessary. - *

    - * This method is the identity function, it always return the address passed - * in argument, doing no translation. - * - * @param address the address of a node as returned by Cassandra. - * @return {@code address} unmodified. - */ - @Override - public InetSocketAddress translate(InetSocketAddress address) { - return address; - } + /** + * Translates a Cassandra {@code rpc_address} to another address if necessary. + * + *

    This method is the identity function, it always return the address passed in argument, doing + * no translation. + * + * @param address the address of a node as returned by Cassandra. + * @return {@code address} unmodified. + */ + @Override + public InetSocketAddress translate(InetSocketAddress address) { + return address; + } - @Override - public void close() { - } + @Override + public void close() {} } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java index dc9b876e1d3..b62acf892bc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LatencyAwarePolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,708 +17,823 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.*; +import com.codahale.metrics.Gauge; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.LatencyTracker; +import com.datastax.driver.core.Metrics; +import com.datastax.driver.core.MetricsUtil; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.QueryValidationException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.UnpreparedException; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.AbstractIterator; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; -import java.util.concurrent.*; 
-import java.util.concurrent.atomic.AtomicReference; - /** * A wrapper load balancing policy that adds latency awareness to a child policy. - *

    - * When used, this policy will collect the latencies of the queries to each - * Cassandra node and maintain a per-node average latency score. The nodes - * that are slower than the best performing node by more than a configurable threshold - * will be moved to the end of the query plan (that is, they will only be tried if - * all other nodes failed). Note that this policy only penalizes slow nodes, it does - * not globally sort the query plan by latency. - *

    - * The latency score for a given node is a based on a form of - * exponential moving average. - * In other words, the latency score of a node is the average of its previously - * measured latencies, but where older measurements gets an exponentially decreasing - * weight. The exact weight applied to a newly received latency is based on the - * time elapsed since the previous measure (to account for the fact that - * latencies are not necessarily reported with equal regularity, neither - * over time nor between different nodes). - *

    - * Once a node is excluded from query plans (because its averaged latency grew - * over the exclusion threshold), its latency score will not be updated anymore - * (since it is not queried). To give a chance to this node to recover, the - * policy has a configurable retry period. The policy will not penalize a host - * for which no measurement has been collected for more than this retry period. - *

    - * Please see the {@link Builder} class and methods for more details on the - * possible parameters of this policy. + * + *

    When used, this policy will collect the latencies of the queries to each Cassandra node and + * maintain a per-node average latency score. The nodes that are slower than the best performing + * node by more than a configurable threshold will be moved to the end of the query plan (that is, + * they will only be tried if all other nodes failed). Note that this policy only penalizes slow + * nodes; it does not globally sort the query plan by latency. + * + *

    The latency score for a given node is based on a form of exponential moving + * average. In other words, the latency score of a node is the average of its previously + * measured latencies, but where older measurements get an exponentially decreasing weight. The + * exact weight applied to a newly received latency is based on the time elapsed since the previous + * measure (to account for the fact that latencies are not necessarily reported with equal + * regularity, neither over time nor between different nodes). + * + *

    Once a node is excluded from query plans (because its averaged latency grew over the exclusion + * threshold), its latency score will not be updated anymore (since it is not queried). To give a + * chance to this node to recover, the policy has a configurable retry period. The policy will not + * penalize a host for which no measurement has been collected for more than this retry period. + * + *
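A minimal installation sketch (illustrative; only the defaults are used here, and the specific tuning knobs such as the exclusion threshold and retry period are configured through the Builder discussed below):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.LatencyAwarePolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;

public class LatencyAwarePolicyExample {
  public static void main(String[] args) {
    // Wrap a child policy: hosts whose average latency drifts too far above the current
    // minimum are pushed to the end of query plans until their score recovers.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(
                LatencyAwarePolicy.builder(new RoundRobinPolicy()).build())
            .build();
    cluster.close();
  }
}
```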

    Please see the {@link Builder} class and methods for more details on the possible parameters + * of this policy. * * @since 1.0.4 */ public class LatencyAwarePolicy implements ChainableLoadBalancingPolicy { - private static final Logger logger = LoggerFactory.getLogger(LatencyAwarePolicy.class); - - private final LoadBalancingPolicy childPolicy; - private final Tracker latencyTracker; - private final ScheduledExecutorService updaterService = Executors.newSingleThreadScheduledExecutor(threadFactory("LatencyAwarePolicy updater")); - - private final double exclusionThreshold; - - private final long scale; - private final long retryPeriod; - private final long minMeasure; - - private LatencyAwarePolicy(LoadBalancingPolicy childPolicy, - double exclusionThreshold, - long scale, - long retryPeriod, - long updateRate, - int minMeasure) { - this.childPolicy = childPolicy; - this.retryPeriod = retryPeriod; - this.scale = scale; - this.latencyTracker = new Tracker(); - this.exclusionThreshold = exclusionThreshold; - this.minMeasure = minMeasure; - - updaterService.scheduleAtFixedRate(new Updater(), updateRate, updateRate, TimeUnit.NANOSECONDS); - } + private static final Logger logger = LoggerFactory.getLogger(LatencyAwarePolicy.class); + private static final boolean HOST_METRICS_ENABLED = + Boolean.getBoolean("com.datastax.driver.HOST_METRICS_ENABLED"); + + private final LoadBalancingPolicy childPolicy; + private final Tracker latencyTracker; + private final ScheduledExecutorService updaterService = + Executors.newSingleThreadScheduledExecutor(threadFactory("LatencyAwarePolicy updater")); + + private final double exclusionThreshold; + + private final long scale; + private final long retryPeriod; + private final long minMeasure; + private volatile Metrics metrics; + + private LatencyAwarePolicy( + LoadBalancingPolicy childPolicy, + double exclusionThreshold, + long scale, + long retryPeriod, + long updateRate, + int minMeasure) { + this.childPolicy = childPolicy; + this.retryPeriod = retryPeriod; + this.scale = scale; + this.latencyTracker = new Tracker(); + this.exclusionThreshold = exclusionThreshold; + this.minMeasure = minMeasure; + + updaterService.scheduleAtFixedRate(new Updater(), updateRate, updateRate, TimeUnit.NANOSECONDS); + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + /** + * Creates a new latency aware policy builder given the child policy that the resulting policy + * should wrap. + * + * @param childPolicy the load balancing policy to wrap with latency awareness. + * @return the created builder. + */ + public static Builder builder(LoadBalancingPolicy childPolicy) { + return new Builder(childPolicy); + } + + @VisibleForTesting + class Updater implements Runnable { + + private Set excludedAtLastTick = Collections.emptySet(); @Override - public LoadBalancingPolicy getChildPolicy() { - return childPolicy; - } + public void run() { + try { + logger.trace("Updating LatencyAwarePolicy minimum"); + latencyTracker.updateMin(); + + if (logger.isDebugEnabled()) { + /* + * For users to be able to know if the policy potentially needs tuning, we need to provide + * some feedback on on how things evolve. For that, we use the min computation to also check + * which host will be excluded if a query is submitted now and if any host is, we log it (but + * we try to avoid flooding too). This is probably interesting information anyway since it + * gets an idea of which host perform badly. 
+ */ + Set excludedThisTick = new HashSet(); + double currentMin = latencyTracker.getMinAverage(); + for (Map.Entry entry : + getScoresSnapshot().getAllStats().entrySet()) { + Host host = entry.getKey(); + Snapshot.Stats stats = entry.getValue(); + if (stats.getMeasurementsCount() < minMeasure) continue; + + if (stats.lastUpdatedSince() > retryPeriod) { + if (excludedAtLastTick.contains(host)) + logger.debug( + String.format( + "Previously avoided host %s has not be queried since %.3fms: will be reconsidered.", + host, inMS(stats.lastUpdatedSince()))); + continue; + } - /** - * Creates a new latency aware policy builder given the child policy - * that the resulting policy should wrap. - * - * @param childPolicy the load balancing policy to wrap with latency - * awareness. - * @return the created builder. - */ - public static Builder builder(LoadBalancingPolicy childPolicy) { - return new Builder(childPolicy); - } + if (stats.getLatencyScore() > ((long) (exclusionThreshold * currentMin))) { + excludedThisTick.add(host); + if (!excludedAtLastTick.contains(host)) + logger.debug( + String.format( + "Host %s has an average latency score of %.3fms, more than %f times more than the minimum %.3fms: will be avoided temporarily.", + host, inMS(stats.getLatencyScore()), exclusionThreshold, inMS(currentMin))); + continue; + } - @VisibleForTesting - class Updater implements Runnable { - - private Set excludedAtLastTick = Collections.emptySet(); - - @Override - public void run() { - try { - logger.trace("Updating LatencyAwarePolicy minimum"); - latencyTracker.updateMin(); - - if (logger.isDebugEnabled()) { - /* - * For users to be able to know if the policy potentially needs tuning, we need to provide - * some feedback on on how things evolve. For that, we use the min computation to also check - * which host will be excluded if a query is submitted now and if any host is, we log it (but - * we try to avoid flooding too). This is probably interesting information anyway since it - * gets an idea of which host perform badly. - */ - Set excludedThisTick = new HashSet(); - double currentMin = latencyTracker.getMinAverage(); - for (Map.Entry entry : getScoresSnapshot().getAllStats().entrySet()) { - Host host = entry.getKey(); - Snapshot.Stats stats = entry.getValue(); - if (stats.getMeasurementsCount() < minMeasure) - continue; - - if (stats.lastUpdatedSince() > retryPeriod) { - if (excludedAtLastTick.contains(host)) - logger.debug(String.format("Previously avoided host %s has not be queried since %.3fms: will be reconsidered.", host, inMS(stats.lastUpdatedSince()))); - continue; - } - - if (stats.getLatencyScore() > ((long) (exclusionThreshold * currentMin))) { - excludedThisTick.add(host); - if (!excludedAtLastTick.contains(host)) - logger.debug(String.format("Host %s has an average latency score of %.3fms, more than %f times more than the minimum %.3fms: will be avoided temporarily.", - host, inMS(stats.getLatencyScore()), exclusionThreshold, inMS(currentMin))); - continue; - } - - if (excludedAtLastTick.contains(host)) { - logger.debug("Previously avoided host {} average latency has come back within accepted bounds: will be reconsidered.", host); - } - } - excludedAtLastTick = excludedThisTick; - } - } catch (RuntimeException e) { - // An unexpected exception would suppress further execution, so catch, log, but swallow after that. 
- logger.error("Error while updating LatencyAwarePolicy minimum", e); + if (excludedAtLastTick.contains(host)) { + logger.debug( + "Previously avoided host {} average latency has come back within accepted bounds: will be reconsidered.", + host); } + } + excludedAtLastTick = excludedThisTick; } + } catch (RuntimeException e) { + // An unexpected exception would suppress further execution, so catch, log, but swallow + // after that. + logger.error("Error while updating LatencyAwarePolicy minimum", e); + } } + } - private static double inMS(long nanos) { - return ((double) nanos) / (1000 * 1000); - } + private static double inMS(long nanos) { + return ((double) nanos) / (1000 * 1000); + } - private static double inMS(double nanos) { - return nanos / (1000 * 1000); - } + private static double inMS(double nanos) { + return nanos / (1000 * 1000); + } - private static ThreadFactory threadFactory(String nameFormat) { - return new ThreadFactoryBuilder().setNameFormat(nameFormat).build(); - } + private static ThreadFactory threadFactory(String nameFormat) { + return new ThreadFactoryBuilder().setNameFormat(nameFormat).build(); + } - @Override - public void init(Cluster cluster, Collection hosts) { - childPolicy.init(cluster, hosts); - cluster.register(latencyTracker); + @Override + public void init(Cluster cluster, Collection hosts) { + childPolicy.init(cluster, hosts); + for (Host host : hosts) { + latencyTracker.addHost(host); + } + cluster.register(latencyTracker); + metrics = cluster.getMetrics(); + if (metrics != null) { + metrics + .getRegistry() + .register( + "LatencyAwarePolicy.latencies.min", + new Gauge() { + @Override + public Long getValue() { + return latencyTracker.getMinAverage(); + } + }); } + } + + /** + * Returns the HostDistance for the provided host. + * + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host} as returned by the wrapped policy. + */ + @Override + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } + + /** + * Returns the hosts to use for a new query. + * + *

    The returned plan will be the same as the plan generated by the child policy, except that + * nodes that are slower than the best performing node by more than a configurable threshold will + * be moved to the end (that is, they will only be tried if all other nodes failed). Note that + * this policy only penalizes slow nodes, it does not globally sort the query plan by + * latency. + * + * @param loggedKeyspace the currently logged keyspace. + * @param statement the statement for which to build the plan. + * @return the new query plan. + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + final Iterator childIter = childPolicy.newQueryPlan(loggedKeyspace, statement); + return new AbstractIterator() { + + private Queue skipped; + + @Override + protected Host computeNext() { + long min = latencyTracker.getMinAverage(); + long now = System.nanoTime(); + while (childIter.hasNext()) { + Host host = childIter.next(); + TimestampedAverage latency = latencyTracker.latencyOf(host); + + // If we haven't had enough data point yet to have a score, or the last update of the + // score + // is just too old, include the host. + if (min < 0 + || latency == null + || latency.nbMeasure < minMeasure + || (now - latency.timestamp) > retryPeriod) { + if (hostMetricsEnabled()) { + metrics + .getRegistry() + .counter( + MetricsUtil.hostMetricName("LatencyAwarePolicy.inclusions-nodata.", host)) + .inc(); + } + return host; + } + + // If the host latency is within acceptable bound of the faster known host, return + // that host. Otherwise, skip it. + if (latency.average <= ((long) (exclusionThreshold * (double) min))) { + if (hostMetricsEnabled()) { + metrics + .getRegistry() + .counter(MetricsUtil.hostMetricName("LatencyAwarePolicy.inclusions.", host)) + .inc(); + } + return host; + } + + if (skipped == null) skipped = new ArrayDeque(); + skipped.offer(host); + if (hostMetricsEnabled()) { + metrics + .getRegistry() + .counter(MetricsUtil.hostMetricName("LatencyAwarePolicy.exclusions.", host)) + .inc(); + } + } - /** - * Returns the HostDistance for the provided host. - * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host} as returned by the wrapped policy. - */ - @Override - public HostDistance distance(Host host) { - return childPolicy.distance(host); + if (skipped != null && !skipped.isEmpty()) { + Host host = skipped.poll(); + if (hostMetricsEnabled()) { + metrics + .getRegistry() + .counter( + MetricsUtil.hostMetricName("LatencyAwarePolicy.hits-while-excluded.", host)) + .inc(); + } + return host; + } + + return endOfData(); + }; + }; + } + + /** + * Returns a snapshot of the scores (latency averages) maintained by this policy. + * + * @return a new (immutable) {@link Snapshot} object containing the current latency scores + * maintained by this policy. 
+ */ + public Snapshot getScoresSnapshot() { + Map currentLatencies = latencyTracker.currentLatencies(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + long now = System.nanoTime(); + for (Map.Entry entry : currentLatencies.entrySet()) { + Host host = entry.getKey(); + TimestampedAverage latency = entry.getValue(); + Snapshot.Stats stats = + new Snapshot.Stats(now - latency.timestamp, latency.average, latency.nbMeasure); + builder.put(host, stats); + } + return new Snapshot(builder.build()); + } + + @Override + public void onUp(Host host) { + childPolicy.onUp(host); + latencyTracker.addHost(host); + } + + @Override + public void onDown(Host host) { + childPolicy.onDown(host); + latencyTracker.resetHost(host); + } + + @Override + public void onAdd(Host host) { + childPolicy.onAdd(host); + latencyTracker.addHost(host); + } + + @Override + public void onRemove(Host host) { + childPolicy.onRemove(host); + latencyTracker.resetHost(host); + } + + /** + * An immutable snapshot of the per-host scores (and stats in general) maintained by {@code + * LatencyAwarePolicy} to base its decision upon. + */ + public static class Snapshot { + private final Map stats; + + private Snapshot(Map stats) { + this.stats = stats; } /** - * Returns the hosts to use for a new query. - *

    - * The returned plan will be the same as the plan generated by the - * child policy, except that nodes that are slower than the best performing node by more - * than a configurable threshold will be moved to the end (that is, they will only be - * tried if all other nodes failed). Note that this policy only penalizes slow nodes, it - * does not globally sort the query plan by latency. + * A map with the stats for all hosts tracked by the {@code LatencyAwarePolicy} at the time of + * the snapshot. * - * @param loggedKeyspace the currently logged keyspace. - * @param statement the statement for which to build the plan. - * @return the new query plan. + * @return a immutable map with all the stats contained in this snapshot. */ - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - final Iterator childIter = childPolicy.newQueryPlan(loggedKeyspace, statement); - return new AbstractIterator() { - - private Queue skipped; - - @Override - protected Host computeNext() { - long min = latencyTracker.getMinAverage(); - long now = System.nanoTime(); - while (childIter.hasNext()) { - Host host = childIter.next(); - TimestampedAverage latency = latencyTracker.latencyOf(host); - - // If we haven't had enough data point yet to have a score, or the last update of the score - // is just too old, include the host. - if (min < 0 || latency == null || latency.nbMeasure < minMeasure || (now - latency.timestamp) > retryPeriod) - return host; - - // If the host latency is within acceptable bound of the faster known host, return - // that host. Otherwise, skip it. - if (latency.average <= ((long) (exclusionThreshold * (double) min))) - return host; - - if (skipped == null) - skipped = new ArrayDeque(); - skipped.offer(host); - } - - if (skipped != null && !skipped.isEmpty()) - return skipped.poll(); - - return endOfData(); - } - - ; - }; + public Map getAllStats() { + return stats; } /** - * Returns a snapshot of the scores (latency averages) maintained by this - * policy. + * The {@code Stats} object for a given host. * - * @return a new (immutable) {@link Snapshot} object containing the current - * latency scores maintained by this policy. + * @param host the host to return the stats of. + * @return the {@code Stats} for {@code host} in this snapshot or {@code null} if the snapshot + * has not information on {@code host}. */ - public Snapshot getScoresSnapshot() { - Map currentLatencies = latencyTracker.currentLatencies(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - long now = System.nanoTime(); - for (Map.Entry entry : currentLatencies.entrySet()) { - Host host = entry.getKey(); - TimestampedAverage latency = entry.getValue(); - Snapshot.Stats stats = new Snapshot.Stats(now - latency.timestamp, latency.average, latency.nbMeasure); - builder.put(host, stats); - } - return new Snapshot(builder.build()); + public Stats getStats(Host host) { + return stats.get(host); } - @Override - public void onUp(Host host) { - childPolicy.onUp(host); + /** A snapshot of the statistics on a given host kept by {@code LatencyAwarePolicy}. 
*/ + public static class Stats { + private final long lastUpdatedSince; + private final long average; + private final long nbMeasurements; + + private Stats(long lastUpdatedSince, long average, long nbMeasurements) { + this.lastUpdatedSince = lastUpdatedSince; + this.average = average; + this.nbMeasurements = nbMeasurements; + } + + /** + * The number of nanoseconds since the last latency update was recorded (at the time of the + * snapshot). + * + * @return The number of nanoseconds since the last latency update was recorded (at the time + * of the snapshot). + */ + public long lastUpdatedSince() { + return lastUpdatedSince; + } + + /** + * The latency score for the host this is the stats of at the time of the snapshot. + * + * @return the latency score for the host this is the stats of at the time of the snapshot, or + * {@code -1L} if not enough measurements have been taken to assign a score. + */ + public long getLatencyScore() { + return average; + } + + /** + * The number of recorded latency measurements for the host this is the stats of. + * + * @return the number of recorded latency measurements for the host this is the stats of. + */ + public long getMeasurementsCount() { + return nbMeasurements; + } } + } + + /** + * A set of DriverException subclasses that we should prevent from updating the host's score. The + * intent behind it is to filter out "fast" errors: when a host replies with such errors, it + * usually does so very quickly, because it did not involve any actual coordination work. Such + * errors are not good indicators of the host's responsiveness, and tend to make the host's score + * look better than it actually is. + */ + private static final Set> EXCLUDED_EXCEPTIONS = + ImmutableSet.of( + UnavailableException.class, // this is done via the snitch and is usually very fast + OverloadedException.class, + BootstrappingException.class, + UnpreparedException.class, + QueryValidationException + .class // query validation also happens at early stages in the coordinator + ); + + private class Tracker implements LatencyTracker { + + private final ConcurrentMap latencies = + new ConcurrentHashMap(); + private volatile long cachedMin = -1L; @Override - public void onDown(Host host) { - childPolicy.onDown(host); - latencyTracker.resetHost(host); + public void update( + final Host host, Statement statement, Exception exception, long newLatencyNanos) { + HostLatencyTracker hostTracker = latencies.get(host); + if (hostTracker != null) { + if (shouldConsiderNewLatency(statement, exception)) { + hostTracker.add(newLatencyNanos); + } else if (hostMetricsEnabled()) { + metrics + .getRegistry() + .counter(MetricsUtil.hostMetricName("LatencyAwarePolicy.ignored-latencies.", host)) + .inc(); + } + } } - @Override - public void onAdd(Host host) { - childPolicy.onAdd(host); + private boolean shouldConsiderNewLatency(Statement statement, Exception exception) { + // query was successful: always consider + if (exception == null) return true; + // filter out "fast" errors + if (EXCLUDED_EXCEPTIONS.contains(exception.getClass())) return false; + return true; } - @Override - public void onRemove(Host host) { - childPolicy.onRemove(host); - latencyTracker.resetHost(host); + public void updateMin() { + long newMin = Long.MAX_VALUE; + long now = System.nanoTime(); + for (HostLatencyTracker tracker : latencies.values()) { + TimestampedAverage latency = tracker.getCurrentAverage(); + if (latency != null + && latency.average >= 0 + && latency.nbMeasure >= minMeasure + && (now - latency.timestamp) <= 
retryPeriod) newMin = Math.min(newMin, latency.average); + } + if (newMin != Long.MAX_VALUE) cachedMin = newMin; } - /** - * An immutable snapshot of the per-host scores (and stats in general) - * maintained by {@code LatencyAwarePolicy} to base its decision upon. - */ - public static class Snapshot { - private final Map stats; - - private Snapshot(Map stats) { - this.stats = stats; - } - - /** - * A map with the stats for all hosts tracked by the {@code - * LatencyAwarePolicy} at the time of the snapshot. - * - * @return a immutable map with all the stats contained in this - * snapshot. - */ - public Map getAllStats() { - return stats; - } - - /** - * The {@code Stats} object for a given host. - * - * @param host the host to return the stats of. - * @return the {@code Stats} for {@code host} in this snapshot or - * {@code null} if the snapshot has not information on {@code host}. - */ - public Stats getStats(Host host) { - return stats.get(host); - } - - /** - * A snapshot of the statistics on a given host kept by {@code LatencyAwarePolicy}. - */ - public static class Stats { - private final long lastUpdatedSince; - private final long average; - private final long nbMeasurements; - - private Stats(long lastUpdatedSince, long average, long nbMeasurements) { - this.lastUpdatedSince = lastUpdatedSince; - this.average = average; - this.nbMeasurements = nbMeasurements; - } - - /** - * The number of nanoseconds since the last latency update was recorded (at the time - * of the snapshot). - * - * @return The number of nanoseconds since the last latency update was recorded (at the time - * of the snapshot). - */ - public long lastUpdatedSince() { - return lastUpdatedSince; - } - - /** - * The latency score for the host this is the stats of at the time of the snapshot. - * - * @return the latency score for the host this is the stats of at the time of the snapshot, - * or {@code -1L} if not enough measurements have been taken to assign a score. - */ - public long getLatencyScore() { - return average; - } - - /** - * The number of recorded latency measurements for the host this is the stats of. - * - * @return the number of recorded latency measurements for the host this is the stats of. - */ - public long getMeasurementsCount() { - return nbMeasurements; - } - } + public long getMinAverage() { + return cachedMin; } - /** - * A set of DriverException subclasses that we should prevent from updating the host's score. - * The intent behind it is to filter out "fast" errors: when a host replies with such errors, - * it usually does so very quickly, because it did not involve any actual - * coordination work. Such errors are not good indicators of the host's responsiveness, - * and tend to make the host's score look better than it actually is. 
- */ - private static final Set> EXCLUDED_EXCEPTIONS = ImmutableSet.of( - UnavailableException.class, // this is done via the snitch and is usually very fast - OverloadedException.class, - BootstrappingException.class, - UnpreparedException.class, - QueryValidationException.class // query validation also happens at early stages in the coordinator - ); - - private class Tracker implements LatencyTracker { - - private final ConcurrentMap latencies = new ConcurrentHashMap(); - private volatile long cachedMin = -1L; - - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - if (shouldConsiderNewLatency(statement, exception)) { - HostLatencyTracker hostTracker = latencies.get(host); - if (hostTracker == null) { - hostTracker = new HostLatencyTracker(scale, (30L * minMeasure) / 100L); - HostLatencyTracker old = latencies.putIfAbsent(host, hostTracker); - if (old != null) - hostTracker = old; - } - hostTracker.add(newLatencyNanos); - } - } - - private boolean shouldConsiderNewLatency(Statement statement, Exception exception) { - // query was successful: always consider - if (exception == null) return true; - // filter out "fast" errors - if (EXCLUDED_EXCEPTIONS.contains(exception.getClass())) return false; - return true; - } - - public void updateMin() { - long newMin = Long.MAX_VALUE; - long now = System.nanoTime(); - for (HostLatencyTracker tracker : latencies.values()) { - TimestampedAverage latency = tracker.getCurrentAverage(); - if (latency != null && latency.average >= 0 && latency.nbMeasure >= minMeasure && (now - latency.timestamp) <= retryPeriod) - newMin = Math.min(newMin, latency.average); - } - if (newMin != Long.MAX_VALUE) - cachedMin = newMin; - } - - public long getMinAverage() { - return cachedMin; - } - - public TimestampedAverage latencyOf(Host host) { - HostLatencyTracker tracker = latencies.get(host); - return tracker == null ? null : tracker.getCurrentAverage(); - } - - public Map currentLatencies() { - Map map = new HashMap(latencies.size()); - for (Map.Entry entry : latencies.entrySet()) - map.put(entry.getKey(), entry.getValue().getCurrentAverage()); - return map; - } - - public void resetHost(Host host) { - latencies.remove(host); - } + public TimestampedAverage latencyOf(Host host) { + HostLatencyTracker tracker = latencies.get(host); + return tracker == null ? null : tracker.getCurrentAverage(); + } - @Override - public void onRegister(Cluster cluster) { - // nothing to do + public Map currentLatencies() { + Map map = new HashMap(latencies.size()); + for (Map.Entry entry : latencies.entrySet()) { + TimestampedAverage average = entry.getValue().getCurrentAverage(); + // average may be null if no latencies have been recorded yet for a host. 
+ if (average != null) { + map.put(entry.getKey(), average); } + } + return map; + } - @Override - public void onUnregister(Cluster cluster) { - // nothing to do + public void addHost(final Host host) { + logger.debug("Adding tracker for {}", host); + HostLatencyTracker old = + latencies.putIfAbsent(host, new HostLatencyTracker(scale, (30L * minMeasure) / 100L)); + if (old == null && hostMetricsEnabled()) { + String metricName = MetricsUtil.hostMetricName("LatencyAwarePolicy.latencies.", host); + if (!metrics.getRegistry().getNames().contains(metricName)) { + logger.debug("Adding gauge " + metricName); + metrics + .getRegistry() + .register( + metricName, + new Gauge() { + @Override + public Long getValue() { + TimestampedAverage latency = latencyTracker.latencyOf(host); + return (latency == null) ? -1 : latency.average; + } + }); } + } } - private static class TimestampedAverage { + public void resetHost(Host host) { + logger.debug("Removing tracker for {}", host); + latencies.remove(host); + } - private final long timestamp; - private final long average; - private final long nbMeasure; + @Override + public void onRegister(Cluster cluster) { + // nothing to do + } - TimestampedAverage(long timestamp, long average, long nbMeasure) { - this.timestamp = timestamp; - this.average = average; - this.nbMeasure = nbMeasure; - } + @Override + public void onUnregister(Cluster cluster) { + // nothing to do } + } - private static class HostLatencyTracker { + private static class TimestampedAverage { - private final long thresholdToAccount; - private final double scale; - private final AtomicReference current = new AtomicReference(); + private final long timestamp; + private final long average; + private final long nbMeasure; - HostLatencyTracker(long scale, long thresholdToAccount) { - this.scale = (double) scale; // We keep in double since that's how we'll use it. - this.thresholdToAccount = thresholdToAccount; - } + TimestampedAverage(long timestamp, long average, long nbMeasure) { + this.timestamp = timestamp; + this.average = average; + this.nbMeasure = nbMeasure; + } + } - public void add(long newLatencyNanos) { - TimestampedAverage previous, next; - do { - previous = current.get(); - next = computeNextAverage(previous, newLatencyNanos); - } while (next != null && !current.compareAndSet(previous, next)); - } + private static class HostLatencyTracker { - private TimestampedAverage computeNextAverage(TimestampedAverage previous, long newLatencyNanos) { + private final long thresholdToAccount; + private final double scale; + private final AtomicReference current = + new AtomicReference(); - long currentTimestamp = System.nanoTime(); + HostLatencyTracker(long scale, long thresholdToAccount) { + this.scale = (double) scale; // We keep in double since that's how we'll use it. + this.thresholdToAccount = thresholdToAccount; + } - long nbMeasure = previous == null ? 
1 : previous.nbMeasure + 1; - if (nbMeasure < thresholdToAccount) - return new TimestampedAverage(currentTimestamp, -1L, nbMeasure); + public void add(long newLatencyNanos) { + TimestampedAverage previous, next; + do { + previous = current.get(); + next = computeNextAverage(previous, newLatencyNanos); + } while (next != null && !current.compareAndSet(previous, next)); + } - if (previous == null || previous.average < 0) - return new TimestampedAverage(currentTimestamp, newLatencyNanos, nbMeasure); + private TimestampedAverage computeNextAverage( + TimestampedAverage previous, long newLatencyNanos) { + + long currentTimestamp = System.nanoTime(); + + long nbMeasure = previous == null ? 1 : previous.nbMeasure + 1; + if (nbMeasure < thresholdToAccount) + return new TimestampedAverage(currentTimestamp, -1L, nbMeasure); + + if (previous == null || previous.average < 0) + return new TimestampedAverage(currentTimestamp, newLatencyNanos, nbMeasure); + + // Note: it's possible for the delay to be 0, in which case newLatencyNanos will basically be + // discarded. It's fine: nanoTime is precise enough in practice that even if it happens, it + // will be very rare, and discarding a latency every once in a while is not the end of the + // world. + // We do test for negative value, even though in theory that should not happen, because it + // seems + // that historically there has been bugs here + // (https://blogs.oracle.com/dholmes/entry/inside_the_hotspot_vm_clocks) + // so while this is almost surely not a problem anymore, there's no reason to break the + // computation + // if this even happen. + long delay = currentTimestamp - previous.timestamp; + if (delay <= 0) return null; + + double scaledDelay = ((double) delay) / scale; + // Note: We don't use log1p because we it's quite a bit slower and we don't care about the + // precision (and since we + // refuse ridiculously big scales, scaledDelay can't be so low that scaledDelay+1 == 1.0 (due + // to rounding)). + double prevWeight = Math.log(scaledDelay + 1) / scaledDelay; + long newAverage = + (long) ((1.0 - prevWeight) * newLatencyNanos + prevWeight * previous.average); + + return new TimestampedAverage(currentTimestamp, newAverage, nbMeasure); + } - // Note: it's possible for the delay to be 0, in which case newLatencyNanos will basically be - // discarded. It's fine: nanoTime is precise enough in practice that even if it happens, it - // will be very rare, and discarding a latency every once in a while is not the end of the world. - // We do test for negative value, even though in theory that should not happen, because it seems - // that historically there has been bugs here (https://blogs.oracle.com/dholmes/entry/inside_the_hotspot_vm_clocks) - // so while this is almost surely not a problem anymore, there's no reason to break the computation - // if this even happen. - long delay = currentTimestamp - previous.timestamp; - if (delay <= 0) - return null; + public TimestampedAverage getCurrentAverage() { + return current.get(); + } + } + + /** + * Helper builder object to create a latency aware policy. + * + *

    This helper allows you to configure the different parameters used by {@code LatencyAwarePolicy}. + * The only mandatory parameter is the child policy that will be wrapped with latency awareness. + * The other parameters can be set through the methods of this builder, but all have defaults + * (documented in the javadoc of each method) if you don't set them. + * + *

    If you observe that the resulting policy excludes hosts too aggressively or not enough so, + * the main parameters to check are the exclusion threshold ({@link #withExclusionThreshold}) and + * scale ({@link #withScale}). + * + * @since 1.0.4 + */ + public static class Builder { + + public static final double DEFAULT_EXCLUSION_THRESHOLD = 2.0; + public static final long DEFAULT_SCALE_NANOS = TimeUnit.MILLISECONDS.toNanos(100); + public static final long DEFAULT_RETRY_PERIOD_NANOS = TimeUnit.SECONDS.toNanos(10); + public static final long DEFAULT_UPDATE_RATE_NANOS = TimeUnit.MILLISECONDS.toNanos(100); + public static final int DEFAULT_MIN_MEASURE = 50; - double scaledDelay = ((double) delay) / scale; - // Note: We don't use log1p because we it's quite a bit slower and we don't care about the precision (and since we - // refuse ridiculously big scales, scaledDelay can't be so low that scaledDelay+1 == 1.0 (due to rounding)). - double prevWeight = Math.log(scaledDelay + 1) / scaledDelay; - long newAverage = (long) ((1.0 - prevWeight) * newLatencyNanos + prevWeight * previous.average); + private final LoadBalancingPolicy childPolicy; - return new TimestampedAverage(currentTimestamp, newAverage, nbMeasure); - } + private double exclusionThreshold = DEFAULT_EXCLUSION_THRESHOLD; + private long scale = DEFAULT_SCALE_NANOS; + private long retryPeriod = DEFAULT_RETRY_PERIOD_NANOS; + private long updateRate = DEFAULT_UPDATE_RATE_NANOS; + private int minMeasure = DEFAULT_MIN_MEASURE; - public TimestampedAverage getCurrentAverage() { - return current.get(); - } + /** + * Creates a new latency aware policy builder given the child policy that the resulting policy + * wraps. + * + * @param childPolicy the load balancing policy to wrap with latency awareness. + */ + public Builder(LoadBalancingPolicy childPolicy) { + this.childPolicy = childPolicy; } /** - * Helper builder object to create a latency aware policy. - *

    - * This helper allows to configure the different parameters used by - * {@code LatencyAwarePolicy}. The only mandatory parameter is the child - * policy that will be wrapped with latency awareness. The other parameters - * can be set through the methods of this builder, but all have defaults (that - * are documented in the javadoc of each method) if you don't. - *

    - * If you observe that the resulting policy excludes hosts too aggressively or - * not enough so, the main parameters to check are the exclusion threshold - * ({@link #withExclusionThreshold}) and scale ({@link #withScale}). + * Sets the exclusion threshold to use for the resulting latency aware policy. + * + *

    The exclusion threshold controls how much worse the average latency of a node must be + * compared to the fastest performing node for it to be penalized by the policy. + * + *

    The default exclusion threshold (if this method is not called) is 2. In other + * words, the resulting policy excludes nodes that are more than twice slower than the fastest + * node. * - * @since 1.0.4 + * @param exclusionThreshold the exclusion threshold to use. Must be greater or equal to 1. + * @return this builder. + * @throws IllegalArgumentException if {@code exclusionThreshold < 1}. */ - public static class Builder { - - public static final double DEFAULT_EXCLUSION_THRESHOLD = 2.0; - public static final long DEFAULT_SCALE_NANOS = TimeUnit.MILLISECONDS.toNanos(100); - public static final long DEFAULT_RETRY_PERIOD_NANOS = TimeUnit.SECONDS.toNanos(10); - public static final long DEFAULT_UPDATE_RATE_NANOS = TimeUnit.MILLISECONDS.toNanos(100); - public static final int DEFAULT_MIN_MEASURE = 50; - - private final LoadBalancingPolicy childPolicy; - - private double exclusionThreshold = DEFAULT_EXCLUSION_THRESHOLD; - private long scale = DEFAULT_SCALE_NANOS; - private long retryPeriod = DEFAULT_RETRY_PERIOD_NANOS; - private long updateRate = DEFAULT_UPDATE_RATE_NANOS; - private int minMeasure = DEFAULT_MIN_MEASURE; - - /** - * Creates a new latency aware policy builder given the child policy - * that the resulting policy wraps. - * - * @param childPolicy the load balancing policy to wrap with latency - * awareness. - */ - public Builder(LoadBalancingPolicy childPolicy) { - this.childPolicy = childPolicy; - } - - /** - * Sets the exclusion threshold to use for the resulting latency aware policy. - *

    - * The exclusion threshold controls how much worse the average latency - * of a node must be compared to the fastest performing node for it to be - * penalized by the policy. - *

    - * The default exclusion threshold (if this method is not called) is 2. - * In other words, the resulting policy excludes nodes that are more than - * twice slower than the fastest node. - * - * @param exclusionThreshold the exclusion threshold to use. Must be - * greater or equal to 1. - * @return this builder. - * @throws IllegalArgumentException if {@code exclusionThreshold < 1}. - */ - public Builder withExclusionThreshold(double exclusionThreshold) { - if (exclusionThreshold < 1d) - throw new IllegalArgumentException("Invalid exclusion threshold, must be greater than 1."); - this.exclusionThreshold = exclusionThreshold; - return this; - } - - /** - * Sets the scale to use for the resulting latency aware policy. - *

    - * The {@code scale} provides control on how the weight given to older latencies - * decreases over time. For a given host, if a new latency {@code l} is received at - * time {@code t}, and the previously calculated average is {@code prev} calculated at - * time {@code t'}, then the newly calculated average {@code avg} for that host is calculated - * thusly: - *

    {@code d = (t - t') / scale
    -         * alpha = 1 - (ln(d+1) / d)
    -         * avg = alpha * l + (1 - alpha * prev)}
    - * Typically, with a {@code scale} of 100 milliseconds (the default), if a new - * latency is measured and the previous measure is 10 millisecond old (so {@code d=0.1}), - * then {@code alpha} will be around {@code 0.05}. In other words, the new latency will - * weight 5% of the updated average. A bigger scale will get less weight to new - * measurements (compared to previous ones), a smaller one will give them more weight. - *

    - * The default scale (if this method is not used) is of 100 milliseconds. If unsure, try - * this default scale first and experiment only if it doesn't provide acceptable results - * (hosts are excluded too quickly or not fast enough and tuning the exclusion threshold - * doesn't help). - * - * @param scale the scale to use. - * @param unit the unit of {@code scale}. - * @return this builder. - * @throws IllegalArgumentException if {@code scale <= 0}. - */ - public Builder withScale(long scale, TimeUnit unit) { - if (scale <= 0) - throw new IllegalArgumentException("Invalid scale, must be strictly positive"); - this.scale = unit.toNanos(scale); - return this; - } + public Builder withExclusionThreshold(double exclusionThreshold) { + if (exclusionThreshold < 1d) + throw new IllegalArgumentException("Invalid exclusion threshold, must be greater than 1."); + this.exclusionThreshold = exclusionThreshold; + return this; + } - /** - * Sets the retry period for the resulting latency aware policy. - *

    - * The retry period defines how long a node may be penalized by the - * policy before it is given a 2nd change. More precisely, a node is excluded - * from query plans if both his calculated average latency is {@code exclusionThreshold} - * times slower than the fastest node average latency (at the time the query plan is - * computed) and his calculated average latency has been updated since - * less than {@code retryPeriod}. Since penalized nodes will likely not see their - * latency updated, this is basically how long the policy will exclude a node. - * - * @param retryPeriod the retry period to use. - * @param unit the unit for {@code retryPeriod}. - * @return this builder. - * @throws IllegalArgumentException if {@code retryPeriod < 0}. - */ - public Builder withRetryPeriod(long retryPeriod, TimeUnit unit) { - if (retryPeriod < 0) - throw new IllegalArgumentException("Invalid retry period, must be positive"); - this.retryPeriod = unit.toNanos(retryPeriod); - return this; - } + /** + * Sets the scale to use for the resulting latency aware policy. + * + *

    The {@code scale} provides control over how the weight given to older latencies decreases + * over time. For a given host, if a new latency {@code l} is received at time {@code t}, and + * the previously calculated average is {@code prev} calculated at time {@code t'}, then the + * newly calculated average {@code avg} for that host is calculated as follows: + * + *

    {@code d = (t - t') / scale
    +     * alpha = 1 - (ln(d+1) / d)
    +     * avg = alpha * l + (1 - alpha) * prev}
    + * + * Typically, with a {@code scale} of 100 milliseconds (the default), if a new latency is + * measured and the previous measure is 10 milliseconds old (so {@code d=0.1}), then {@code + * alpha} will be around {@code 0.05}. In other words, the new latency will account for 5% of the + * updated average. A bigger scale will give less weight to new measurements (compared to + * previous ones), a smaller one will give them more weight. + * + *
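For illustration, the decay weights described above can be reproduced with a short standalone snippet; the running average and new measurement below are made-up placeholders, only the formula itself follows the documented behavior:

```java
public class ScaleWeightExample {
  public static void main(String[] args) {
    double scaleNanos = 100_000_000d;        // scale = 100 ms
    double delayNanos = 10_000_000d;         // previous measure is 10 ms old
    double d = delayNanos / scaleNanos;      // d = 0.1
    double prevWeight = Math.log(d + 1) / d; // ln(d+1)/d, roughly 0.953
    double alpha = 1 - prevWeight;           // roughly 0.047, i.e. the new sample weighs ~5%

    long prevAverageNanos = 2_000_000L;      // hypothetical 2 ms running average
    long newLatencyNanos = 4_000_000L;       // hypothetical 4 ms new measurement
    long newAverage = (long) (alpha * newLatencyNanos + prevWeight * prevAverageNanos);
    System.out.printf("alpha=%.3f, new average=%d ns%n", alpha, newAverage);
  }
}
```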

    The default scale (if this method is not used) is 100 milliseconds. If unsure, + * try this default scale first and experiment only if it doesn't provide acceptable results + * (hosts are excluded too quickly or not fast enough and tuning the exclusion threshold doesn't + * help). + * + * @param scale the scale to use. + * @param unit the unit of {@code scale}. + * @return this builder. + * @throws IllegalArgumentException if {@code scale <= 0}. + */ + public Builder withScale(long scale, TimeUnit unit) { + if (scale <= 0) + throw new IllegalArgumentException("Invalid scale, must be strictly positive"); + this.scale = unit.toNanos(scale); + return this; + } -

    - * The update rate defines how often the minimum average latency is - * recomputed. While the average latency score of each node is computed - * iteratively (updated each time a new latency is collected), the - * minimum score needs to be recomputed from scratch every time, which - * is slightly more costly. For this reason, the minimum is only - * re-calculated at the given fixed rate and cached between re-calculation. - *

    - * The default update rate if 100 milliseconds, which should be - * appropriate for most applications. In particular, note that while we - * want to avoid to recompute the minimum for every query, that - * computation is not particularly intensive either and there is no - * reason to use a very slow rate (more than second is probably - * unnecessarily slow for instance). - * - * @param updateRate the update rate to use. - * @param unit the unit for {@code updateRate}. - * @return this builder. - * @throws IllegalArgumentException if {@code updateRate <e; 0}. - */ - public Builder withUpdateRate(long updateRate, TimeUnit unit) { - if (updateRate <= 0) - throw new IllegalArgumentException("Invalid update rate value, must be strictly positive"); - this.updateRate = unit.toNanos(updateRate); - return this; - } + /** + * Sets the retry period for the resulting latency aware policy. + * + *

    The retry period defines how long a node may be penalized by the policy before it is given + * a second chance. More precisely, a node is excluded from query plans if both its calculated + * average latency is {@code exclusionThreshold} times slower than the fastest node average + * latency (at the time the query plan is computed) and its calculated average latency + * has been updated within the last {@code retryPeriod}. Since penalized nodes will likely not + * see their latency updated, this is basically how long the policy will exclude a node. + * + * @param retryPeriod the retry period to use. + * @param unit the unit for {@code retryPeriod}. + * @return this builder. + * @throws IllegalArgumentException if {@code retryPeriod < 0}. + */ + public Builder withRetryPeriod(long retryPeriod, TimeUnit unit) { + if (retryPeriod < 0) + throw new IllegalArgumentException("Invalid retry period, must be positive"); + this.retryPeriod = unit.toNanos(retryPeriod); + return this; + } -
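To make the exclusion rule just described concrete, a small hypothetical helper (not part of the driver API; parameter names are illustrative) can spell out the same test that the query plan applies:

```java
final class ExclusionRuleSketch {
  // A host is skipped by the query plan only if its score is both "too slow"
  // relative to the fastest host and recent enough to still be trusted.
  static boolean excludedFromQueryPlan(
      long latencyScoreNanos,    // the host's current latency score
      long minScoreNanos,        // the fastest host's score
      double exclusionThreshold, // e.g. 2.0
      long nanosSinceLastUpdate, // how old the host's score is
      long retryPeriodNanos) {   // e.g. 10 seconds expressed in nanoseconds
    boolean tooSlow = latencyScoreNanos > (long) (exclusionThreshold * minScoreNanos);
    boolean scoreStillFresh = nanosSinceLastUpdate <= retryPeriodNanos;
    // Once the score is older than the retry period the host is tried again,
    // even if it was previously considered too slow.
    return tooSlow && scoreStillFresh;
  }
}
```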

    - * Penalizing nodes is based on an average of their recently measured - * average latency. This average is only meaningful if a minimum of - * measurements have been collected (moreover, a newly started - * Cassandra node will tend to perform relatively poorly on the first - * queries due to the JVM warmup). This is what this option controls. - * If less that {@code minMeasure} data points have been collected for - * a given host, the policy will never penalize that host. Also, the - * 30% first measurement will be entirely ignored (in other words, the - * {@code 30% * minMeasure} first measurement to a node are entirely - * ignored, while the {@code 70%} next ones are accounted in the latency - * computed but the node won't get convicted until we've had at least - * {@code minMeasure} measurements). - *

    - * Note that the number of collected measurements for a given host is - * reset if the node is restarted. - *

    - * The default for this option (if this method is not called) is 50. - * Note that it is probably not a good idea to put this option too low - * if only to avoid the influence of JVM warm-up on newly restarted - * nodes. - * - * @param minMeasure the minimum measurements to consider. - * @return this builder. - * @throws IllegalArgumentException if {@code minMeasure < 0}. - */ - public Builder withMininumMeasurements(int minMeasure) { - if (minMeasure < 0) - throw new IllegalArgumentException("Invalid minimum measurements value, must be positive"); - this.minMeasure = minMeasure; - return this; - } + /** + * Sets the update rate for the resulting latency aware policy. + * + *

    The update rate defines how often the minimum average latency is recomputed. While the + * average latency score of each node is computed iteratively (updated each time a new latency + * is collected), the minimum score needs to be recomputed from scratch every time, which is + * slightly more costly. For this reason, the minimum is only re-calculated at the given fixed + * rate and cached between re-calculations. + * + *

    The default update rate is 100 milliseconds, which should be appropriate for most + * applications. In particular, note that while we want to avoid recomputing the minimum for + * every query, that computation is not particularly intensive either and there is no reason to + * use a very slow rate (more than a second is probably unnecessarily slow, for instance). + * + * @param updateRate the update rate to use. + * @param unit the unit for {@code updateRate}. + * @return this builder. + * @throws IllegalArgumentException if {@code updateRate <= 0}. + */ + public Builder withUpdateRate(long updateRate, TimeUnit unit) { + if (updateRate <= 0) + throw new IllegalArgumentException("Invalid update rate value, must be strictly positive"); + this.updateRate = unit.toNanos(updateRate); + return this; + } -
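Putting the builder options together, a minimal usage sketch looks like the following; the values shown are simply the documented defaults, the contact point is a placeholder, and DCAwareRoundRobinPolicy is just one possible choice of child policy:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.LatencyAwarePolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import java.util.concurrent.TimeUnit;

public class LatencyAwareExample {
  public static void main(String[] args) {
    // Wrap a child policy with latency awareness, using the documented defaults explicitly.
    LoadBalancingPolicy latencyAware =
        LatencyAwarePolicy.builder(DCAwareRoundRobinPolicy.builder().build())
            .withExclusionThreshold(2.0)
            .withScale(100, TimeUnit.MILLISECONDS)
            .withRetryPeriod(10, TimeUnit.SECONDS)
            .withUpdateRate(100, TimeUnit.MILLISECONDS)
            .withMininumMeasurements(50)
            .build();

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(latencyAware)
            .build();
    try {
      cluster.connect();
    } finally {
      cluster.close();
    }
  }
}
```

Note that the per-host inclusion/exclusion counters added in this diff are only registered when the com.datastax.driver.HOST_METRICS_ENABLED system property is set (for example via -Dcom.datastax.driver.HOST_METRICS_ENABLED=true) and cluster metrics are enabled.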

    Penalizing nodes is based on an average of their recently measured average latency. This + * average is only meaningful if a minimum of measurements have been collected (moreover, a + * newly started Cassandra node will tend to perform relatively poorly on the first queries due + * to the JVM warmup). This is what this option controls. If less than {@code minMeasure} data + * points have been collected for a given host, the policy will never penalize that host. Also, + * the first 30% of measurements will be entirely ignored (in other words, the first {@code 30% * + * minMeasure} measurements to a node are entirely ignored, while the next {@code 70%} + * are accounted for in the computed latency, but the node won't get penalized until we've had at + * least {@code minMeasure} measurements). + * + *

    Note that the number of collected measurements for a given host is reset if the node is + * restarted. + * + *

    The default for this option (if this method is not called) is 50. Note that it is + * probably not a good idea to put this option too low if only to avoid the influence of JVM + * warm-up on newly restarted nodes. + * + * @param minMeasure the minimum measurements to consider. + * @return this builder. + * @throws IllegalArgumentException if {@code minMeasure < 0}. + */ + public Builder withMininumMeasurements(int minMeasure) { + if (minMeasure < 0) + throw new IllegalArgumentException("Invalid minimum measurements value, must be positive"); + this.minMeasure = minMeasure; + return this; } - @Override - public void close() { - childPolicy.close(); - updaterService.shutdown(); + /** + * Builds a new latency aware policy using the options set on this builder. + * + * @return the newly created {@code LatencyAwarePolicy}. + */ + public LatencyAwarePolicy build() { + return new LatencyAwarePolicy( + childPolicy, exclusionThreshold, scale, retryPeriod, updateRate, minMeasure); } + } + + @Override + public void close() { + childPolicy.close(); + updaterService.shutdown(); + } + + private boolean hostMetricsEnabled() { + return HOST_METRICS_ENABLED && metrics != null; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java index c03d59d69ef..afdba14453a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,110 +21,106 @@ import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Statement; - import java.util.Collection; import java.util.Iterator; /** * The policy that decides which Cassandra hosts to contact for each new query. - *

    - * Two methods need to be implemented: + * + *

    Two methods need to be implemented: + * *

      - *
    • {@link LoadBalancingPolicy#distance}: returns the "distance" of an - * host for that balancing policy.
    • - *
    • {@link LoadBalancingPolicy#newQueryPlan}: it is used for each query to - * find which host to query first, and which hosts to use as failover.
    • + *
    • {@link LoadBalancingPolicy#distance}: returns the "distance" of an host for that balancing + * policy. + *
    • {@link LoadBalancingPolicy#newQueryPlan}: it is used for each query to find which host to + * query first, and which hosts to use as failover. *
    - *

    - * The {@code LoadBalancingPolicy} is informed of hosts up/down events. For efficiency - * purposes, the policy is expected to exclude down hosts from query plans. + * + *

    The {@code LoadBalancingPolicy} is informed of host up/down events. For efficiency purposes, + * the policy is expected to exclude down hosts from query plans. */ public interface LoadBalancingPolicy { - /** - * Initialize this load balancing policy. - *

    - * Note that the driver guarantees that it will call this method exactly - * once per policy object and will do so before any call to another of the - * methods of the policy. - * - * @param cluster the {@code Cluster} instance for which the policy is created. - * @param hosts the initial hosts to use. - */ - public void init(Cluster cluster, Collection hosts); + /** + * Initialize this load balancing policy. + * + *

    Note that the driver guarantees that it will call this method exactly once per policy object + * and will do so before any other method of the policy is called. + * + * @param cluster the {@code Cluster} instance for which the policy is created. + * @param hosts the initial hosts to use. + */ + public void init(Cluster cluster, Collection hosts); - /** - * Returns the distance assigned by this policy to the provided host. - *

    - * The distance of an host influence how much connections are kept to the - * node (see {@link HostDistance}). A policy should assign a {@code - * LOCAL} distance to nodes that are susceptible to be returned first by - * {@code newQueryPlan} and it is useless for {@code newQueryPlan} to - * return hosts to which it assigns an {@code IGNORED} distance. - *

    - * The host distance is primarily used to prevent keeping too many - * connections to host in remote datacenters when the policy itself always - * picks host in the local datacenter first. - * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host}. - */ - public HostDistance distance(Host host); + /** + * Returns the distance assigned by this policy to the provided host. + * + *

    The distance of a host influences how many connections are kept to the node (see {@link + * HostDistance}). A policy should assign a {@code LOCAL} distance to nodes that are likely + * to be returned first by {@code newQueryPlan}, and it is useless for {@code newQueryPlan} to + * return hosts to which it assigns an {@code IGNORED} distance. + * + *

    The host distance is primarily used to prevent keeping too many connections to hosts in + * remote datacenters when the policy itself always picks hosts in the local datacenter first. + * + * @param host the host of which to return the distance. + * @return the HostDistance to {@code host}. + */ + public HostDistance distance(Host host); - /** - * Returns the hosts to use for a new query. - *

    - * Each new query will call this method. The first host in the result will - * then be used to perform the query. In the event of a connection problem - * (the queried host is down or appear to be so), the next host will be - * used. If all hosts of the returned {@code Iterator} are down, the query - * will fail. - * - * @param loggedKeyspace the currently logged keyspace (the one set through either - * {@link Cluster#connect(String)} or by manually doing a {@code USE} query) for - * the session on which this plan need to be built. This can be {@code null} if - * the corresponding session has no keyspace logged in. - * @param statement the query for which to build a plan. - * @return an iterator of Host. The query is tried against the hosts - * returned by this iterator in order, until the query has been sent - * successfully to one of the host. - */ - public Iterator newQueryPlan(String loggedKeyspace, Statement statement); + /** + * Returns the hosts to use for a new query. + * + *

    Each new query will call this method. The first host in the result will then be used to + * perform the query. In the event of a connection problem (the queried host is down or appears to + * be so), the next host will be used. If all hosts of the returned {@code Iterator} are down, the + * query will fail. + * + * @param loggedKeyspace the currently logged keyspace (the one set through either {@link + * Cluster#connect(String)} or by manually doing a {@code USE} query) for the session on which + * this plan needs to be built. This can be {@code null} if the corresponding session has no + * keyspace logged in. + * @param statement the query for which to build a plan. + * @return an iterator of Host. The query is tried against the hosts returned by this iterator in + * order, until the query has been sent successfully to one of the hosts. + */ + public Iterator newQueryPlan(String loggedKeyspace, Statement statement); - /** - * Called when a new node is added to the cluster. - *

    - * The newly added node should be considered up. - * - * @param host the host that has been newly added. - */ - void onAdd(Host host); + /** + * Called when a new node is added to the cluster. + * + *

    The newly added node should be considered up. + * + * @param host the host that has been newly added. + */ + void onAdd(Host host); - /** - * Called when a node is determined to be up. - * - * @param host the host that has been detected up. - */ - void onUp(Host host); + /** + * Called when a node is determined to be up. + * + * @param host the host that has been detected up. + */ + void onUp(Host host); - /** - * Called when a node is determined to be down. - * - * @param host the host that has been detected down. - */ - void onDown(Host host); + /** + * Called when a node is determined to be down. + * + * @param host the host that has been detected down. + */ + void onDown(Host host); - /** - * Called when a node is removed from the cluster. - * - * @param host the removed host. - */ - void onRemove(Host host); + /** + * Called when a node is removed from the cluster. + * + * @param host the removed host. + */ + void onRemove(Host host); - /** - * Gets invoked at cluster shutdown. - *

    - * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. - */ - void close(); + /** + * Gets invoked at cluster shutdown. + * + *

    This gives the policy the opportunity to perform some cleanup, for instance stop threads + * that it might have started. + */ + void close(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java index ba882ccb758..ab48733c100 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,148 +28,199 @@ /** * A retry policy that wraps another policy, logging the decision made by its sub-policy. - *

    - * Note that this policy only logs - * {@link com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#RETRY RETRY} and - * {@link com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#IGNORE IGNORE} decisions (since - * {@link com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#RETHROW RETHROW} decisions - * are just meant to propagate the Cassandra exception). - *

    - * The logging is done at the INFO level and the logger name is - * {@code com.datastax.driver.core.policies.LoggingRetryPolicy}. + * + *

    Note that this policy only logs {@link + * com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#RETRY RETRY} and {@link + * com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#IGNORE IGNORE} decisions (since + * {@link com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type#RETHROW RETHROW} + * decisions are just meant to propagate the Cassandra exception). + * + *

    The logging is done at the INFO level and the logger name is {@code + * com.datastax.driver.core.policies.LoggingRetryPolicy}. */ public class LoggingRetryPolicy implements RetryPolicy { - private static final Logger logger = LoggerFactory.getLogger(LoggingRetryPolicy.class); - - @VisibleForTesting - static final String IGNORING_READ_TIMEOUT = - "Ignoring read timeout (initial consistency: {}, required responses: {}, received responses: {}, data retrieved: {}, retries: {})"; - - @VisibleForTesting - static final String RETRYING_ON_READ_TIMEOUT = - "Retrying on read timeout on {} at consistency {} (initial consistency: {}, required responses: {}, received responses: {}, data retrieved: {}, retries: {})"; - - @VisibleForTesting - static final String IGNORING_WRITE_TIMEOUT = - "Ignoring write timeout (initial consistency: {}, write type: {}, required acknowledgments: {}, received acknowledgments: {}, retries: {})"; - - @VisibleForTesting - static final String RETRYING_ON_WRITE_TIMEOUT = - "Retrying on write timeout on {} at consistency {} (initial consistency: {}, write type: {}, required acknowledgments: {}, received acknowledgments: {}, retries: {})"; - - @VisibleForTesting - static final String IGNORING_UNAVAILABLE = - "Ignoring unavailable exception (initial consistency: {}, required replica: {}, alive replica: {}, retries: {})"; - - @VisibleForTesting - static final String RETRYING_ON_UNAVAILABLE = - "Retrying on unavailable exception on {} at consistency {} (initial consistency: {}, required replica: {}, alive replica: {}, retries: {})"; - - @VisibleForTesting - static final String IGNORING_REQUEST_ERROR = - "Ignoring request error (initial consistency: {}, retries: {}, exception: {})"; - - @VisibleForTesting - static final String RETRYING_ON_REQUEST_ERROR = - "Retrying on request error on {} at consistency {} (initial consistency: {}, retries: {}, exception: {})"; - - private final RetryPolicy policy; - - /** - * Creates a new {@code RetryPolicy} that logs the decision of {@code policy}. - * - * @param policy the policy to wrap. The policy created by this constructor - * will return the same decision than {@code policy} but will log them. - */ - public LoggingRetryPolicy(RetryPolicy policy) { - this.policy = policy; - } - - private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) { - return decision.getRetryConsistencyLevel() == null ? 
cl : decision.getRetryConsistencyLevel(); + private static final Logger logger = LoggerFactory.getLogger(LoggingRetryPolicy.class); + + @VisibleForTesting + static final String IGNORING_READ_TIMEOUT = + "Ignoring read timeout (initial consistency: {}, required responses: {}, received responses: {}, data retrieved: {}, retries: {})"; + + @VisibleForTesting + static final String RETRYING_ON_READ_TIMEOUT = + "Retrying on read timeout on {} at consistency {} (initial consistency: {}, required responses: {}, received responses: {}, data retrieved: {}, retries: {})"; + + @VisibleForTesting + static final String IGNORING_WRITE_TIMEOUT = + "Ignoring write timeout (initial consistency: {}, write type: {}, required acknowledgments: {}, received acknowledgments: {}, retries: {})"; + + @VisibleForTesting + static final String RETRYING_ON_WRITE_TIMEOUT = + "Retrying on write timeout on {} at consistency {} (initial consistency: {}, write type: {}, required acknowledgments: {}, received acknowledgments: {}, retries: {})"; + + @VisibleForTesting + static final String IGNORING_UNAVAILABLE = + "Ignoring unavailable exception (initial consistency: {}, required replica: {}, alive replica: {}, retries: {})"; + + @VisibleForTesting + static final String RETRYING_ON_UNAVAILABLE = + "Retrying on unavailable exception on {} at consistency {} (initial consistency: {}, required replica: {}, alive replica: {}, retries: {})"; + + @VisibleForTesting + static final String IGNORING_REQUEST_ERROR = + "Ignoring request error (initial consistency: {}, retries: {}, exception: {})"; + + @VisibleForTesting + static final String RETRYING_ON_REQUEST_ERROR = + "Retrying on request error on {} at consistency {} (initial consistency: {}, retries: {}, exception: {})"; + + private final RetryPolicy policy; + + /** + * Creates a new {@code RetryPolicy} that logs the decision of {@code policy}. + * + * @param policy the policy to wrap. The policy created by this constructor will return the same + * decision than {@code policy} but will log them. + */ + public LoggingRetryPolicy(RetryPolicy policy) { + this.policy = policy; + } + + private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) { + return decision.getRetryConsistencyLevel() == null ? cl : decision.getRetryConsistencyLevel(); + } + + private static String host(RetryDecision decision) { + return decision.isRetryCurrent() ? "same host" : "next host"; + } + + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + RetryDecision decision = + policy.onReadTimeout( + statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); + switch (decision.getType()) { + case IGNORE: + logDecision( + IGNORING_READ_TIMEOUT, + cl, + requiredResponses, + receivedResponses, + dataRetrieved, + nbRetry); + break; + case RETRY: + logDecision( + RETRYING_ON_READ_TIMEOUT, + host(decision), + cl(cl, decision), + cl, + requiredResponses, + receivedResponses, + dataRetrieved, + nbRetry); + break; } - - private static String host(RetryDecision decision) { - return decision.isRetryCurrent() ? 
"same host" : "next host"; - } - - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - RetryDecision decision = policy.onReadTimeout(statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); - switch (decision.getType()) { - case IGNORE: - logDecision(IGNORING_READ_TIMEOUT, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); - break; - case RETRY: - logDecision(RETRYING_ON_READ_TIMEOUT, host(decision), cl(cl, decision), cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); - break; - } - return decision; - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - RetryDecision decision = policy.onWriteTimeout(statement, cl, writeType, requiredAcks, receivedAcks, nbRetry); - switch (decision.getType()) { - case IGNORE: - logDecision(IGNORING_WRITE_TIMEOUT, cl, writeType, requiredAcks, receivedAcks, nbRetry); - break; - case RETRY: - logDecision(RETRYING_ON_WRITE_TIMEOUT, host(decision), cl(cl, decision), cl, writeType, requiredAcks, receivedAcks, nbRetry); - break; - } - return decision; + return decision; + } + + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + RetryDecision decision = + policy.onWriteTimeout(statement, cl, writeType, requiredAcks, receivedAcks, nbRetry); + switch (decision.getType()) { + case IGNORE: + logDecision(IGNORING_WRITE_TIMEOUT, cl, writeType, requiredAcks, receivedAcks, nbRetry); + break; + case RETRY: + logDecision( + RETRYING_ON_WRITE_TIMEOUT, + host(decision), + cl(cl, decision), + cl, + writeType, + requiredAcks, + receivedAcks, + nbRetry); + break; } - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - RetryDecision decision = policy.onUnavailable(statement, cl, requiredReplica, aliveReplica, nbRetry); - switch (decision.getType()) { - case IGNORE: - logDecision(IGNORING_UNAVAILABLE, cl, requiredReplica, aliveReplica, nbRetry); - break; - case RETRY: - logDecision(RETRYING_ON_UNAVAILABLE, host(decision), cl(cl, decision), cl, requiredReplica, aliveReplica, nbRetry); - break; - } - return decision; + return decision; + } + + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + RetryDecision decision = + policy.onUnavailable(statement, cl, requiredReplica, aliveReplica, nbRetry); + switch (decision.getType()) { + case IGNORE: + logDecision(IGNORING_UNAVAILABLE, cl, requiredReplica, aliveReplica, nbRetry); + break; + case RETRY: + logDecision( + RETRYING_ON_UNAVAILABLE, + host(decision), + cl(cl, decision), + cl, + requiredReplica, + aliveReplica, + nbRetry); + break; } - - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - RetryDecision decision = policy.onRequestError(statement, cl, e, nbRetry); - switch (decision.getType()) { - case IGNORE: - logDecision(IGNORING_REQUEST_ERROR, cl, nbRetry, e.toString()); - break; - case RETRY: - logDecision(RETRYING_ON_REQUEST_ERROR, host(decision), cl(cl, decision), cl, nbRetry, e.toString()); - break; - } - return decision; + return decision; + } + 
+ @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + RetryDecision decision = policy.onRequestError(statement, cl, e, nbRetry); + switch (decision.getType()) { + case IGNORE: + logDecision(IGNORING_REQUEST_ERROR, cl, nbRetry, e.toString()); + break; + case RETRY: + logDecision( + RETRYING_ON_REQUEST_ERROR, host(decision), cl(cl, decision), cl, nbRetry, e.toString()); + break; } - - @Override - public void init(Cluster cluster) { - policy.init(cluster); - } - - @Override - public void close() { - policy.close(); - } - - /** - * Logs the decision according to the given template and parameters. - * The log level is INFO, but subclasses may override. - * - * @param template The template to use; arguments must be specified in SLF4J style, i.e. {@code "{}"}. - * @param parameters The template parameters. - */ - protected void logDecision(String template, Object... parameters) { - logger.info(template, parameters); - } - - + return decision; + } + + @Override + public void init(Cluster cluster) { + policy.init(cluster); + } + + @Override + public void close() { + policy.close(); + } + + /** + * Logs the decision according to the given template and parameters. The log level is INFO, but + * subclasses may override. + * + * @param template The template to use; arguments must be specified in SLF4J style, i.e. {@code + * "{}"}. + * @param parameters The template parameters. + */ + protected void logDecision(String template, Object... parameters) { + logger.info(template, parameters); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java index 940839fecfd..a4a53c2f546 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/NoSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,39 +21,36 @@ import com.datastax.driver.core.Host; import com.datastax.driver.core.Statement; -/** - * A {@link SpeculativeExecutionPolicy} that never schedules speculative executions. - */ +/** A {@link SpeculativeExecutionPolicy} that never schedules speculative executions. */ public class NoSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - /** - * The single instance (this class is stateless). - */ - public static final NoSpeculativeExecutionPolicy INSTANCE = new NoSpeculativeExecutionPolicy(); + /** The single instance (this class is stateless). 
*/ + public static final NoSpeculativeExecutionPolicy INSTANCE = new NoSpeculativeExecutionPolicy(); - private static final SpeculativeExecutionPlan PLAN = new SpeculativeExecutionPlan() { + private static final SpeculativeExecutionPlan PLAN = + new SpeculativeExecutionPlan() { @Override public long nextExecution(Host lastQueried) { - return -1; + return -1; } - }; - - @Override - public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { - return PLAN; - } - - private NoSpeculativeExecutionPolicy() { - // do nothing - } - - @Override - public void init(Cluster cluster) { - // do nothing - } - - @Override - public void close() { - // do nothing - } + }; + + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return PLAN; + } + + private NoSpeculativeExecutionPolicy() { + // do nothing + } + + @Override + public void init(Cluster cluster) { + // do nothing + } + + @Override + public void close() { + // do nothing + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java index 8b8b84ef5a7..995a6775275 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,66 +17,70 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.google.common.base.Preconditions.checkArgument; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.LatencyTracker; +import com.datastax.driver.core.PercentileTracker; +import com.datastax.driver.core.Statement; import java.util.concurrent.atomic.AtomicInteger; -import static com.google.common.base.Preconditions.checkArgument; - /** - * A policy that triggers speculative executions when the request to the current host is above a given percentile. + * A policy that triggers speculative executions when the request to the current host is above a + * given percentile. */ public class PercentileSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - private final PercentileTracker percentileTracker; - private final double percentile; - private final int maxSpeculativeExecutions; + private final PercentileTracker percentileTracker; + private final double percentile; + private final int maxSpeculativeExecutions; - /** - * Builds a new instance. 
- * - * @param percentileTracker the component that will record latencies. It will get - * {@link Cluster#register(LatencyTracker) registered} with the cluster when this - * policy initializes. - * @param percentile the percentile that a request's latency must fall into to be considered slow (ex: - * {@code 99.0}). - * @param maxSpeculativeExecutions the maximum number of speculative executions that will be triggered for a given - * request (this does not include the initial, normal request). Must be strictly - * positive. - */ - public PercentileSpeculativeExecutionPolicy(PercentileTracker percentileTracker, - double percentile, int maxSpeculativeExecutions) { - checkArgument(maxSpeculativeExecutions > 0, - "number of speculative executions must be strictly positive (was %d)", maxSpeculativeExecutions); - checkArgument(percentile >= 0.0 && percentile < 100, - "percentile must be between 0.0 and 100 (was %f)"); + /** + * Builds a new instance. + * + * @param percentileTracker the component that will record latencies. It will get {@link + * Cluster#register(LatencyTracker) registered} with the cluster when this policy initializes. + * @param percentile the percentile that a request's latency must fall into to be considered slow + * (ex: {@code 99.0}). + * @param maxSpeculativeExecutions the maximum number of speculative executions that will be + * triggered for a given request (this does not include the initial, normal request). Must be + * strictly positive. + */ + public PercentileSpeculativeExecutionPolicy( + PercentileTracker percentileTracker, double percentile, int maxSpeculativeExecutions) { + checkArgument( + maxSpeculativeExecutions > 0, + "number of speculative executions must be strictly positive (was %d)", + maxSpeculativeExecutions); + checkArgument( + percentile >= 0.0 && percentile < 100, "percentile must be between 0.0 and 100 (was %f)"); - this.percentileTracker = percentileTracker; - this.percentile = percentile; - this.maxSpeculativeExecutions = maxSpeculativeExecutions; - } + this.percentileTracker = percentileTracker; + this.percentile = percentile; + this.maxSpeculativeExecutions = maxSpeculativeExecutions; + } - @Override - public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { - return new SpeculativeExecutionPlan() { - private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return new SpeculativeExecutionPlan() { + private final AtomicInteger remaining = new AtomicInteger(maxSpeculativeExecutions); - @Override - public long nextExecution(Host lastQueried) { - if (remaining.getAndDecrement() > 0) - return percentileTracker.getLatencyAtPercentile(lastQueried, null, null, percentile); - else - return -1; - } - }; - } + @Override + public long nextExecution(Host lastQueried) { + if (remaining.getAndDecrement() > 0) + return percentileTracker.getLatencyAtPercentile(lastQueried, null, null, percentile); + else return -1; + } + }; + } - @Override - public void init(Cluster cluster) { - cluster.register(percentileTracker); - } + @Override + public void init(Cluster cluster) { + cluster.register(percentileTracker); + } - @Override - public void close() { - // nothing - } + @Override + public void close() { + // nothing + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java index 881b437e13b..0011a85e8a1 
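For context, a hedged sketch (not part of this patch) of how this policy is typically wired up. It assumes the `ClusterWidePercentileTracker` implementation shipped with the 3.x driver; the latency ceiling, percentile and execution cap are illustrative values:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ClusterWidePercentileTracker;
import com.datastax.driver.core.PercentileTracker;
import com.datastax.driver.core.policies.PercentileSpeculativeExecutionPolicy;

public class PercentileSpeculativeExecutionExample {
  public static void main(String[] args) {
    // Record latencies up to 15 seconds (illustrative ceiling); the policy will
    // register this tracker with the cluster when it initializes.
    PercentileTracker tracker = ClusterWidePercentileTracker.builder(15000).build();

    // Speculate when a request is slower than the 99th percentile, with at most
    // 2 extra executions per request (values are illustrative).
    PercentileSpeculativeExecutionPolicy policy =
        new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 2);

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withSpeculativeExecutionPolicy(policy)
            .build();
    cluster.close();
  }
}
```

Note that speculative executions are only worthwhile for idempotent statements, since the same request may end up being executed more than once.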
100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,275 +18,298 @@ package com.datastax.driver.core.policies; import com.datastax.driver.core.AtomicMonotonicTimestampGenerator; +import com.datastax.driver.core.DefaultEndPointFactory; +import com.datastax.driver.core.EndPointFactory; import com.datastax.driver.core.TimestampGenerator; -/** - * Policies configured for a {@link com.datastax.driver.core.Cluster} instance. - */ +/** Policies configured for a {@link com.datastax.driver.core.Cluster} instance. */ public class Policies { - /** - * Returns a builder to create a new {@code Policies} object. - * - * @return the builder. - */ - public static Builder builder() { - return new Builder(); - } + /** + * Returns a builder to create a new {@code Policies} object. + * + * @return the builder. 
+ */ + public static Builder builder() { + return new Builder(); + } - private static final ReconnectionPolicy DEFAULT_RECONNECTION_POLICY = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000); - private static final RetryPolicy DEFAULT_RETRY_POLICY = DefaultRetryPolicy.INSTANCE; - private static final AddressTranslator DEFAULT_ADDRESS_TRANSLATOR = new IdentityTranslator(); - private static final SpeculativeExecutionPolicy DEFAULT_SPECULATIVE_EXECUTION_POLICY = NoSpeculativeExecutionPolicy.INSTANCE; + private static final ReconnectionPolicy DEFAULT_RECONNECTION_POLICY = + new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000); + private static final RetryPolicy DEFAULT_RETRY_POLICY = DefaultRetryPolicy.INSTANCE; + private static final AddressTranslator DEFAULT_ADDRESS_TRANSLATOR = new IdentityTranslator(); + private static final SpeculativeExecutionPolicy DEFAULT_SPECULATIVE_EXECUTION_POLICY = + NoSpeculativeExecutionPolicy.INSTANCE; - private final LoadBalancingPolicy loadBalancingPolicy; - private final ReconnectionPolicy reconnectionPolicy; - private final RetryPolicy retryPolicy; - private final AddressTranslator addressTranslator; - private final TimestampGenerator timestampGenerator; - private final SpeculativeExecutionPolicy speculativeExecutionPolicy; + private final LoadBalancingPolicy loadBalancingPolicy; + private final ReconnectionPolicy reconnectionPolicy; + private final RetryPolicy retryPolicy; + private final AddressTranslator addressTranslator; + private final TimestampGenerator timestampGenerator; + private final SpeculativeExecutionPolicy speculativeExecutionPolicy; + private final EndPointFactory endPointFactory; - private Policies(LoadBalancingPolicy loadBalancingPolicy, - ReconnectionPolicy reconnectionPolicy, - RetryPolicy retryPolicy, - AddressTranslator addressTranslator, - TimestampGenerator timestampGenerator, - SpeculativeExecutionPolicy speculativeExecutionPolicy) { - this.loadBalancingPolicy = loadBalancingPolicy; - this.reconnectionPolicy = reconnectionPolicy; - this.retryPolicy = retryPolicy; - this.addressTranslator = addressTranslator; - this.timestampGenerator = timestampGenerator; - this.speculativeExecutionPolicy = speculativeExecutionPolicy; - } + private Policies( + LoadBalancingPolicy loadBalancingPolicy, + ReconnectionPolicy reconnectionPolicy, + RetryPolicy retryPolicy, + AddressTranslator addressTranslator, + TimestampGenerator timestampGenerator, + SpeculativeExecutionPolicy speculativeExecutionPolicy, + EndPointFactory endPointFactory) { + this.loadBalancingPolicy = loadBalancingPolicy; + this.reconnectionPolicy = reconnectionPolicy; + this.retryPolicy = retryPolicy; + this.addressTranslator = addressTranslator; + this.timestampGenerator = timestampGenerator; + this.speculativeExecutionPolicy = speculativeExecutionPolicy; + this.endPointFactory = endPointFactory; + } - /** - * The default load balancing policy. - *

    - * The default load balancing policy is {@link DCAwareRoundRobinPolicy} with token - * awareness (so {@code new TokenAwarePolicy(new DCAwareRoundRobinPolicy())}). - *

    - * Note that this policy shuffles the replicas when token awareness is used, see - * {@link TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, boolean)} for an - * explanation of the tradeoffs. - * - * @return the default load balancing policy. - */ - public static LoadBalancingPolicy defaultLoadBalancingPolicy() { - // Note: balancing policies are stateful, so we can't store that in a static or that would screw thing - // up if multiple Cluster instance are started in the same JVM. - return new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build()); - } + /** + * The default load balancing policy. + * + *

    The default load balancing policy is {@link DCAwareRoundRobinPolicy} with token awareness + * (so {@code new TokenAwarePolicy(new DCAwareRoundRobinPolicy())}). + * + *

    Note that this policy shuffles the replicas when token awareness is used, see {@link + * TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, boolean)} for an explanation of the + * tradeoffs. + * + * @return the default load balancing policy. + */ + public static LoadBalancingPolicy defaultLoadBalancingPolicy() { + // Note: balancing policies are stateful, so we can't store that in a static or that would screw + // thing + // up if multiple Cluster instance are started in the same JVM. + return new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build()); + } - /** - * The default reconnection policy. - *
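As an illustration of the shuffling tradeoff mentioned above (not part of this patch), the two-argument `TokenAwarePolicy` constructor can build the equivalent of the default policy with shuffling disabled; the datacenter name and contact point are placeholders:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

public class LoadBalancingExample {
  public static void main(String[] args) {
    // Same shape as the default policy, but with an explicit local datacenter
    // and replica shuffling disabled ("DC1" is a placeholder name).
    LoadBalancingPolicy policy =
        new TokenAwarePolicy(
            DCAwareRoundRobinPolicy.builder().withLocalDc("DC1").build(),
            false); // shuffleReplicas = false: replicas are tried in a deterministic order

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();
    cluster.close();
  }
}
```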

    - * The default reconnection policy is an {@link ExponentialReconnectionPolicy} - * where the base delay is 1 second and the max delay is 10 minutes; - * - * @return the default reconnection policy. - */ - public static ReconnectionPolicy defaultReconnectionPolicy() { - return DEFAULT_RECONNECTION_POLICY; - } + /** + * The default reconnection policy. + * + *

    The default reconnection policy is an {@link ExponentialReconnectionPolicy} where the base + * delay is 1 second and the max delay is 10 minutes; + * + * @return the default reconnection policy. + */ + public static ReconnectionPolicy defaultReconnectionPolicy() { + return DEFAULT_RECONNECTION_POLICY; + } - /** - * The default retry policy. - *
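For context, a small sketch (not part of this patch) showing how the exponential policy is typically tuned when the defaults above are not appropriate; the delays are illustrative:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;

public class ReconnectionConfigExample {
  public static void main(String[] args) {
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            // First attempt after 500 ms, exponentially backing off up to 30 s
            // (the defaults are 1 second and 10 minutes).
            .withReconnectionPolicy(new ExponentialReconnectionPolicy(500, 30000))
            .build();
    cluster.close();
  }
}
```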

    - * The default retry policy is {@link DefaultRetryPolicy}. - * - * @return the default retry policy. - */ - public static RetryPolicy defaultRetryPolicy() { - return DEFAULT_RETRY_POLICY; - } + /** + * The default retry policy. + * + *

    The default retry policy is {@link DefaultRetryPolicy}. + * + * @return the default retry policy. + */ + public static RetryPolicy defaultRetryPolicy() { + return DEFAULT_RETRY_POLICY; + } - /** - * The default address translator. - *

    - * The default address translator is {@link IdentityTranslator}. - * - * @return the default address translator. - */ - public static AddressTranslator defaultAddressTranslator() { - return DEFAULT_ADDRESS_TRANSLATOR; - } + /** + * The default address translator. + * + *

    The default address translator is {@link IdentityTranslator}. + * + * @return the default address translator. + */ + public static AddressTranslator defaultAddressTranslator() { + return DEFAULT_ADDRESS_TRANSLATOR; + } - /** - * The default timestamp generator. - *

    - * This is an instance of {@link AtomicMonotonicTimestampGenerator}. - * - * @return the default timestamp generator. - */ - public static TimestampGenerator defaultTimestampGenerator() { - return new AtomicMonotonicTimestampGenerator(); - } + /** + * The default timestamp generator. + * + *

    This is an instance of {@link AtomicMonotonicTimestampGenerator}. + * + * @return the default timestamp generator. + */ + public static TimestampGenerator defaultTimestampGenerator() { + return new AtomicMonotonicTimestampGenerator(); + } + + /** + * The default speculative retry policy. + * + *

    The default speculative retry policy is a {@link NoSpeculativeExecutionPolicy}. + * + * @return the default speculative retry policy. + */ + public static SpeculativeExecutionPolicy defaultSpeculativeExecutionPolicy() { + return DEFAULT_SPECULATIVE_EXECUTION_POLICY; + } + + public static EndPointFactory defaultEndPointFactory() { + return new DefaultEndPointFactory(); + } + + /** + * The load balancing policy in use. + * + *

    The load balancing policy defines how Cassandra hosts are picked for queries. + * + * @return the load balancing policy in use. + */ + public LoadBalancingPolicy getLoadBalancingPolicy() { + return loadBalancingPolicy; + } + + /** + * The reconnection policy in use. + * + *

    The reconnection policy defines how often the driver tries to reconnect to a dead node. + * + * @return the reconnection policy in use. + */ + public ReconnectionPolicy getReconnectionPolicy() { + return reconnectionPolicy; + } + + /** + * The retry policy in use. + * + *

    The retry policy defines in which conditions a query should be automatically retries by the + * driver. + * + * @return the retry policy in use. + */ + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + /** + * The address translator in use. + * + * @return the address translator in use. + */ + public AddressTranslator getAddressTranslator() { + return addressTranslator; + } + + /** + * The timestamp generator to use. + * + * @return the timestamp generator to use. + */ + public TimestampGenerator getTimestampGenerator() { + return timestampGenerator; + } + + /** + * The speculative execution policy in use. + * + * @return the speculative execution policy in use. + */ + public SpeculativeExecutionPolicy getSpeculativeExecutionPolicy() { + return speculativeExecutionPolicy; + } + + public EndPointFactory getEndPointFactory() { + return endPointFactory; + } + + /** A builder to create a new {@code Policies} object. */ + public static class Builder { + private LoadBalancingPolicy loadBalancingPolicy; + private ReconnectionPolicy reconnectionPolicy; + private RetryPolicy retryPolicy; + private AddressTranslator addressTranslator; + private TimestampGenerator timestampGenerator; + private SpeculativeExecutionPolicy speculativeExecutionPolicy; + private EndPointFactory endPointFactory; /** - * The default speculative retry policy. - *

    - * The default speculative retry policy is a {@link NoSpeculativeExecutionPolicy}. + * Sets the load balancing policy. * - * @return the default speculative retry policy. + * @param loadBalancingPolicy see {@link #getLoadBalancingPolicy()}. + * @return this builder. */ - public static SpeculativeExecutionPolicy defaultSpeculativeExecutionPolicy() { - return DEFAULT_SPECULATIVE_EXECUTION_POLICY; + public Builder withLoadBalancingPolicy(LoadBalancingPolicy loadBalancingPolicy) { + this.loadBalancingPolicy = loadBalancingPolicy; + return this; } /** - * The load balancing policy in use. - *

    - * The load balancing policy defines how Cassandra hosts are picked for queries. + * Sets the reconnection policy. * - * @return the load balancing policy in use. + * @param reconnectionPolicy see {@link #getReconnectionPolicy()}. + * @return this builder. */ - public LoadBalancingPolicy getLoadBalancingPolicy() { - return loadBalancingPolicy; + public Builder withReconnectionPolicy(ReconnectionPolicy reconnectionPolicy) { + this.reconnectionPolicy = reconnectionPolicy; + return this; } /** - * The reconnection policy in use. - *

    - * The reconnection policy defines how often the driver tries to reconnect to a dead node. + * Sets the retry policy. * - * @return the reconnection policy in use. + * @param retryPolicy see {@link #getRetryPolicy()}. + * @return this builder. */ - public ReconnectionPolicy getReconnectionPolicy() { - return reconnectionPolicy; + public Builder withRetryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + return this; } /** - * The retry policy in use. - *

    - * The retry policy defines in which conditions a query should be - * automatically retries by the driver. + * Sets the address translator. * - * @return the retry policy in use. + * @param addressTranslator see {@link #getAddressTranslator()}. + * @return this builder. */ - public RetryPolicy getRetryPolicy() { - return retryPolicy; + public Builder withAddressTranslator(AddressTranslator addressTranslator) { + this.addressTranslator = addressTranslator; + return this; } /** - * The address translator in use. + * Sets the timestamp generator. * - * @return the address translator in use. + * @param timestampGenerator see {@link #getTimestampGenerator()}. + * @return this builder. */ - public AddressTranslator getAddressTranslator() { - return addressTranslator; + public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { + this.timestampGenerator = timestampGenerator; + return this; } /** - * The timestamp generator to use. + * Sets the speculative execution policy. * - * @return the timestamp generator to use. + * @param speculativeExecutionPolicy see {@link #getSpeculativeExecutionPolicy()}. + * @return this builder. */ - public TimestampGenerator getTimestampGenerator() { - return timestampGenerator; + public Builder withSpeculativeExecutionPolicy( + SpeculativeExecutionPolicy speculativeExecutionPolicy) { + this.speculativeExecutionPolicy = speculativeExecutionPolicy; + return this; } - /** - * The speculative execution policy in use. - * - * @return the speculative execution policy in use. - */ - public SpeculativeExecutionPolicy getSpeculativeExecutionPolicy() { - return speculativeExecutionPolicy; + public Builder withEndPointFactory(EndPointFactory endPointFactory) { + this.endPointFactory = endPointFactory; + return this; } /** - * A builder to create a new {@code Policies} object. + * Builds the final object from this builder. + * + *

    Any field that hasn't been set explicitly will get its default value. + * + * @return the object. */ - public static class Builder { - private LoadBalancingPolicy loadBalancingPolicy; - private ReconnectionPolicy reconnectionPolicy; - private RetryPolicy retryPolicy; - private AddressTranslator addressTranslator; - private TimestampGenerator timestampGenerator; - private SpeculativeExecutionPolicy speculativeExecutionPolicy; - - /** - * Sets the load balancing policy. - * - * @param loadBalancingPolicy see {@link #getLoadBalancingPolicy()}. - * @return this builder. - */ - public Builder withLoadBalancingPolicy(LoadBalancingPolicy loadBalancingPolicy) { - this.loadBalancingPolicy = loadBalancingPolicy; - return this; - } - - /** - * Sets the reconnection policy. - * - * @param reconnectionPolicy see {@link #getReconnectionPolicy()}. - * @return this builder. - */ - public Builder withReconnectionPolicy(ReconnectionPolicy reconnectionPolicy) { - this.reconnectionPolicy = reconnectionPolicy; - return this; - } - - /** - * Sets the retry policy. - * - * @param retryPolicy see {@link #getRetryPolicy()}. - * @return this builder. - */ - public Builder withRetryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = retryPolicy; - return this; - } - - /** - * Sets the address translator. - * - * @param addressTranslator see {@link #getAddressTranslator()}. - * @return this builder. - */ - public Builder withAddressTranslator(AddressTranslator addressTranslator) { - this.addressTranslator = addressTranslator; - return this; - } - - /** - * Sets the timestamp generator. - * - * @param timestampGenerator see {@link #getTimestampGenerator()}. - * @return this builder. - */ - public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) { - this.timestampGenerator = timestampGenerator; - return this; - } - - /** - * Sets the speculative execution policy. - * - * @param speculativeExecutionPolicy see {@link #getSpeculativeExecutionPolicy()}. - * @return this builder. - */ - public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy speculativeExecutionPolicy) { - this.speculativeExecutionPolicy = speculativeExecutionPolicy; - return this; - } - - /** - * Builds the final object from this builder. - *

    - * Any field that hasn't been set explicitly will get its default value. - * - * @return the object. - */ - public Policies build() { - return new Policies( - loadBalancingPolicy == null ? defaultLoadBalancingPolicy() : loadBalancingPolicy, - reconnectionPolicy == null ? defaultReconnectionPolicy() : reconnectionPolicy, - retryPolicy == null ? defaultRetryPolicy() : retryPolicy, - addressTranslator == null ? defaultAddressTranslator() : addressTranslator, - timestampGenerator == null ? defaultTimestampGenerator() : timestampGenerator, - speculativeExecutionPolicy == null ? defaultSpeculativeExecutionPolicy() : speculativeExecutionPolicy); - } + public Policies build() { + return new Policies( + loadBalancingPolicy == null ? defaultLoadBalancingPolicy() : loadBalancingPolicy, + reconnectionPolicy == null ? defaultReconnectionPolicy() : reconnectionPolicy, + retryPolicy == null ? defaultRetryPolicy() : retryPolicy, + addressTranslator == null ? defaultAddressTranslator() : addressTranslator, + timestampGenerator == null ? defaultTimestampGenerator() : timestampGenerator, + speculativeExecutionPolicy == null + ? defaultSpeculativeExecutionPolicy() + : speculativeExecutionPolicy, + endPointFactory == null ? defaultEndPointFactory() : endPointFactory); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java index acae1efa2ea..0cdae499c7a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,60 +21,54 @@ /** * Policy that decides how often the reconnection to a dead node is attempted. - *
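For context, a short sketch (not part of this patch) of the builder in action; any policy left unset falls back to the defaults documented above:

```java
import com.datastax.driver.core.policies.DefaultRetryPolicy;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;
import com.datastax.driver.core.policies.LoggingRetryPolicy;
import com.datastax.driver.core.policies.Policies;

public class PoliciesBuilderExample {
  public static void main(String[] args) {
    Policies policies =
        Policies.builder()
            .withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))
            .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 60000))
            .build();

    // Anything not set explicitly falls back to the defaults documented above,
    // e.g. the token-aware, DC-aware load balancing policy.
    System.out.println(policies.getLoadBalancingPolicy().getClass().getSimpleName());
  }
}
```

In most applications the equivalent `with*` methods on `Cluster.Builder` are used instead; they feed into the same `Policies` object internally.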

    - * Each time a node is detected dead (because a connection error occurs), a new - * {@code ReconnectionSchedule} instance is created (through the {@link #newSchedule()}). - * Then each call to the {@link ReconnectionSchedule#nextDelayMs} method of - * this instance will decide when the next reconnection attempt to this node - * will be tried. - *

    - * Note that if the driver receives a push notification from the Cassandra cluster - * that a node is UP, any existing {@code ReconnectionSchedule} on that node - * will be cancelled and a new one will be created (in effect, the driver reset - * the scheduler). - *

    - * The default {@link ExponentialReconnectionPolicy} policy is usually - * adequate. + * + *

    Each time a node is detected dead (because a connection error occurs), a new {@code + * ReconnectionSchedule} instance is created (through the {@link #newSchedule()}). Then each call to + * the {@link ReconnectionSchedule#nextDelayMs} method of this instance will decide when the next + * reconnection attempt to this node will be tried. + * + *

Note that if the driver receives a push notification from the Cassandra cluster that a node is + * UP, any existing {@code ReconnectionSchedule} on that node will be cancelled and a new one will + * be created (in effect, the driver resets the scheduler). + * + *

    The default {@link ExponentialReconnectionPolicy} policy is usually adequate. */ public interface ReconnectionPolicy { - /** - * Creates a new schedule for reconnection attempts. - * - * @return the created schedule. - */ - public ReconnectionSchedule newSchedule(); - - /** - * Schedules reconnection attempts to a node. - */ - public interface ReconnectionSchedule { + /** + * Creates a new schedule for reconnection attempts. + * + * @return the created schedule. + */ + public ReconnectionSchedule newSchedule(); - /** - * When to attempt the next reconnection. - *

    - * This method will be called once when the host is detected down to - * schedule the first reconnection attempt, and then once after each failed - * reconnection attempt to schedule the next one. Hence each call to this - * method are free to return a different value. - * - * @return a time in milliseconds to wait before attempting the next - * reconnection. - */ - public long nextDelayMs(); - } + /** Schedules reconnection attempts to a node. */ + public interface ReconnectionSchedule { /** - * Gets invoked at cluster startup. + * When to attempt the next reconnection. + * + *

This method will be called once when the host is detected down to schedule the first + * reconnection attempt, and then once after each failed reconnection attempt to schedule the + * next one. Hence each call to this method is free to return a different value. * - * @param cluster the cluster that this policy is associated with. + * @return a time in milliseconds to wait before attempting the next reconnection. */ - void init(Cluster cluster); + public long nextDelayMs(); + } - /** - * Gets invoked at cluster shutdown. - *

    - * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. - */ - void close(); + /** + * Gets invoked at cluster startup. + * + * @param cluster the cluster that this policy is associated with. + */ + void init(Cluster cluster); + + /** + * Gets invoked at cluster shutdown. + * + *

    This gives the policy the opportunity to perform some cleanup, for instance stop threads + * that it might have started. + */ + void close(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java index 78c47f4ee92..2a51acb30ba 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,274 +17,290 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; /** * A policy that defines a default behavior to adopt when a request fails. - *
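To make the contract above concrete, a minimal custom implementation (not part of this patch, and similar in spirit to the constant-delay policy the driver already provides) could look like this:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ReconnectionPolicy;

/** Retries a downed node at a fixed interval, with no backoff. */
public class FixedDelayReconnectionPolicy implements ReconnectionPolicy {

  private final long delayMs;

  public FixedDelayReconnectionPolicy(long delayMs) {
    this.delayMs = delayMs;
  }

  @Override
  public ReconnectionSchedule newSchedule() {
    // A fresh schedule is created each time a node is detected down.
    return new ReconnectionSchedule() {
      @Override
      public long nextDelayMs() {
        // Called before every attempt; returning a constant means no backoff.
        return delayMs;
      }
    };
  }

  @Override
  public void init(Cluster cluster) {
    // no state to initialize
  }

  @Override
  public void close() {
    // no resources to release
  }
}
```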

    - * Such policy allows to centralize the handling of query retries, allowing to - * minimize the need for exception catching/handling in business code. + * + *

Such a policy allows you to centralize the handling of query retries, minimizing the need + * for exception catching/handling in business code. */ public interface RetryPolicy { - /** - * A retry decision to adopt on a Cassandra exception (read/write timeout - * or unavailable exception). - *

    - * There are three possible decisions: - *

      - *
- * <ul>
- * <li>RETHROW: no retry should be attempted and an exception should be thrown.</li>
- * <li>RETRY: the operation will be retried. The consistency level of the
- * retry should be specified.</li>
- * <li>IGNORE: no retry should be attempted and the exception should be
- * ignored. In that case, the operation that triggered the Cassandra
- * exception will return an empty result set.</li>
- * </ul>
    - */ - class RetryDecision { - - private static final RetryDecision RETHROW_DECISION = new RetryDecision(Type.RETHROW, null, true); - private static final RetryDecision IGNORE_DECISION = new RetryDecision(Type.IGNORE, null, true); - - /** - * The types of retry decisions. - */ - public enum Type { - RETRY, RETHROW, IGNORE - } - - private final Type type; - private final ConsistencyLevel retryCL; - private final boolean retryCurrent; - - private RetryDecision(Type type, ConsistencyLevel retryCL, boolean retryCurrent) { - this.type = type; - this.retryCL = retryCL; - this.retryCurrent = retryCurrent; - } - - /** - * The type of this retry decision. - * - * @return the type of this retry decision. - */ - public Type getType() { - return type; - } - - /** - * The consistency level for this retry decision. - * This is only meaningful for {@code RETRY} decisions. - * The consistency level is always {@code null} for an - * {@code IGNORE} or a {@code RETHROW} decision; - * for a {@code RETRY} decision, the consistency level can be {@code null}, - * in which case the retry is done at the same consistency level - * as in the previous attempt. - * - * @return the consistency level for a retry decision. - */ - public ConsistencyLevel getRetryConsistencyLevel() { - return retryCL; - } + /** + * A retry decision to adopt on a Cassandra exception (read/write timeout or unavailable + * exception). + * + *

    There are three possible decisions: + * + *

      + *
+ * <ul>
+ *   <li>RETHROW: no retry should be attempted and an exception should be thrown.
+ *   <li>RETRY: the operation will be retried. The consistency level of the retry should be
+ *       specified.
+ *   <li>IGNORE: no retry should be attempted and the exception should be ignored. In that case,
+ *       the operation that triggered the Cassandra exception will return an empty result set.
+ * </ul>
    + */ + class RetryDecision { - /** - * Whether this decision is to retry the same host. - * This is only meaningful for {@code RETRY} decisions. - * - * @return {@code true} if the decision is to retry the same host, - * {@code false} otherwise. Default is {@code false}. - */ - public boolean isRetryCurrent() { - return retryCurrent; - } + private static final RetryDecision RETHROW_DECISION = + new RetryDecision(Type.RETHROW, null, true); + private static final RetryDecision IGNORE_DECISION = new RetryDecision(Type.IGNORE, null, true); - /** - * Creates a {@link RetryDecision.Type#RETHROW} retry decision. - * - * @return a {@link RetryDecision.Type#RETHROW} retry decision. - */ - public static RetryDecision rethrow() { - return RETHROW_DECISION; - } - - /** - * Creates a {@link RetryDecision.Type#RETRY} retry decision using - * the same host and the provided consistency level. - *

    - * If the provided consistency level is {@code null}, the retry will be done at the same consistency level as - * the previous attempt. - *

    - * Beware that {@link ConsistencyLevel#isSerial() serial} consistency levels - * should never be passed to this method; attempting to do so would trigger an - * {@link com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. - * - * @param consistency the consistency level to use for the retry; if {@code null}, - * the same level as the previous attempt will be used. - * @return a {@link RetryDecision.Type#RETRY} decision using - * the same host and the provided consistency level - */ - public static RetryDecision retry(ConsistencyLevel consistency) { - return new RetryDecision(Type.RETRY, consistency, true); - } + /** The types of retry decisions. */ + public enum Type { + RETRY, + RETHROW, + IGNORE + } - /** - * Creates an {@link RetryDecision.Type#IGNORE} retry decision. - * - * @return an {@link RetryDecision.Type#IGNORE} retry decision. - */ - public static RetryDecision ignore() { - return IGNORE_DECISION; - } + private final Type type; + private final ConsistencyLevel retryCL; + private final boolean retryCurrent; - /** - * Creates a {@link RetryDecision.Type#RETRY} retry decision using the next host - * in the query plan, and using the provided consistency level. - *

    - * If the provided consistency level is {@code null}, the retry will be done at the same consistency level as - * the previous attempt. - *

    - * Beware that {@link ConsistencyLevel#isSerial() serial} consistency levels - * should never be passed to this method; attempting to do so would trigger an - * {@link com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. - * - * @param consistency the consistency level to use for the retry; if {@code null}, - * the same level as the previous attempt will be used. - * @return a {@link RetryDecision.Type#RETRY} retry decision using the next host - * in the query plan, and using the provided consistency level. - */ - public static RetryDecision tryNextHost(ConsistencyLevel consistency) { - return new RetryDecision(Type.RETRY, consistency, false); - } + private RetryDecision(Type type, ConsistencyLevel retryCL, boolean retryCurrent) { + this.type = type; + this.retryCL = retryCL; + this.retryCurrent = retryCurrent; + } - @Override - public String toString() { - switch (type) { - case RETRY: - String retryClDesc = (retryCL == null) ? "same CL" : retryCL.toString(); - String hostDesc = retryCurrent ? "same" : "next"; - return "Retry at " + retryClDesc + " on " + hostDesc + " host."; - case RETHROW: - return "Rethrow"; - case IGNORE: - return "Ignore"; - } - throw new AssertionError(); - } + /** + * The type of this retry decision. + * + * @return the type of this retry decision. + */ + public Type getType() { + return type; } /** - * Defines whether to retry and at which consistency level on a read timeout. - *

    - * Note that this method may be called even if - * {@code requiredResponses >= receivedResponses} if {@code dataPresent} is - * {@code false} (see - * {@link com.datastax.driver.core.exceptions.ReadTimeoutException#wasDataRetrieved}). + * The consistency level for this retry decision. This is only meaningful for {@code RETRY} + * decisions. The consistency level is always {@code null} for an {@code IGNORE} or a {@code + * RETHROW} decision; for a {@code RETRY} decision, the consistency level can be {@code null}, + * in which case the retry is done at the same consistency level as in the previous attempt. * - * @param statement the original query that timed out. - * @param cl the requested consistency level of the read that timed out. - * Note that this can never be a {@link ConsistencyLevel#isSerial() serial} - * consistency level. - * @param requiredResponses the number of responses that were required to - * achieve the requested consistency level. - * @param receivedResponses the number of responses that had been received - * by the time the timeout exception was raised. - * @param dataRetrieved whether actual data (by opposition to data checksum) - * was present in the received responses. - * @param nbRetry the number of retry already performed for this operation. - * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, - * a {@link com.datastax.driver.core.exceptions.ReadTimeoutException} will - * be thrown for the operation. + * @return the consistency level for a retry decision. */ - RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry); + public ConsistencyLevel getRetryConsistencyLevel() { + return retryCL; + } /** - * Defines whether to retry and at which consistency level on a write timeout. - *

    - * Note that if a statement is {@link Statement#isIdempotent() not idempotent}, the driver will never retry it on a - * write timeout (this method won't even be called). + * Whether this decision is to retry the same host. This is only meaningful for {@code RETRY} + * decisions. * - * @param statement the original query that timed out. - * @param cl the requested consistency level of the write that timed out. - * If the timeout occurred at the "paxos" phase of a - * Lightweight transaction, - * then {@code cl} will actually be the requested {@link ConsistencyLevel#isSerial() serial} consistency level. - * Beware that serial consistency levels should never be passed to a {@link RetryDecision RetryDecision} as this would - * invariably trigger an {@link com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. - * Also, when {@code cl} is {@link ConsistencyLevel#isSerial() serial}, then {@code writeType} is always {@link WriteType#CAS CAS}. - * @param writeType the type of the write that timed out. - * @param requiredAcks the number of acknowledgments that were required to - * achieve the requested consistency level. - * @param receivedAcks the number of acknowledgments that had been received - * by the time the timeout exception was raised. - * @param nbRetry the number of retry already performed for this operation. - * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, - * a {@link com.datastax.driver.core.exceptions.WriteTimeoutException} will - * be thrown for the operation. + * @return {@code true} if the decision is to retry the same host, {@code false} otherwise. + * Default is {@code false}. */ - RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry); + public boolean isRetryCurrent() { + return retryCurrent; + } /** - * Defines whether to retry and at which consistency level on an - * unavailable exception. + * Creates a {@link RetryDecision.Type#RETHROW} retry decision. * - * @param statement the original query for which the consistency level cannot - * be achieved. - * @param cl the requested consistency level for the operation. - * If the operation failed at the "paxos" phase of a - * Lightweight transaction, - * then {@code cl} will actually be the requested {@link ConsistencyLevel#isSerial() serial} consistency level. - * Beware that serial consistency levels should never be passed to a {@link RetryDecision RetryDecision} as this would - * invariably trigger an {@link com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. - * @param requiredReplica the number of replica that should have been - * (known) alive for the operation to be attempted. - * @param aliveReplica the number of replica that were know to be alive by - * the coordinator of the operation. - * @param nbRetry the number of retry already performed for this operation. - * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, - * an {@link com.datastax.driver.core.exceptions.UnavailableException} will - * be thrown for the operation. + * @return a {@link RetryDecision.Type#RETHROW} retry decision. */ - RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry); + public static RetryDecision rethrow() { + return RETHROW_DECISION; + } /** - * Defines whether to retry and at which consistency level on an - * unexpected error. - *

    - * This method might be invoked in the following situations: - *

      - *
- * <ol>
- * <li>On a client timeout, while waiting for the server response
- * (see {@link SocketOptions#getReadTimeoutMillis()});</li>
- * <li>On a connection error (socket closed, etc.);</li>
- * <li>When the contacted host replies with an {@code OVERLOADED} error or a {@code SERVER_ERROR}.</li>
- * </ol>

    - * Note that when such an error occurs, there is no guarantee that the mutation has been applied server-side or not. - * Therefore, if a statement is {@link Statement#isIdempotent() not idempotent}, the driver will never retry it - * (this method won't even be called). + * Creates a {@link RetryDecision.Type#RETRY} retry decision using the same host and the + * provided consistency level. + * + *

    If the provided consistency level is {@code null}, the retry will be done at the same + * consistency level as the previous attempt. + * + *

    Beware that {@link ConsistencyLevel#isSerial() serial} consistency levels should never be + * passed to this method; attempting to do so would trigger an {@link + * com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. * - * @param statement the original query that failed. - * @param cl the requested consistency level for the operation. - * Note that this is not necessarily the achieved consistency level (if any), - * and it is never a {@link ConsistencyLevel#isSerial() serial} one. - * @param e the exception that caused this request to fail. - * @param nbRetry the number of retries already performed for this operation. - * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, - * the {@link DriverException} passed to this method will be thrown for the operation. + * @param consistency the consistency level to use for the retry; if {@code null}, the same + * level as the previous attempt will be used. + * @return a {@link RetryDecision.Type#RETRY} decision using the same host and the provided + * consistency level */ - RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry); + public static RetryDecision retry(ConsistencyLevel consistency) { + return new RetryDecision(Type.RETRY, consistency, true); + } /** - * Gets invoked at cluster startup. + * Creates an {@link RetryDecision.Type#IGNORE} retry decision. * - * @param cluster the cluster that this policy is associated with. + * @return an {@link RetryDecision.Type#IGNORE} retry decision. */ - void init(Cluster cluster); + public static RetryDecision ignore() { + return IGNORE_DECISION; + } /** - * Gets invoked at cluster shutdown. - *

    - * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. + * Creates a {@link RetryDecision.Type#RETRY} retry decision using the next host in the query + * plan, and using the provided consistency level. + * + *

    If the provided consistency level is {@code null}, the retry will be done at the same + * consistency level as the previous attempt. + * + *

    Beware that {@link ConsistencyLevel#isSerial() serial} consistency levels should never be + * passed to this method; attempting to do so would trigger an {@link + * com.datastax.driver.core.exceptions.InvalidQueryException InvalidQueryException}. + * + * @param consistency the consistency level to use for the retry; if {@code null}, the same + * level as the previous attempt will be used. + * @return a {@link RetryDecision.Type#RETRY} retry decision using the next host in the query + * plan, and using the provided consistency level. */ - void close(); + public static RetryDecision tryNextHost(ConsistencyLevel consistency) { + return new RetryDecision(Type.RETRY, consistency, false); + } + + @Override + public String toString() { + switch (type) { + case RETRY: + String retryClDesc = (retryCL == null) ? "same CL" : retryCL.toString(); + String hostDesc = retryCurrent ? "same" : "next"; + return "Retry at " + retryClDesc + " on " + hostDesc + " host."; + case RETHROW: + return "Rethrow"; + case IGNORE: + return "Ignore"; + } + throw new AssertionError(); + } + } + + /** + * Defines whether to retry and at which consistency level on a read timeout. + * + *
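To illustrate how these factory methods are typically combined (not part of this patch), a hedged sketch of a decision helper that retries once on the next host at the same consistency level and then gives up, roughly what the default policy does for unavailable exceptions:

```java
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.policies.RetryPolicy.RetryDecision;

public class RetryDecisionExample {
  /** One retry on the next host, then give up. */
  static RetryDecision onUnavailableDecision(
      Statement statement,
      ConsistencyLevel cl,
      int requiredReplica,
      int aliveReplica,
      int nbRetry) {
    // Passing null keeps the consistency level of the previous attempt; serial
    // consistency levels must never be passed here (see the javadoc above).
    return nbRetry == 0 ? RetryDecision.tryNextHost(null) : RetryDecision.rethrow();
  }
}
```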

    Note that this method may be called even if {@code requiredResponses >= receivedResponses} + * if {@code dataPresent} is {@code false} (see {@link + * com.datastax.driver.core.exceptions.ReadTimeoutException#wasDataRetrieved}). + * + * @param statement the original query that timed out. + * @param cl the requested consistency level of the read that timed out. Note that this can never + * be a {@link ConsistencyLevel#isSerial() serial} consistency level. + * @param requiredResponses the number of responses that were required to achieve the requested + * consistency level. + * @param receivedResponses the number of responses that had been received by the time the timeout + * exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) was present in the + * received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, a {@link + * com.datastax.driver.core.exceptions.ReadTimeoutException} will be thrown for the operation. + */ + RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry); + + /** + * Defines whether to retry and at which consistency level on a write timeout. + * + *

    Note that if a statement is {@link Statement#isIdempotent() not idempotent}, the driver will + * never retry it on a write timeout (this method won't even be called). + * + * @param statement the original query that timed out. + * @param cl the requested consistency level of the write that timed out. If the timeout occurred + * at the "paxos" phase of a Lightweight + * transaction, then {@code cl} will actually be the requested {@link + * ConsistencyLevel#isSerial() serial} consistency level. Beware that serial consistency + * levels should never be passed to a {@link RetryDecision RetryDecision} as this would + * invariably trigger an {@link com.datastax.driver.core.exceptions.InvalidQueryException + * InvalidQueryException}. Also, when {@code cl} is {@link ConsistencyLevel#isSerial() + * serial}, then {@code writeType} is always {@link WriteType#CAS CAS}. + * @param writeType the type of the write that timed out. + * @param requiredAcks the number of acknowledgments that were required to achieve the requested + * consistency level. + * @param receivedAcks the number of acknowledgments that had been received by the time the + * timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, a {@link + * com.datastax.driver.core.exceptions.WriteTimeoutException} will be thrown for the + * operation. + */ + RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry); + + /** + * Defines whether to retry and at which consistency level on an unavailable exception. + * + * @param statement the original query for which the consistency level cannot be achieved. + * @param cl the requested consistency level for the operation. If the operation failed at the + * "paxos" phase of a Lightweight + * transaction, then {@code cl} will actually be the requested {@link + * ConsistencyLevel#isSerial() serial} consistency level. Beware that serial consistency + * levels should never be passed to a {@link RetryDecision RetryDecision} as this would + * invariably trigger an {@link com.datastax.driver.core.exceptions.InvalidQueryException + * InvalidQueryException}. + * @param requiredReplica the number of replica that should have been (known) alive for the + * operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by the coordinator of the + * operation. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, an {@link + * com.datastax.driver.core.exceptions.UnavailableException} will be thrown for the operation. + */ + RetryDecision onUnavailable( + Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry); + + /** + * Defines whether to retry and at which consistency level on an unexpected error. + * + *
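Since the driver only consults this method for idempotent statements, marking statements as such is what actually enables write-timeout (and request-error) retries. A hedged sketch, not part of this patch; the keyspace, table and contact point are placeholders:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class IdempotenceExample {
  public static void main(String[] args) {
    // Per statement: this INSERT uses no counters, list appends or
    // non-deterministic functions, so it is safe to flag as idempotent.
    Statement insert =
        new SimpleStatement("INSERT INTO ks.users (id, name) VALUES (42, 'jane')")
            .setIdempotent(true); // ks.users is a placeholder table

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            // Or globally, when every statement the application runs is idempotent:
            .withQueryOptions(new QueryOptions().setDefaultIdempotence(true))
            .build();
    try {
      Session session = cluster.connect();
      session.execute(insert);
    } finally {
      cluster.close();
    }
  }
}
```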

    This method might be invoked in the following situations: + * + *

      + *
    1. On a client timeout, while waiting for the server response (see {@link + * SocketOptions#getReadTimeoutMillis()}); + *
    2. On a connection error (socket closed, etc.); + *
    3. When the contacted host replies with an {@code OVERLOADED} error, {@code SERVER_ERROR}, + * {@code READ_FAILURE} or {@code WRITE_FAILURE}. + *
    + * + *

    Note that when such an error occurs, there is no guarantee that the mutation has been + * applied server-side or not. Therefore, if a statement is {@link Statement#isIdempotent() not + * idempotent}, the driver will never retry it (this method won't even be called). + * + * @param statement the original query that failed. + * @param cl the requested consistency level for the operation. Note that this is not necessarily + * the achieved consistency level (if any), and it is never a {@link + * ConsistencyLevel#isSerial() serial} one. + * @param e the exception that caused this request to fail. + * @param nbRetry the number of retries already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, the {@link + * DriverException} passed to this method will be thrown for the operation. + */ + RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry); + + /** + * Gets invoked at cluster startup. + * + * @param cluster the cluster that this policy is associated with. + */ + void init(Cluster cluster); + + /** + * Gets invoked at cluster shutdown. + * + *

    This gives the policy the opportunity to perform some cleanup, for instance stop threads + * that it might have started. + */ + void close(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RollingCount.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RollingCount.java index 7cb53c1e027..2f33fcf9273 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/RollingCount.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RollingCount.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,99 +22,102 @@ import java.util.concurrent.atomic.AtomicLongArray; import java.util.concurrent.atomic.AtomicReference; -/** - * A "rolling" count over a 1-minute sliding window. - */ +/** A "rolling" count over a 1-minute sliding window. */ class RollingCount { - // Divide the minute into 5-second intervals - private static final long INTERVAL_SIZE = TimeUnit.SECONDS.toNanos(5); + // Divide the minute into 5-second intervals + private static final long INTERVAL_SIZE = TimeUnit.SECONDS.toNanos(5); - // A circular buffer containing the counts over the 12 previous intervals. Their sum is the count we're looking for. - // If we're at t = 61s this would span [0,60[ - private final AtomicLongArray previousIntervals = new AtomicLongArray(12); - // The interval we're currently recording events for. It hasn't completed yet, so it's not included in the count. - // If we're at t = 61s this would span [60,65[ - // Note that we don't expect very high concurrency on RollingCount (it's used to count errors), so AtomicLong is - // good enough here. - private final AtomicLong currentInterval = new AtomicLong(); - // Other mutable state, grouped in an object for atomic updates - private final AtomicReference state; - private final Clock clock; + // A circular buffer containing the counts over the 12 previous intervals. Their sum is the count + // we're looking for. + // If we're at t = 61s this would span [0,60[ + private final AtomicLongArray previousIntervals = new AtomicLongArray(12); + // The interval we're currently recording events for. It hasn't completed yet, so it's not + // included in the count. + // If we're at t = 61s this would span [60,65[ + // Note that we don't expect very high concurrency on RollingCount (it's used to count errors), so + // AtomicLong is + // good enough here. 
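The RetryPolicy contract reformatted above is easiest to grasp from a concrete implementation. The sketch below is illustrative only — it is not part of this patch and the class name is made up — but it uses the driver 3.x signatures and `RetryDecision` factories shown in the diff: it retries at most once on timeouts and unavailable errors, and otherwise rethrows.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.WriteType;
import com.datastax.driver.core.exceptions.DriverException;
import com.datastax.driver.core.policies.RetryPolicy;

/** Retries each failed request at most once, at the original consistency level. */
public class RetryOncePolicy implements RetryPolicy {

  @Override
  public RetryDecision onReadTimeout(
      Statement statement,
      ConsistencyLevel cl,
      int requiredResponses,
      int receivedResponses,
      boolean dataRetrieved,
      int nbRetry) {
    // Retry only on the first attempt, and only if no data was retrieved yet.
    return (nbRetry == 0 && !dataRetrieved) ? RetryDecision.retry(cl) : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onWriteTimeout(
      Statement statement,
      ConsistencyLevel cl,
      WriteType writeType,
      int requiredAcks,
      int receivedAcks,
      int nbRetry) {
    // Batch-log writes are safe to retry; surface the timeout for everything else.
    return (nbRetry == 0 && writeType == WriteType.BATCH_LOG)
        ? RetryDecision.retry(cl)
        : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onUnavailable(
      Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
    // Trying another coordinator may help if the first one was partitioned from the replicas.
    return (nbRetry == 0) ? RetryDecision.tryNextHost(cl) : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onRequestError(
      Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) {
    // Only idempotent statements reach this method, so moving to another host is safe.
    return (nbRetry == 0) ? RetryDecision.tryNextHost(cl) : RetryDecision.rethrow();
  }

  @Override
  public void init(Cluster cluster) {
    // no state to initialize
  }

  @Override
  public void close() {
    // nothing to clean up
  }
}
```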
+ private final AtomicLong currentInterval = new AtomicLong(); + // Other mutable state, grouped in an object for atomic updates + private final AtomicReference state; + private final Clock clock; - RollingCount(Clock clock) { - this.state = new AtomicReference(new State(clock.nanoTime())); - this.clock = clock; - } + RollingCount(Clock clock) { + this.state = new AtomicReference(new State(clock.nanoTime())); + this.clock = clock; + } - void increment() { - add(1); - } + void increment() { + add(1); + } - void add(long amount) { - tickIfNecessary(); - currentInterval.addAndGet(amount); - } + void add(long amount) { + tickIfNecessary(); + currentInterval.addAndGet(amount); + } - long get() { - tickIfNecessary(); - return state.get().totalCount; - } + long get() { + tickIfNecessary(); + return state.get().totalCount; + } - private void tickIfNecessary() { - State oldState = state.get(); - long newTick = clock.nanoTime(); - long age = newTick - oldState.lastTick; - if (age >= INTERVAL_SIZE) { - long currentCount = currentInterval.get(); + private void tickIfNecessary() { + State oldState = state.get(); + long newTick = clock.nanoTime(); + long age = newTick - oldState.lastTick; + if (age >= INTERVAL_SIZE) { + long currentCount = currentInterval.get(); - long newIntervalStartTick = newTick - age % INTERVAL_SIZE; - long elapsedIntervals = Math.min(age / INTERVAL_SIZE, 12); - int newOffset = (int) ((oldState.offset + elapsedIntervals) % 12); + long newIntervalStartTick = newTick - age % INTERVAL_SIZE; + long elapsedIntervals = Math.min(age / INTERVAL_SIZE, 12); + int newOffset = (int) ((oldState.offset + elapsedIntervals) % 12); - long newTotal; - if (elapsedIntervals == 12) { - // We wrapped around the circular buffer, all our values are stale - // Don't mutate previousIntervals yet because this part of the code is still multi-threaded. - newTotal = 0; - } else { - // Add the current interval that just completed - newTotal = oldState.totalCount + currentCount; - // Subtract all elapsed intervals: they're either idle ones, or the one at the old offset that we're - // about to replace - for (int i = 1; i <= elapsedIntervals; i++) { - newTotal -= previousIntervals.get((newOffset + 12 - i) % 12); - } - } + long newTotal; + if (elapsedIntervals == 12) { + // We wrapped around the circular buffer, all our values are stale + // Don't mutate previousIntervals yet because this part of the code is still multi-threaded. + newTotal = 0; + } else { + // Add the current interval that just completed + newTotal = oldState.totalCount + currentCount; + // Subtract all elapsed intervals: they're either idle ones, or the one at the old offset + // that we're + // about to replace + for (int i = 1; i <= elapsedIntervals; i++) { + newTotal -= previousIntervals.get((newOffset + 12 - i) % 12); + } + } - State newState = new State(newIntervalStartTick, newOffset, newTotal); - if (state.compareAndSet(oldState, newState)) { - // Only one thread gets here, so we can now: - // - reset the current count (don't use reset because other threads might already have started updating - // it) - currentInterval.addAndGet(-currentCount); - // - store the interval that just completed (or clear it if we wrapped) - previousIntervals.set(oldState.offset, elapsedIntervals < 12 ? 
currentCount : 0); - // - clear any idle interval - for (int i = 1; i < elapsedIntervals; i++) { - previousIntervals.set((newOffset + 12 - i) % 12, 0); - } - } + State newState = new State(newIntervalStartTick, newOffset, newTotal); + if (state.compareAndSet(oldState, newState)) { + // Only one thread gets here, so we can now: + // - reset the current count (don't use reset because other threads might already have + // started updating + // it) + currentInterval.addAndGet(-currentCount); + // - store the interval that just completed (or clear it if we wrapped) + previousIntervals.set(oldState.offset, elapsedIntervals < 12 ? currentCount : 0); + // - clear any idle interval + for (int i = 1; i < elapsedIntervals; i++) { + previousIntervals.set((newOffset + 12 - i) % 12, 0); } + } } + } - static class State { - final long lastTick; // last time the state was modified - final int offset; // the offset that the current interval will replace once it's complete - final long totalCount; // cache of sum(previousIntervals) + static class State { + final long lastTick; // last time the state was modified + final int offset; // the offset that the current interval will replace once it's complete + final long totalCount; // cache of sum(previousIntervals) - State(long lastTick) { - this(lastTick, 0, 0); - } + State(long lastTick) { + this(lastTick, 0, 0); + } - State(long lastTick, int offset, long totalCount) { - this.lastTick = lastTick; - this.offset = offset; - this.totalCount = totalCount; - } + State(long lastTick, int offset, long totalCount) { + this.lastTick = lastTick; + this.offset = offset; + this.totalCount = totalCount; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java index 6121da4c1b4..ab7b4a2e3bf 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
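The bucket arithmetic in RollingCount above is subtle. The following standalone, single-threaded sketch (illustrative only, not driver code) reproduces the same scheme: twelve 5-second buckets plus an in-progress bucket, where a completed bucket is rolled into the ring and anything older than one minute is evicted.

```
import java.util.Arrays;
import java.util.concurrent.TimeUnit;

/** Single-threaded illustration of the bucketing scheme used by RollingCount. */
public class SlidingWindowCounter {

  private static final long INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(5);
  private static final int BUCKETS = 12; // 12 * 5s = one minute

  private final long[] previous = new long[BUCKETS]; // completed 5-second intervals
  private long current; // interval in progress, not yet part of the total
  private long lastTick; // start of the interval in progress
  private int offset; // slot that `current` will occupy once it completes
  private long total; // cached sum of `previous`

  public SlidingWindowCounter(long startNanos) {
    this.lastTick = startNanos;
  }

  public void add(long nowNanos, long amount) {
    tick(nowNanos);
    current += amount;
  }

  public long get(long nowNanos) {
    tick(nowNanos);
    return total;
  }

  private void tick(long nowNanos) {
    long age = nowNanos - lastTick;
    if (age < INTERVAL_NANOS) {
      return; // still within the same 5-second interval
    }
    long elapsed = Math.min(age / INTERVAL_NANOS, BUCKETS);
    if (elapsed == BUCKETS) {
      // More than a minute elapsed: every bucket is stale.
      Arrays.fill(previous, 0);
      total = 0;
    } else {
      // Evict the slots that fall out of the one-minute window...
      for (int i = 0; i < elapsed; i++) {
        int stale = (offset + i) % BUCKETS;
        total -= previous[stale];
        previous[stale] = 0;
      }
      // ...then roll the interval that just completed into the ring.
      previous[offset] = current;
      total += current;
    }
    current = 0;
    offset = (int) ((offset + elapsed) % BUCKETS);
    lastTick = nowNanos - age % INTERVAL_NANOS;
  }
}
```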
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,150 +17,150 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Configuration; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Statement; import com.google.common.collect.AbstractIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A Round-robin load balancing policy. - *

    - * This policy queries nodes in a round-robin fashion. For a given query, - * if an host fail, the next one (following the round-robin order) is - * tried, until all hosts have been tried. - *

    - * This policy is not datacenter aware and will include every known - * Cassandra host in its round robin algorithm. If you use multiple - * datacenter this will be inefficient and you will want to use the - * {@link DCAwareRoundRobinPolicy} load balancing policy instead. + * + *

This policy queries nodes in a round-robin fashion. For a given query, if a host fails, the + * next one (following the round-robin order) is tried, until all hosts have been tried. + * + *

    This policy is not datacenter aware and will include every known Cassandra host in its round + * robin algorithm. If you use multiple datacenter this will be inefficient and you will want to use + * the {@link DCAwareRoundRobinPolicy} load balancing policy instead. */ public class RoundRobinPolicy implements LoadBalancingPolicy { - private static final Logger logger = LoggerFactory.getLogger(RoundRobinPolicy.class); - - private final CopyOnWriteArrayList liveHosts = new CopyOnWriteArrayList(); - private final AtomicInteger index = new AtomicInteger(); - - private volatile Configuration configuration; - private volatile boolean hasLoggedLocalCLUse; - - /** - * Creates a load balancing policy that picks host to query in a round robin - * fashion (on all the hosts of the Cassandra cluster). - */ - public RoundRobinPolicy() { - } - - @Override - public void init(Cluster cluster, Collection hosts) { - this.liveHosts.addAll(hosts); - this.configuration = cluster.getConfiguration(); - this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + private static final Logger logger = LoggerFactory.getLogger(RoundRobinPolicy.class); + + private final CopyOnWriteArrayList liveHosts = new CopyOnWriteArrayList(); + private final AtomicInteger index = new AtomicInteger(); + + private volatile Configuration configuration; + private volatile boolean hasLoggedLocalCLUse; + + /** + * Creates a load balancing policy that picks host to query in a round robin fashion (on all the + * hosts of the Cassandra cluster). + */ + public RoundRobinPolicy() {} + + @Override + public void init(Cluster cluster, Collection hosts) { + this.liveHosts.addAll(hosts); + this.configuration = cluster.getConfiguration(); + this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + } + + /** + * Return the HostDistance for the provided host. + * + *

This policy considers all nodes as local. This is generally the right thing to do in a single + * datacenter deployment. If you use multiple datacenters, see {@link DCAwareRoundRobinPolicy} + * instead. + * + * @param host the host of which to return the distance. + * @return the HostDistance to {@code host}. + */ + @Override + public HostDistance distance(Host host) { + return HostDistance.LOCAL; + } + + /** + * Returns the hosts to use for a new query. + * + *

    The returned plan will try each known host of the cluster. Upon each call to this method, + * the {@code i}th host of the plans returned will cycle over all the hosts of the cluster in a + * round-robin fashion. + * + * @param loggedKeyspace the keyspace currently logged in on for this query. + * @param statement the query for which to build the plan. + * @return a new query plan, i.e. an iterator indicating which host to try first for querying, + * which one to use as failover, etc... + */ + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + + if (!hasLoggedLocalCLUse) { + ConsistencyLevel cl = + statement.getConsistencyLevel() == null + ? configuration.getQueryOptions().getConsistencyLevel() + : statement.getConsistencyLevel(); + if (cl.isDCLocal()) { + hasLoggedLocalCLUse = true; + logger.warn( + "Detected request at Consistency Level {} but the non-DC aware RoundRobinPolicy is in use. " + + "It is strongly advised to use DCAwareRoundRobinPolicy if you have multiple DCs/use DC-aware consistency levels " + + "(note: this message will only be logged once)", + cl); + } } - /** - * Return the HostDistance for the provided host. - *

    - * This policy consider all nodes as local. This is generally the right - * thing to do in a single datacenter deployment. If you use multiple - * datacenter, see {@link DCAwareRoundRobinPolicy} instead. - * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host}. - */ - @Override - public HostDistance distance(Host host) { - return HostDistance.LOCAL; - } - - /** - * Returns the hosts to use for a new query. - *

    - * The returned plan will try each known host of the cluster. Upon each - * call to this method, the {@code i}th host of the plans returned will cycle - * over all the hosts of the cluster in a round-robin fashion. - * - * @param loggedKeyspace the keyspace currently logged in on for this - * query. - * @param statement the query for which to build the plan. - * @return a new query plan, i.e. an iterator indicating which host to - * try first for querying, which one to use as failover, etc... - */ - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - - if (!hasLoggedLocalCLUse) { - ConsistencyLevel cl = statement.getConsistencyLevel() == null - ? configuration.getQueryOptions().getConsistencyLevel() - : statement.getConsistencyLevel(); - if (cl.isDCLocal()) { - hasLoggedLocalCLUse = true; - logger.warn("Detected request at Consistency Level {} but the non-DC aware RoundRobinPolicy is in use. " - + "It is strongly advised to use DCAwareRoundRobinPolicy if you have multiple DCs/use DC-aware consistency levels " - + "(note: this message will only be logged once)", cl); - } - } - - // We clone liveHosts because we want a version of the list that - // cannot change concurrently of the query plan iterator (this - // would be racy). We use clone() as it don't involve a copy of the - // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList). - @SuppressWarnings("unchecked") - final List hosts = (List) liveHosts.clone(); - final int startIdx = index.getAndIncrement(); - - // Overflow protection; not theoretically thread safe but should be good enough - if (startIdx > Integer.MAX_VALUE - 10000) - index.set(0); - - return new AbstractIterator() { - - private int idx = startIdx; - private int remaining = hosts.size(); - - @Override - protected Host computeNext() { - if (remaining <= 0) - return endOfData(); - - remaining--; - int c = idx++ % hosts.size(); - if (c < 0) - c += hosts.size(); - return hosts.get(c); - } - }; - } - - @Override - public void onUp(Host host) { - liveHosts.addIfAbsent(host); - } - - @Override - public void onDown(Host host) { - liveHosts.remove(host); - } - - @Override - public void onAdd(Host host) { - onUp(host); - } - - @Override - public void onRemove(Host host) { - onDown(host); - } - - @Override - public void close() { - // nothing to do - } + // We clone liveHosts because we want a version of the list that + // cannot change concurrently of the query plan iterator (this + // would be racy). We use clone() as it don't involve a copy of the + // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList). 
+ @SuppressWarnings("unchecked") + final List hosts = (List) liveHosts.clone(); + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) index.set(0); + + return new AbstractIterator() { + + private int idx = startIdx; + private int remaining = hosts.size(); + + @Override + protected Host computeNext() { + if (remaining <= 0) return endOfData(); + + remaining--; + int c = idx++ % hosts.size(); + if (c < 0) c += hosts.size(); + return hosts.get(c); + } + }; + } + + @Override + public void onUp(Host host) { + liveHosts.addIfAbsent(host); + } + + @Override + public void onDown(Host host) { + liveHosts.remove(host); + } + + @Override + public void onAdd(Host host) { + onUp(host); + } + + @Override + public void onRemove(Host host) { + onDown(host); + } + + @Override + public void close() { + // nothing to do + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java index f936ca78c5c..82aa791693b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,53 +22,57 @@ import com.datastax.driver.core.Statement; /** - * The policy that decides if the driver will send speculative queries to the next hosts when the current host takes too - * long to respond. - *
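For context, the reformatted RoundRobinPolicy above is typically installed in driver 3.x user code as shown below (illustrative snippet, not part of this patch; the contact point is a placeholder).

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.policies.RoundRobinPolicy;

public class RoundRobinExample {
  public static void main(String[] args) {
    // Every known host is treated as LOCAL and tried in round-robin order;
    // suitable for single-datacenter clusters only.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(new RoundRobinPolicy())
            .build();
    try {
      Session session = cluster.connect();
      session.execute("SELECT release_version FROM system.local");
    } finally {
      cluster.close();
    }
  }
}
```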

    - * Note that only idempotent statements will be speculatively retried, see - * {@link com.datastax.driver.core.Statement#isIdempotent()} for more information. + * The policy that decides if the driver will send speculative queries to the next hosts when the + * current host takes too long to respond. + * + *

    Note that only idempotent statements will be speculatively retried, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. */ public interface SpeculativeExecutionPolicy { - /** - * Gets invoked at cluster startup. - * - * @param cluster the cluster that this policy is associated with. - */ - void init(Cluster cluster); + /** + * Gets invoked at cluster startup. + * + * @param cluster the cluster that this policy is associated with. + */ + void init(Cluster cluster); - /** - * Returns the plan to use for a new query. - * - * @param loggedKeyspace the currently logged keyspace (the one set through either - * {@link Cluster#connect(String)} or by manually doing a {@code USE} query) for - * the session on which this plan need to be built. This can be {@code null} if - * the corresponding session has no keyspace logged in. - * @param statement the query for which to build a plan. - * @return the plan. - */ - SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement); + /** + * Returns the plan to use for a new query. + * + * @param loggedKeyspace the currently logged keyspace (the one set through either {@link + * Cluster#connect(String)} or by manually doing a {@code USE} query) for the session on which + * this plan need to be built. This can be {@code null} if the corresponding session has no + * keyspace logged in. + * @param statement the query for which to build a plan. + * @return the plan. + */ + SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement); - /** - * Gets invoked at cluster shutdown. - *

    - * This gives the policy the opportunity to perform some cleanup, for instance stop threads that it might have started. - */ - void close(); + /** + * Gets invoked at cluster shutdown. + * + *

    This gives the policy the opportunity to perform some cleanup, for instance stop threads + * that it might have started. + */ + void close(); + /** + * A plan that governs speculative executions for a given query. + * + *

    Each time a host is queried, {@link #nextExecution(Host)} is invoked to determine if and + * when a speculative query to the next host will be sent. + */ + interface SpeculativeExecutionPlan { /** - * A plan that governs speculative executions for a given query. - *

    - * Each time a host is queried, {@link #nextExecution(Host)} is invoked to determine if and when a speculative query to - * the next host will be sent. + * Returns the time before the next speculative query. + * + * @param lastQueried the host that was just queried. + * @return the time (in milliseconds) before a speculative query is sent to the next host. If + * negative, no speculative query will be sent. If zero, it will immediately send the + * execution. Note that, prior to version 3.3.1, zero meant "no speculative query", so + * custom policies written at that time may now start to schedule more executions than + * expected; make sure you use a negative value, not zero, to stop executions. */ - interface SpeculativeExecutionPlan { - /** - * Returns the time before the next speculative query. - * - * @param lastQueried the host that was just queried. - * @return the time (in milliseconds) before a speculative query is sent to the next host. If zero or negative, - * no speculative query will be sent. - */ - long nextExecution(Host lastQueried); - } + long nextExecution(Host lastQueried); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java index d0b912c6127..ccfda1062d7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,178 +17,273 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.Statement; import com.google.common.collect.AbstractIterator; import com.google.common.collect.Lists; - import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Set; /** - * A wrapper load balancing policy that add token awareness to a child policy. - *
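A concrete policy helps illustrate the SpeculativeExecutionPlan contract documented above (a negative delay means "stop", and since 3.3.1 zero means "send immediately"). The sketch below is illustrative only — the driver already ships ConstantSpeculativeExecutionPolicy for this use case — and the class name is made up.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;

/** Sends at most a fixed number of speculative requests, a constant delay apart. */
public class ConstantDelayPolicy implements SpeculativeExecutionPolicy {

  private final long delayMillis;
  private final int maxSpeculativeExecutions;

  public ConstantDelayPolicy(long delayMillis, int maxSpeculativeExecutions) {
    this.delayMillis = delayMillis;
    this.maxSpeculativeExecutions = maxSpeculativeExecutions;
  }

  @Override
  public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) {
    return new SpeculativeExecutionPlan() {
      private int started;

      @Override
      public long nextExecution(Host lastQueried) {
        // Return a negative value (not zero) to stop scheduling speculative executions.
        return (started++ < maxSpeculativeExecutions) ? delayMillis : -1;
      }
    };
  }

  @Override
  public void init(Cluster cluster) {
    // no state to initialize
  }

  @Override
  public void close() {
    // nothing to clean up
  }
}
```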

    - * This policy encapsulates another policy. The resulting policy works in - * the following way: + * A wrapper load balancing policy that adds token awareness to a child policy. + * + *

    This policy encapsulates another policy. The resulting policy works in the following way: + * *

      - *
    • the {@code distance} method is inherited from the child policy.
    • - *
    • the iterator return by the {@code newQueryPlan} method will first - * return the {@code LOCAL} replicas for the query (based on {@link Statement#getRoutingKey}) - * if possible (i.e. if the query {@code getRoutingKey} method - * doesn't return {@code null} and if {@link Metadata#getReplicas} - * returns a non empty set of replicas for that partition key). If no - * local replica can be either found or successfully contacted, the rest - * of the query plan will fallback to one of the child policy.
    • + *
    • the {@code distance} method is inherited from the child policy. + *
    • the iterator returned by the {@code newQueryPlan} method will first return the {@link + * HostDistance#LOCAL LOCAL} replicas for the query if possible (i.e. if the query's + * {@linkplain Statement#getRoutingKey(ProtocolVersion, CodecRegistry) routing key} is not + * {@code null} and if the {@linkplain Metadata#getReplicas(String, ByteBuffer) set of + * replicas} for that partition key is not empty). If no local replica can be either found or + * successfully contacted, the rest of the query plan will fallback to the child policy's one. *
    - *

    - * Do note that only replica for which the child policy {@code distance} - * method returns {@code HostDistance.LOCAL} will be considered having - * priority. For example, if you wrap {@link DCAwareRoundRobinPolicy} with this - * token aware policy, replicas from remote data centers may only be - * returned after all the host of the local data center. + * + * The exact order in which local replicas are returned is dictated by the {@linkplain + * ReplicaOrdering strategy} provided at instantiation. + * + *

    Do note that only replicas for which the child policy's {@linkplain + * LoadBalancingPolicy#distance(Host) distance} method returns {@link HostDistance#LOCAL LOCAL} will + * be considered having priority. For example, if you wrap {@link DCAwareRoundRobinPolicy} with this + * token aware policy, replicas from remote data centers may only be returned after all the hosts of + * the local data center. */ public class TokenAwarePolicy implements ChainableLoadBalancingPolicy { - private final LoadBalancingPolicy childPolicy; - private final boolean shuffleReplicas; - private volatile Metadata clusterMetadata; - private volatile ProtocolVersion protocolVersion; - private volatile CodecRegistry codecRegistry; + /** Strategies for replica ordering. */ + public enum ReplicaOrdering { /** - * Creates a new {@code TokenAware} policy. + * Order replicas by token ring topology, i.e. always return the "primary" replica first, then + * the second, etc., according to the placement of replicas around the token ring. * - * @param childPolicy the load balancing policy to wrap with token awareness. - * @param shuffleReplicas whether to shuffle the replicas returned by {@code getRoutingKey}. - * Note that setting this parameter to {@code true} might decrease the - * effectiveness of caching (especially at consistency level ONE), since - * the same row will be retrieved from any replica (instead of only the - * "primary" replica without shuffling). - * On the other hand, shuffling will better distribute writes, and can - * alleviate hotspots caused by "fat" partitions. + *

    This strategy is the only one guaranteed to order replicas in a deterministic and constant + * way. This increases the effectiveness of server-side row caching (especially at consistency + * level ONE), but is more heavily impacted by hotspots, since the primary replica is always + * tried first. */ - public TokenAwarePolicy(LoadBalancingPolicy childPolicy, boolean shuffleReplicas) { - this.childPolicy = childPolicy; - this.shuffleReplicas = shuffleReplicas; - } + TOPOLOGICAL, /** - * Creates a new {@code TokenAware} policy with shuffling of replicas. + * Return replicas in a different, random order for each query plan. This is the default + * strategy. * - * @param childPolicy the load balancing policy to wrap with token - * awareness. - * @see #TokenAwarePolicy(LoadBalancingPolicy, boolean) + *

    This strategy fans out writes and thus can alleviate hotspots caused by "fat" partitions, + * but its randomness makes server-side caching less efficient. */ - public TokenAwarePolicy(LoadBalancingPolicy childPolicy) { - this(childPolicy, true); - } - - @Override - public LoadBalancingPolicy getChildPolicy() { - return childPolicy; - } - - @Override - public void init(Cluster cluster, Collection hosts) { - clusterMetadata = cluster.getMetadata(); - protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - codecRegistry = cluster.getConfiguration().getCodecRegistry(); - childPolicy.init(cluster, hosts); - } + RANDOM, /** - * Return the HostDistance for the provided host. + * Return the replicas in the exact same order in which they appear in the child policy's query + * plan. * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host} as returned by the wrapped policy. + *

    This is the only strategy that fully respects the child policy's replica ordering. Use it + * when it is important to keep that order intact (e.g. when using the {@link + * LatencyAwarePolicy}). */ - @Override - public HostDistance distance(Host host) { - return childPolicy.distance(host); - } + NEUTRAL + } - /** - * Returns the hosts to use for a new query. - *

    - * The returned plan will first return replicas (whose {@code HostDistance} - * for the child policy is {@code LOCAL}) for the query if it can determine - * them (i.e. mainly if {@code statement.getRoutingKey()} is not {@code null}). - * Following what it will return the plan of the child policy. - * - * @param statement the query for which to build the plan. - * @return the new query plan. - */ - @Override - public Iterator newQueryPlan(final String loggedKeyspace, final Statement statement) { - - ByteBuffer partitionKey = statement.getRoutingKey(protocolVersion, codecRegistry); - String keyspace = statement.getKeyspace(); - if (keyspace == null) - keyspace = loggedKeyspace; - - if (partitionKey == null || keyspace == null) - return childPolicy.newQueryPlan(keyspace, statement); - - final Set replicas = clusterMetadata.getReplicas(Metadata.quote(keyspace), partitionKey); - if (replicas.isEmpty()) - return childPolicy.newQueryPlan(loggedKeyspace, statement); - - final Iterator iter; - if (shuffleReplicas) { - List l = Lists.newArrayList(replicas); - Collections.shuffle(l); - iter = l.iterator(); - } else { - iter = replicas.iterator(); - } + private final LoadBalancingPolicy childPolicy; + private final ReplicaOrdering replicaOrdering; + private volatile Metadata clusterMetadata; + private volatile ProtocolVersion protocolVersion; + private volatile CodecRegistry codecRegistry; + + /** + * Creates a new {@code TokenAware} policy. + * + * @param childPolicy the load balancing policy to wrap with token awareness. + * @param replicaOrdering the strategy to use to order replicas. + */ + public TokenAwarePolicy(LoadBalancingPolicy childPolicy, ReplicaOrdering replicaOrdering) { + this.childPolicy = childPolicy; + this.replicaOrdering = replicaOrdering; + } + + /** + * Creates a new {@code TokenAware} policy. + * + * @param childPolicy the load balancing policy to wrap with token awareness. + * @param shuffleReplicas whether or not to shuffle the replicas. If {@code true}, then the {@link + * ReplicaOrdering#RANDOM RANDOM} strategy will be used, otherwise the {@link + * ReplicaOrdering#TOPOLOGICAL TOPOLOGICAL} one will be used. + * @deprecated Use {@link #TokenAwarePolicy(LoadBalancingPolicy, ReplicaOrdering)} instead. This + * constructor will be removed in the next major release. + */ + @SuppressWarnings("DeprecatedIsStillUsed") + @Deprecated + public TokenAwarePolicy(LoadBalancingPolicy childPolicy, boolean shuffleReplicas) { + this(childPolicy, shuffleReplicas ? ReplicaOrdering.RANDOM : ReplicaOrdering.TOPOLOGICAL); + } + + /** + * Creates a new {@code TokenAware} policy with {@link ReplicaOrdering#RANDOM RANDOM} replica + * ordering. + * + * @param childPolicy the load balancing policy to wrap with token awareness. + */ + public TokenAwarePolicy(LoadBalancingPolicy childPolicy) { + this(childPolicy, ReplicaOrdering.RANDOM); + } + + @Override + public LoadBalancingPolicy getChildPolicy() { + return childPolicy; + } + + @Override + public void init(Cluster cluster, Collection hosts) { + clusterMetadata = cluster.getMetadata(); + protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + codecRegistry = cluster.getConfiguration().getCodecRegistry(); + childPolicy.init(cluster, hosts); + } + + /** + * {@inheritDoc} + * + *

    This implementation always returns distances as reported by the wrapped policy. + */ + @Override + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } + + /** + * {@inheritDoc} + * + *

    The returned plan will first return local replicas for the query (i.e. replicas whose + * {@linkplain HostDistance distance} according to the child policy is {@code LOCAL}), if it can + * determine them (i.e. mainly if the statement's {@linkplain + * Statement#getRoutingKey(ProtocolVersion, CodecRegistry) routing key} is not {@code null}), and + * ordered according to the {@linkplain ReplicaOrdering ordering strategy} specified at + * instantiation; following what it will return the rest of the child policy's original query + * plan. + */ + @Override + public Iterator newQueryPlan(final String loggedKeyspace, final Statement statement) { + + ByteBuffer partitionKey = statement.getRoutingKey(protocolVersion, codecRegistry); + String keyspace = statement.getKeyspace(); + if (keyspace == null) keyspace = loggedKeyspace; - return new AbstractIterator() { + if (partitionKey == null || keyspace == null) + return childPolicy.newQueryPlan(keyspace, statement); - private Iterator childIterator; + final Set replicas = clusterMetadata.getReplicas(Metadata.quote(keyspace), partitionKey); + if (replicas.isEmpty()) return childPolicy.newQueryPlan(loggedKeyspace, statement); - @Override - protected Host computeNext() { - while (iter.hasNext()) { - Host host = iter.next(); - if (host.isUp() && childPolicy.distance(host) == HostDistance.LOCAL) - return host; - } + if (replicaOrdering == ReplicaOrdering.NEUTRAL) { - if (childIterator == null) - childIterator = childPolicy.newQueryPlan(loggedKeyspace, statement); + final Iterator childIterator = childPolicy.newQueryPlan(keyspace, statement); - while (childIterator.hasNext()) { - Host host = childIterator.next(); - // Skip it if it was already a local replica - if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) - return host; - } - return endOfData(); + return new AbstractIterator() { + + private List nonReplicas; + private Iterator nonReplicasIterator; + + @Override + protected Host computeNext() { + + while (childIterator.hasNext()) { + + Host host = childIterator.next(); + + if (host.isUp() + && replicas.contains(host) + && childPolicy.distance(host) == HostDistance.LOCAL) { + // UP replicas should be prioritized, retaining order from childPolicy + return host; + } else { + // save for later + if (nonReplicas == null) nonReplicas = new ArrayList(); + nonReplicas.add(host); } - }; - } + } - @Override - public void onUp(Host host) { - childPolicy.onUp(host); - } + // This should only engage if all local replicas are DOWN + if (nonReplicas != null) { - @Override - public void onDown(Host host) { - childPolicy.onDown(host); - } + if (nonReplicasIterator == null) nonReplicasIterator = nonReplicas.iterator(); - @Override - public void onAdd(Host host) { - childPolicy.onAdd(host); - } + if (nonReplicasIterator.hasNext()) return nonReplicasIterator.next(); + } - @Override - public void onRemove(Host host) { - childPolicy.onRemove(host); - } + return endOfData(); + } + }; + + } else { - @Override - public void close() { - childPolicy.close(); + final Iterator replicasIterator; + + if (replicaOrdering == ReplicaOrdering.RANDOM) { + List replicasList = Lists.newArrayList(replicas); + Collections.shuffle(replicasList); + replicasIterator = replicasList.iterator(); + } else { + replicasIterator = replicas.iterator(); + } + + return new AbstractIterator() { + + private Iterator childIterator; + + @Override + protected Host computeNext() { + while (replicasIterator.hasNext()) { + Host host = replicasIterator.next(); + if (host.isUp() && 
childPolicy.distance(host) == HostDistance.LOCAL) return host; + } + + if (childIterator == null) + childIterator = childPolicy.newQueryPlan(loggedKeyspace, statement); + + while (childIterator.hasNext()) { + Host host = childIterator.next(); + // Skip it if it was already a local replica + if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) + return host; + } + return endOfData(); + } + }; } + } + + @Override + public void onUp(Host host) { + childPolicy.onUp(host); + } + + @Override + public void onDown(Host host) { + childPolicy.onDown(host); + } + + @Override + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + @Override + public void onRemove(Host host) { + childPolicy.onRemove(host); + } + + @Override + public void close() { + childPolicy.close(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java index 83ca10267d3..9ef85186d15 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/WhiteListPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,52 +20,110 @@ import com.datastax.driver.core.Host; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableSet; - +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; import java.util.Collection; /** - * A load balancing policy wrapper that ensure that only hosts from a provided - * white list will ever be returned. - *
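Using the TokenAwarePolicy constructor and ReplicaOrdering enum introduced above might look as follows (illustrative snippet, not part of this patch; the data center name and contact point are placeholders).

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy.ReplicaOrdering;

public class TokenAwareExample {
  public static void main(String[] args) {
    // Wrap a DC-aware child policy: local replicas are tried first,
    // in a random order for each query plan (the default ordering).
    TokenAwarePolicy policy =
        new TokenAwarePolicy(
            DCAwareRoundRobinPolicy.builder().withLocalDc("dc1").build(),
            ReplicaOrdering.RANDOM);

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();
    // ... use cluster.connect() as usual, then cluster.close()
  }
}
```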

    - * This policy wraps another load balancing policy and will delegate the choice - * of hosts to the wrapped policy with the exception that only hosts contained - * in the white list provided when constructing this policy will ever be - * returned. Any host not in the while list will be considered {@code IGNORED} - * and thus will not be connected to. - *

- * This policy can be useful to ensure that the driver only connects to a - * predefined set of hosts. Keep in mind however that this policy defeats - * somewhat the host auto-detection of the driver. As such, this policy is only - * useful in a few special cases or for testing, but is not optimal in general. - * If all you want to do is limiting connections to hosts of the local - * data-center then you should use DCAwareRoundRobinPolicy and *not* this policy - * in particular. + * A load balancing policy wrapper that ensures that only hosts from a provided white list will ever + * be returned. + * + *

This policy wraps another load balancing policy and will delegate the choice of hosts to the + * wrapped policy with the exception that only hosts contained in the white list provided when + * constructing this policy will ever be returned. Any host not in the white list will be considered + * {@code IGNORED} and thus will not be connected to. + * + *

    This policy can be useful to ensure that the driver only connects to a predefined set of + * hosts. Keep in mind however that this policy defeats somewhat the host auto-detection of the + * driver. As such, this policy is only useful in a few special cases or for testing, but is not + * optimal in general. If all you want to do is limiting connections to hosts of the local + * data-center then you should use DCAwareRoundRobinPolicy and *not* this policy in particular. * * @see HostFilterPolicy */ public class WhiteListPolicy extends HostFilterPolicy { - /** - * Creates a new policy that wraps the provided child policy but only "allows" hosts - * from the provided while list. - * - * @param childPolicy the wrapped policy. - * @param whiteList the white listed hosts. Only hosts from this list may get connected - * to (whether they will get connected to or not depends on the child policy). - */ - public WhiteListPolicy(LoadBalancingPolicy childPolicy, Collection whiteList) { - super(childPolicy, buildPredicate(whiteList)); - } + /** + * Creates a new policy that wraps the provided child policy but only "allows" hosts from the + * provided white list. + * + * @param childPolicy the wrapped policy. + * @param whiteList the white listed hosts. Only hosts from this list may get connected to + * (whether they will get connected to or not depends on the child policy). + */ + public WhiteListPolicy(LoadBalancingPolicy childPolicy, Collection whiteList) { + super(childPolicy, buildPredicate(whiteList)); + } - private static Predicate buildPredicate(Collection whiteList) { - final ImmutableSet hosts = ImmutableSet.copyOf(whiteList); - return new Predicate() { - @Override - public boolean apply(Host host) { - return hosts.contains(host.getSocketAddress()); - } - }; - } + /** + * Private constructor solely for maintaining type from policy created by {@link + * #ofHosts(LoadBalancingPolicy, String...)}. + */ + private WhiteListPolicy(LoadBalancingPolicy childPolicy, Predicate predicate) { + super(childPolicy, predicate); + } + + private static Predicate buildPredicate(Collection whiteList) { + final ImmutableSet hosts = ImmutableSet.copyOf(whiteList); + return new Predicate() { + @Override + public boolean apply(Host host) { + // This policy shouldn't be used with endpoints that don't resolve to unique addresses. This + // should be pretty obvious from the API. We don't really have any way to check it here. + InetSocketAddress socketAddress = host.getEndPoint().resolve(); + return hosts.contains(socketAddress); + } + }; + } + /** + * Creates a new policy with the given host names. + * + *

    See {@link #ofHosts(LoadBalancingPolicy, Iterable)} for more details. + */ + public static WhiteListPolicy ofHosts(LoadBalancingPolicy childPolicy, String... hostnames) { + return ofHosts(childPolicy, Arrays.asList(hostnames)); + } + + /** + * Creates a new policy that wraps the provided child policy but only "allows" hosts having + * addresses that match those from the resolved input host names. + * + *
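As an illustration of the new ofHosts factory shown above (not part of this patch; host names are placeholders), user code could build a white-listed policy like this:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.WhiteListPolicy;

public class WhiteListExample {
  public static void main(String[] args) {
    // Only hosts whose addresses resolve from these names may be connected to;
    // an unresolvable name makes ofHosts throw IllegalArgumentException.
    WhiteListPolicy policy =
        WhiteListPolicy.ofHosts(
            new RoundRobinPolicy(), "cassandra1.example.com", "cassandra2.example.com");

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("cassandra1.example.com") // placeholder contact point
            .withLoadBalancingPolicy(policy)
            .build();
    // ... use cluster.connect() as usual, then cluster.close()
  }
}
```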

    Note that all host names must be non-null and resolvable; if any of them cannot be + * resolved, this method will fail. + * + * @param childPolicy the wrapped policy. + * @param hostnames list of host names to resolve whitelisted addresses from. + * @throws IllegalArgumentException if any of the given {@code hostnames} could not be resolved. + * @throws NullPointerException If null was provided for a hostname. + * @throws SecurityException if a security manager is present and permission to resolve the host + * name is denied. + */ + public static WhiteListPolicy ofHosts( + LoadBalancingPolicy childPolicy, Iterable hostnames) { + ImmutableSet.Builder builder = ImmutableSet.builder(); + for (String hostname : hostnames) { + try { + // We explicitly check for nulls because InetAdress.getByName() will happily + // accept it and use localhost (while a null here almost likely mean a user error, + // not "connect to localhost") + if (hostname == null) throw new NullPointerException(); + builder.add(InetAddress.getAllByName(hostname)); + } catch (UnknownHostException e) { + throw new IllegalArgumentException("Failed to resolve: " + hostname, e); + } + } + final ImmutableSet addresses = builder.build(); + return new WhiteListPolicy( + childPolicy, + new Predicate() { + @Override + public boolean apply(Host host) { + InetSocketAddress socketAddress = host.getEndPoint().resolve(); + return addresses.contains(socketAddress.getAddress()); + } + }); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java index 986935a8d21..cd0bd707554 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,5 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/** - * Policies that allow to control some of the behavior of the DataStax Java driver for Cassandra. - */ +/** Policies that allow to control some of the behavior of the Java Driver for Cassandra. 
*/ package com.datastax.driver.core.policies; diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java index 153baf7497b..fdf31ab384c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Assignment.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,211 +17,210 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.CodecRegistry; - -import java.util.List; - import static com.datastax.driver.core.querybuilder.Utils.appendName; import static com.datastax.driver.core.querybuilder.Utils.appendValue; +import com.datastax.driver.core.CodecRegistry; +import java.util.List; + public abstract class Assignment extends Utils.Appendeable { - final Object name; + final Object name; + + private Assignment(Object name) { + this.name = name; + } + + /** + * The name of the column this assignment applies to. + * + * @return the name of the column this assignment applies to. + */ + public String getColumnName() { + return name.toString(); + } + + abstract boolean isIdempotent(); + + static class SetAssignment extends Assignment { - private Assignment(Object name) { - this.name = name; + private final Object value; + + SetAssignment(Object name, Object value) { + super(name); + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb); + sb.append('='); + appendValue(value, codecRegistry, sb, variables); } - /** - * The name of the column this assignment applies to. - * - * @return the name of the column this assignment applies to. - */ - public String getColumnName() { - return name.toString(); + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return Utils.isIdempotent(value); + } + } + + static class CounterAssignment extends Assignment { + + private final Object value; + private final boolean isIncr; + + CounterAssignment(String name, Object value, boolean isIncr) { + super(name); + if (!isIncr && value instanceof Long && ((Long) value) < 0) { + this.value = -((Long) value); + this.isIncr = true; + } else { + this.value = value; + this.isIncr = isIncr; + } } - abstract boolean isIdempotent(); + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb).append('='); + appendName(name, codecRegistry, sb).append(isIncr ? 
"+" : "-"); + appendValue(value, codecRegistry, sb, variables); + } - static class SetAssignment extends Assignment { + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } - private final Object value; + @Override + boolean isIdempotent() { + return false; + } + } - SetAssignment(Object name, Object value) { - super(name); - this.value = value; - } + static class ListPrependAssignment extends Assignment { - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb); - sb.append('='); - appendValue(value, codecRegistry, sb, variables); - } + private final Object value; - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + ListPrependAssignment(String name, Object value) { + super(name); + this.value = value; + } - @Override - boolean isIdempotent() { - return Utils.isIdempotent(value); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb).append('='); + appendValue(value, codecRegistry, sb, variables); + sb.append('+'); + appendName(name, codecRegistry, sb); } - static class CounterAssignment extends Assignment { + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } - private final Object value; - private final boolean isIncr; + @Override + boolean isIdempotent() { + return false; + } + } - CounterAssignment(String name, Object value, boolean isIncr) { - super(name); - if (!isIncr && value instanceof Long && ((Long) value) < 0) { - this.value = -((Long) value); - this.isIncr = true; - } else { - this.value = value; - this.isIncr = isIncr; - } - } + static class ListSetIdxAssignment extends Assignment { - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb).append('='); - appendName(name, codecRegistry, sb).append(isIncr ? "+" : "-"); - appendValue(value, codecRegistry, sb, variables); - } - - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + private final int idx; + private final Object value; - @Override - boolean isIdempotent() { - return false; - } + ListSetIdxAssignment(String name, int idx, Object value) { + super(name); + this.idx = idx; + this.value = value; } - static class ListPrependAssignment extends Assignment { + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb).append('[').append(idx).append("]="); + appendValue(value, codecRegistry, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + + @Override + boolean isIdempotent() { + return true; + } + } + + static class CollectionAssignment extends Assignment { - private final Object value; + private final Object collection; + private final boolean isAdd; + private final boolean isIdempotent; + + CollectionAssignment(String name, Object collection, boolean isAdd, boolean isIdempotent) { + super(name); + this.collection = collection; + this.isAdd = isAdd; + this.isIdempotent = isIdempotent; + } + + CollectionAssignment(String name, Object collection, boolean isAdd) { + this(name, collection, isAdd, true); + } + + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb).append('='); + appendName(name, codecRegistry, sb).append(isAdd ? 
"+" : "-"); + appendValue(collection, codecRegistry, sb, variables); + } - ListPrependAssignment(String name, Object value) { - super(name); - this.value = value; - } - - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb).append('='); - appendValue(value, codecRegistry, sb, variables); - sb.append('+'); - appendName(name, codecRegistry, sb); - } - - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(collection); + } + + @Override + public boolean isIdempotent() { + return isIdempotent; + } + } + + static class MapPutAssignment extends Assignment { + + private final Object key; + private final Object value; + + MapPutAssignment(String name, Object key, Object value) { + super(name); + this.key = key; + this.value = value; + } + + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + appendName(name, codecRegistry, sb).append('['); + appendValue(key, codecRegistry, sb, variables); + sb.append("]="); + appendValue(value, codecRegistry, sb, variables); + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(key) || Utils.containsBindMarker(value); + } - @Override - boolean isIdempotent() { - return false; - } - } - - static class ListSetIdxAssignment extends Assignment { - - private final int idx; - private final Object value; - - ListSetIdxAssignment(String name, int idx, Object value) { - super(name); - this.idx = idx; - this.value = value; - } - - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb).append('[').append(idx).append("]="); - appendValue(value, codecRegistry, sb, variables); - } - - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } - - @Override - boolean isIdempotent() { - return true; - } - } - - static class CollectionAssignment extends Assignment { - - private final Object collection; - private final boolean isAdd; - private final boolean isIdempotent; - - CollectionAssignment(String name, Object collection, boolean isAdd, boolean isIdempotent) { - super(name); - this.collection = collection; - this.isAdd = isAdd; - this.isIdempotent = isIdempotent; - } - - CollectionAssignment(String name, Object collection, boolean isAdd) { - this(name, collection, isAdd, true); - } - - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb).append('='); - appendName(name, codecRegistry, sb).append(isAdd ? 
"+" : "-"); - appendValue(collection, codecRegistry, sb, variables); - } - - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(collection); - } - - @Override - public boolean isIdempotent() { - return isIdempotent; - } - } - - static class MapPutAssignment extends Assignment { - - private final Object key; - private final Object value; - - MapPutAssignment(String name, Object key, Object value) { - super(name); - this.key = key; - this.value = value; - } - - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - appendName(name, codecRegistry, sb).append('['); - appendValue(key, codecRegistry, sb, variables); - sb.append("]="); - appendValue(value, codecRegistry, sb, variables); - } - - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(key) || Utils.containsBindMarker(value); - } - - @Override - boolean isIdempotent() { - return true; - } + @Override + boolean isIdempotent() { + return true; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java index 9b92c4ed83f..3c89993cb6e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Batch.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,202 +17,196 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; - +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.SimpleStatement; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -/** - * A built BATCH statement. - */ +/** A built BATCH statement. */ public class Batch extends BuiltStatement { - private final List statements; - private final boolean logged; - private final Options usings; + private final List statements; + private final boolean logged; + private final Options usings; - // Only used when we add at last one statement that is not a BuiltStatement subclass - private int nonBuiltStatementValues; + // Only used when we add at last one statement that is not a BuiltStatement subclass + private int nonBuiltStatementValues; - Batch(RegularStatement[] statements, boolean logged) { - super(null, null, null); - this.statements = statements.length == 0 - ? 
new ArrayList() - : new ArrayList(statements.length); - this.logged = logged; - this.usings = new Options(this); + Batch(RegularStatement[] statements, boolean logged) { + super(null, null, null); + this.statements = + statements.length == 0 + ? new ArrayList() + : new ArrayList(statements.length); + this.logged = logged; + this.usings = new Options(this); - for (int i = 0; i < statements.length; i++) - add(statements[i]); - } + for (int i = 0; i < statements.length; i++) add(statements[i]); + } - @Override - StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); - - builder.append(isCounterOp() - ? "BEGIN COUNTER BATCH" - : (logged ? "BEGIN BATCH" : "BEGIN UNLOGGED BATCH")); - - if (!usings.usings.isEmpty()) { - builder.append(" USING "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); - } - builder.append(' '); - - for (int i = 0; i < statements.size(); i++) { - RegularStatement stmt = statements.get(i); - if (stmt instanceof BuiltStatement) { - BuiltStatement bst = (BuiltStatement) stmt; - builder.append(maybeAddSemicolon(bst.buildQueryString(variables, codecRegistry))); - - } else { - String str = stmt.getQueryString(codecRegistry); - builder.append(str); - if (!str.trim().endsWith(";")) - builder.append(';'); - - // Note that we force hasBindMarkers if there is any non-BuiltStatement, so we know - // that we can only get there with variables == null - assert variables == null; - } - } - builder.append("APPLY BATCH;"); - return builder; - } + @Override + StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); - /** - * Adds a new statement to this batch. - * - * @param statement the new statement to add. - * @return this batch. - * @throws IllegalArgumentException if counter and non-counter operations - * are mixed. - */ - public Batch add(RegularStatement statement) { - boolean isCounterOp = statement instanceof BuiltStatement && ((BuiltStatement) statement).isCounterOp(); + builder.append( + isCounterOp() ? "BEGIN COUNTER BATCH" : (logged ? "BEGIN BATCH" : "BEGIN UNLOGGED BATCH")); - if (this.isCounterOp == null) - setCounterOp(isCounterOp); - else if (isCounterOp() != isCounterOp) - throw new IllegalArgumentException("Cannot mix counter operations and non-counter operations in a batch statement"); + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); + } + builder.append(' '); + + for (int i = 0; i < statements.size(); i++) { + RegularStatement stmt = statements.get(i); + if (stmt instanceof BuiltStatement) { + BuiltStatement bst = (BuiltStatement) stmt; + builder.append(maybeAddSemicolon(bst.buildQueryString(variables, codecRegistry))); + + } else { + String str = stmt.getQueryString(codecRegistry); + builder.append(str); + if (!str.trim().endsWith(";")) builder.append(';'); + + // Note that we force hasBindMarkers if there is any non-BuiltStatement, so we know + // that we can only get there with variables == null + assert variables == null; + } + } + builder.append("APPLY BATCH;"); + return builder; + } + + /** + * Adds a new statement to this batch. + * + * @param statement the new statement to add. + * @return this batch. + * @throws IllegalArgumentException if counter and non-counter operations are mixed. 
+ */ + public Batch add(RegularStatement statement) { + boolean isCounterOp = + statement instanceof BuiltStatement && ((BuiltStatement) statement).isCounterOp(); + + if (this.isCounterOp == null) setCounterOp(isCounterOp); + else if (isCounterOp() != isCounterOp) + throw new IllegalArgumentException( + "Cannot mix counter operations and non-counter operations in a batch statement"); + + this.statements.add(statement); + + if (statement instanceof BuiltStatement) { + this.hasBindMarkers |= ((BuiltStatement) statement).hasBindMarkers; + } else { + // For non-BuiltStatement, we cannot know if it includes a bind makers and we assume it does. + // In practice, + // this means we will always serialize values as strings when there is non-BuiltStatement + this.hasBindMarkers = true; + this.nonBuiltStatementValues += ((SimpleStatement) statement).valuesCount(); + } - this.statements.add(statement); + setDirty(); - if (statement instanceof BuiltStatement) { - this.hasBindMarkers |= ((BuiltStatement) statement).hasBindMarkers; - } else { - // For non-BuiltStatement, we cannot know if it includes a bind makers and we assume it does. In practice, - // this means we will always serialize values as strings when there is non-BuiltStatement - this.hasBindMarkers = true; - this.nonBuiltStatementValues += ((SimpleStatement) statement).valuesCount(); - } + return this; + } - setDirty(); + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + // If there is some non-BuiltStatement inside the batch with values, we shouldn't + // use super.getValues() since it will ignore the values of said non-BuiltStatement. + // If that's the case, we just collects all those values (and we know + // super.getValues() == null in that case since we've explicitely set this.hasBindMarker + // to true). Otherwise, we simply call super.getValues(). + if (nonBuiltStatementValues == 0) return super.getValues(protocolVersion, codecRegistry); - return this; - } + ByteBuffer[] values = new ByteBuffer[nonBuiltStatementValues]; + int i = 0; + for (RegularStatement statement : statements) { + if (statement instanceof BuiltStatement) continue; - @Override - public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - // If there is some non-BuiltStatement inside the batch with values, we shouldn't - // use super.getValues() since it will ignore the values of said non-BuiltStatement. - // If that's the case, we just collects all those values (and we know - // super.getValues() == null in that case since we've explicitely set this.hasBindMarker - // to true). Otherwise, we simply call super.getValues(). - if (nonBuiltStatementValues == 0) - return super.getValues(protocolVersion, codecRegistry); - - ByteBuffer[] values = new ByteBuffer[nonBuiltStatementValues]; - int i = 0; - for (RegularStatement statement : statements) { - if (statement instanceof BuiltStatement) - continue; - - ByteBuffer[] statementValues = statement.getValues(protocolVersion, codecRegistry); - System.arraycopy(statementValues, 0, values, i, statementValues.length); - i += statementValues.length; - } - return values; + ByteBuffer[] statementValues = statement.getValues(protocolVersion, codecRegistry); + System.arraycopy(statementValues, 0, values, i, statementValues.length); + i += statementValues.length; } - - /** - * Adds a new options for this BATCH statement. - * - * @param using the option to add. - * @return the options of this BATCH statement. 
- */ - public Options using(Using using) { - return usings.and(using); + return values; + } + + /** + * Adds a new options for this BATCH statement. + * + * @param using the option to add. + * @return the options of this BATCH statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the first non-null routing key of the statements in this batch or null otherwise. + * + * @return the routing key for this batch statement. + */ + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + for (RegularStatement statement : statements) { + ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); + if (routingKey != null) { + return routingKey; + } + } + return null; + } + + /** + * Returns the keyspace of the first statement in this batch. + * + * @return the keyspace of the first statement in this batch. + */ + @Override + public String getKeyspace() { + return statements.isEmpty() ? null : statements.get(0).getKeyspace(); + } + + @Override + public Boolean isIdempotent() { + if (idempotent != null) { + return idempotent; } + return isBatchIdempotent(statements); + } - /** - * Returns the first non-null routing key of the statements in this batch - * or null otherwise. - * - * @return the routing key for this batch statement. - */ - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - for (RegularStatement statement : statements) { - ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); - if (routingKey != null) { - return routingKey; - } - } - return null; + /** The options of a BATCH statement. */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Batch statement) { + super(statement); } /** - * Returns the keyspace of the first statement in this batch. + * Adds the provided option. * - * @return the keyspace of the first statement in this batch. + * @param using a BATCH option. + * @return this {@code Options} object. */ - @Override - public String getKeyspace() { - return statements.isEmpty() ? null : statements.get(0).getKeyspace(); - } - - @Override - public Boolean isIdempotent() { - if (idempotent != null) { - return idempotent; - } - return isBatchIdempotent(statements); + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; } /** - * The options of a BATCH statement. + * Adds a new statement to the BATCH statement these options are part of. + * + * @param statement the statement to add. + * @return the BATCH statement these options are part of. */ - public static class Options extends BuiltStatement.ForwardingStatement { - - private final List usings = new ArrayList(); - - Options(Batch statement) { - super(statement); - } - - /** - * Adds the provided option. - * - * @param using a BATCH option. - * @return this {@code Options} object. - */ - public Options and(Using using) { - usings.add(using); - checkForBindMarkers(using); - return this; - } - - /** - * Adds a new statement to the BATCH statement these options are part of. - * - * @param statement the statement to add. - * @return the BATCH statement these options are part of. 
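For reviewers who want to exercise the batch API shown in this hunk, here is a minimal, illustrative sketch (not part of the patch). The keyspace, table and column names (`ks`, `users`, `page_views`, `id`, `name`, `views`, `page`) and the timestamp are placeholders, and the query strings in the comments are approximate. It shows `using()` attaching a BATCH-level option and the counter/non-counter rule that `add()` enforces.

```
import static com.datastax.driver.core.querybuilder.QueryBuilder.batch;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.incr;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;
import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp;
import static com.datastax.driver.core.querybuilder.QueryBuilder.update;

import com.datastax.driver.core.querybuilder.Batch;

public class BatchSketch {
  public static void main(String[] args) {
    // A logged batch of regular statements; using() registers a BATCH-level option
    // on the batch (rendered in the USING clause of the generated query).
    Batch logged =
        batch(
            insertInto("ks", "users").value("id", 42).value("name", "alice"),
            update("ks", "users").with(set("name", "bob")).where(eq("id", 43)));
    logged.using(timestamp(1234567L));
    System.out.println(logged.getQueryString()); // BEGIN BATCH USING TIMESTAMP 1234567 ... APPLY BATCH;

    // Counter updates must live in their own batch: add() throws IllegalArgumentException
    // if counter and non-counter operations end up in the same Batch.
    Batch counters =
        batch(update("ks", "page_views").with(incr("views", 1)).where(eq("page", "home")));
    System.out.println(counters.getQueryString()); // BEGIN COUNTER BATCH ... APPLY BATCH;
  }
}
```

Keeping counter updates in their own batch is what makes the builder emit `BEGIN COUNTER BATCH` instead of `BEGIN BATCH`.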
- */ - public Batch add(RegularStatement statement) { - return this.statement.add(statement); - } + public Batch add(RegularStatement statement) { + return this.statement.add(statement); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java index 23e2cdff77d..e9b6427ec05 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BindMarker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +19,27 @@ /** * A CQL3 bind marker. - *

    - * This can be either an anonymous bind marker or a named one (but note that - * named ones are only supported starting in Cassandra 2.0.1). - *

    - * Please note that to create a new bind maker object you should use - * {@link QueryBuilder#bindMarker()} (anonymous marker) or - * {@link QueryBuilder#bindMarker(String)} (named marker). + * + *

    This can be either an anonymous bind marker or a named one (but note that named ones are only + * supported starting in Cassandra 2.0.1). + * + *

    Please note that to create a new bind maker object you should use {@link + * QueryBuilder#bindMarker()} (anonymous marker) or {@link QueryBuilder#bindMarker(String)} (named + * marker). */ public class BindMarker { - static final BindMarker ANONYMOUS = new BindMarker(null); + static final BindMarker ANONYMOUS = new BindMarker(null); - private final String name; + private final String name; - BindMarker(String name) { - this.name = name; - } + BindMarker(String name) { + this.name = name; + } - @Override - public String toString() { - if (name == null) - return "?"; + @Override + public String toString() { + if (name == null) return "?"; - return Utils.appendName(name, new StringBuilder(name.length() + 1).append(':')).toString(); - } + return Utils.appendName(name, new StringBuilder(name.length() + 1).append(':')).toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java index 73ef66772a7..76e6cb45189 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/BuiltStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +17,16 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.ColumnMetadata; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.policies.RetryPolicy; - import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -26,435 +34,429 @@ /** * Common ancestor to statements generated with the {@link QueryBuilder}. - *
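To illustrate the two flavors of `BindMarker` described above, a small sketch using the entry points referenced in that javadoc (`QueryBuilder.bindMarker()` and `QueryBuilder.bindMarker(String)`); the table `foo` and columns `k`/`v` are placeholders and the rendered strings in the comments are approximate.

```
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;

import com.datastax.driver.core.RegularStatement;

public class BindMarkerSketch {
  public static void main(String[] args) {
    // Anonymous marker: rendered as "?".
    RegularStatement byKey = select().all().from("foo").where(eq("k", bindMarker()));
    System.out.println(byKey.getQueryString()); // SELECT * FROM foo WHERE k=?;

    // Named markers: rendered as ":k" and ":v" (named markers need Cassandra 2.0.1+),
    // typically used when the statement is meant to be prepared.
    RegularStatement insert =
        insertInto("foo").value("k", bindMarker("k")).value("v", bindMarker("v"));
    System.out.println(insert.getQueryString()); // INSERT INTO foo (k,v) VALUES (:k,:v);
  }
}
```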

    - * The actual query string will be generated and cached the first time it is requested, - * which is either when the driver tries to execute the query, or when you call certain - * public methods (for example {@link RegularStatement#getQueryString(CodecRegistry)}, - * {@link #getObject(int, CodecRegistry)}). - *

    - * Whenever possible (and unless you call {@link #setForceNoValues(boolean)}, the builder - * will try to handle values passed to its methods as standalone values bound to the query - * string with placeholders. For instance: + * + *

    The actual query string will be generated and cached the first time it is requested, which is + * either when the driver tries to execute the query, or when you call certain public methods (for + * example {@link RegularStatement#getQueryString(CodecRegistry)}, {@link #getObject(int, + * CodecRegistry)}). + * + *

    Whenever possible (and unless you call {@link #setForceNoValues(boolean)}, the builder will + * try to handle values passed to its methods as standalone values bound to the query string with + * placeholders. For instance: + * *

      *     select().all().from("foo").where(eq("k", "the key"));
      *     // Is equivalent to:
      *     new SimpleStatement("SELECT * FROM foo WHERE k=?", "the key");
      * 
    + * * There are a few exceptions to this rule: + * *
      - *
    • for fixed-size number types, the builder can't guess what the actual CQL type - * is. Standalone values are sent to Cassandra in their serialized form, and number - * types aren't all serialized in the same way, so picking the wrong type could - * lead to a query error;
    • - *
    • if the value is a "special" element like a function call, it can't be serialized. - * This also applies to collections mixing special elements and regular objects.
    • + *
    • for fixed-size number types, the builder can't guess what the actual CQL type is. + * Standalone values are sent to Cassandra in their serialized form, and number types aren't + * all serialized in the same way, so picking the wrong type could lead to a query error; + *
    • if the value is a "special" element like a function call, it can't be serialized. This also + * applies to collections mixing special elements and regular objects. *
    + * * In these cases, the builder will inline the value in the query string: + * *
      *     select().all().from("foo").where(eq("k", 1));
      *     // Is equivalent to:
      *     new SimpleStatement("SELECT * FROM foo WHERE k=1");
      * 
    - * One final thing to consider is {@link CodecRegistry custom codecs}. If you've registered - * codecs to handle your own Java types against the cluster, then you can pass instances of - * those types to query builder methods. But should the builder have to inline those values, - * it needs your codecs to {@link TypeCodec#format(Object) convert them to string form}. - * That is why some of the public methods of this class take a {@code CodecRegistry} as a - * parameter: + * + * One final thing to consider is {@link CodecRegistry custom codecs}. If you've registered codecs + * to handle your own Java types against the cluster, then you can pass instances of those types to + * query builder methods. But should the builder have to inline those values, it needs your codecs + * to {@link TypeCodec#format(Object) convert them to string form}. That is why some of the public + * methods of this class take a {@code CodecRegistry} as a parameter: + * *
      *     BuiltStatement s = select().all().from("foo").where(eq("k", myCustomObject));
      *     // if we do this, codecs will definitely be needed:
      *     s.setForceNoValues(true);
      *     s.getQueryString(myCodecRegistry);
      * 
    - * For convenience, there are no-arg versions of those methods that use - * {@link CodecRegistry#DEFAULT_INSTANCE}. But you should only use them if you are sure that - * no custom values will need to be inlined while building the statement, or if you have - * registered your custom codecs with the default registry instance. Otherwise, you will get - * a {@link CodecNotFoundException}. + * + * For convenience, there are no-arg versions of those methods that use {@link + * CodecRegistry#DEFAULT_INSTANCE}. But you should only use them if you are sure that no custom + * values will need to be inlined while building the statement, or if you have registered your + * custom codecs with the default registry instance. Otherwise, you will get a {@link + * CodecNotFoundException}. */ public abstract class BuiltStatement extends RegularStatement { - private final List partitionKey; - private final List routingKeyValues; - final String keyspace; - - private boolean dirty; - private String cache; - private List values; - Boolean isCounterOp; - boolean hasNonIdempotentOps; - - // Whether the user has inputted bind markers. If that's the case, we never generate values as - // it means the user meant for the statement to be prepared and we shouldn't add our own markers. - boolean hasBindMarkers; - private boolean forceNoValues; - - BuiltStatement(String keyspace, List partitionKey, List routingKeyValues) { - this.partitionKey = partitionKey; - this.routingKeyValues = routingKeyValues; - this.keyspace = keyspace; + private final List partitionKey; + private final List routingKeyValues; + final String keyspace; + + private boolean dirty; + private String cache; + private List values; + Boolean isCounterOp; + boolean hasNonIdempotentOps; + + // Whether the user has inputted bind markers. If that's the case, we never generate values as + // it means the user meant for the statement to be prepared and we shouldn't add our own markers. + boolean hasBindMarkers; + private boolean forceNoValues; + + BuiltStatement( + String keyspace, List partitionKey, List routingKeyValues) { + this.partitionKey = partitionKey; + this.routingKeyValues = routingKeyValues; + this.keyspace = keyspace; + } + + /** + * @deprecated preserved for backward compatibility, use {@link Metadata#quoteIfNecessary(String)} + * instead. + */ + @Deprecated + protected static String escapeId(String ident) { + return Metadata.quoteIfNecessary(ident); + } + + @Override + public String getQueryString(CodecRegistry codecRegistry) { + maybeRebuildCache(codecRegistry); + return cache; + } + + /** + * Returns the {@code i}th value as the Java type matching its CQL type. + * + * @param i the index to retrieve. + * @param codecRegistry the codec registry that will be used if the statement must be rebuilt in + * order to determine if it has values, and Java objects must be inlined in the process (see + * {@link BuiltStatement} for more explanations on why this is so). + * @return the value of the {@code i}th value of this statement. + * @throws IllegalStateException if this statement does not have values. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. 
+ * @see #getObject(int) + */ + public Object getObject(int i, CodecRegistry codecRegistry) { + maybeRebuildCache(codecRegistry); + if (values == null || values.isEmpty()) + throw new IllegalStateException("This statement does not have values"); + if (i < 0 || i >= values.size()) throw new ArrayIndexOutOfBoundsException(i); + return values.get(i); + } + + /** + * Returns the {@code i}th value as the Java type matching its CQL type. + * + *

    This method calls {@link #getObject(int, CodecRegistry)} with {@link + * CodecRegistry#DEFAULT_INSTANCE}. It's safe to use if you don't use any custom codecs, or if + * your custom codecs are in the default registry; otherwise, use the other method and provide the + * registry that contains your codecs. + * + * @param i the index to retrieve. + * @return the value of the {@code i}th value of this statement. + * @throws IllegalStateException if this statement does not have values. + * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. + */ + public Object getObject(int i) { + return getObject(i, CodecRegistry.DEFAULT_INSTANCE); + } + + private void maybeRebuildCache(CodecRegistry codecRegistry) { + if (!dirty && cache != null) return; + + StringBuilder sb; + values = null; + + if (hasBindMarkers || forceNoValues) { + sb = buildQueryString(null, codecRegistry); + } else { + values = new ArrayList(); + sb = buildQueryString(values, codecRegistry); + + if (values.size() > 65535) + throw new IllegalArgumentException( + "Too many values for built statement, the maximum allowed is 65535"); + + if (values.isEmpty()) values = null; } - /** - * @deprecated preserved for backward compatibility, use {@link Metadata#quoteIfNecessary(String)} instead. - */ - @Deprecated - protected static String escapeId(String ident) { - return Metadata.quoteIfNecessary(ident); + maybeAddSemicolon(sb); + + cache = sb.toString(); + dirty = false; + } + + static StringBuilder maybeAddSemicolon(StringBuilder sb) { + // Use the same test that String#trim() uses to determine + // if a character is a whitespace character. + int l = sb.length(); + while (l > 0 && sb.charAt(l - 1) <= ' ') l -= 1; + if (l != sb.length()) sb.setLength(l); + + if (l == 0 || sb.charAt(l - 1) != ';') sb.append(';'); + return sb; + } + + abstract StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry); + + boolean isCounterOp() { + return isCounterOp == null ? false : isCounterOp; + } + + void setCounterOp(boolean isCounterOp) { + this.isCounterOp = isCounterOp; + } + + boolean hasNonIdempotentOps() { + return hasNonIdempotentOps; + } + + void setNonIdempotentOps() { + hasNonIdempotentOps = true; + } + + void setDirty() { + dirty = true; + } + + void checkForBindMarkers(Object value) { + dirty = true; + if (Utils.containsBindMarker(value)) hasBindMarkers = true; + } + + void checkForBindMarkers(Utils.Appendeable value) { + dirty = true; + if (value != null && value.containsBindMarker()) hasBindMarkers = true; + } + + // TODO: Correctly document the InvalidTypeException + void maybeAddRoutingKey(String name, Object value) { + if (routingKeyValues == null + || name == null + || value == null + || Utils.containsSpecialValue(value)) return; + + for (int i = 0; i < partitionKey.size(); i++) { + if (Utils.handleId(name).equals(partitionKey.get(i).getName())) { + routingKeyValues.set(i, value); + return; + } + } + } + + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + if (routingKeyValues == null) return null; + ByteBuffer[] routingKeyParts = new ByteBuffer[partitionKey.size()]; + for (int i = 0; i < partitionKey.size(); i++) { + Object value = routingKeyValues.get(i); + if (value == null) return null; + TypeCodec codec = codecRegistry.codecFor(partitionKey.get(i).getType(), value); + routingKeyParts[i] = codec.serialize(value, protocolVersion); + } + return routingKeyParts.length == 1 ? 
routingKeyParts[0] : Utils.compose(routingKeyParts); + } + + @Override + public String getKeyspace() { + return keyspace; + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + maybeRebuildCache(codecRegistry); + return values == null ? null : Utils.convert(values.toArray(), protocolVersion, codecRegistry); + } + + @Override + public boolean hasValues(CodecRegistry codecRegistry) { + maybeRebuildCache(codecRegistry); + return values != null; + } + + @Override + public Map getNamedValues( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + // Built statements never return named values + return null; + } + + @Override + public boolean usesNamedValues() { + return false; + } + + @Override + public Boolean isIdempotent() { + // If a value was forced with setIdempotent, it takes priority + if (idempotent != null) return idempotent; + + // Otherwise return the computed value + return !hasNonIdempotentOps(); + } + + @Override + public String toString() { + try { + if (forceNoValues) return getQueryString(); + // 1) try first with all values inlined (will not work if some values require custom codecs, + // or if the required codecs are registered in a different CodecRegistry instance than the + // default one) + return maybeAddSemicolon(buildQueryString(null, CodecRegistry.DEFAULT_INSTANCE)).toString(); + } catch (RuntimeException e1) { + // 2) try next with bind markers for all values to avoid usage of custom codecs + try { + return maybeAddSemicolon( + buildQueryString(new ArrayList(), CodecRegistry.DEFAULT_INSTANCE)) + .toString(); + } catch (RuntimeException e2) { + // Ugly but we have absolutely no context to get the registry from + return String.format( + "built query (could not generate with default codec registry: %s)", e2.getMessage()); + } + } + } + + /** + * Allows to force this builder to not generate values (through its {@code getValues()} method). + * + *

    By default (and unless the protocol version 1 is in use, see below) and for performance + * reasons, the query builder will not serialize all values provided to strings. This means that + * {@link #getQueryString(CodecRegistry)} may return a query string with bind markers (where and + * when is at the discretion of the builder) and {@link #getValues} will return the binary values + * for those markers. This method allows to force the builder to not generate binary values but + * rather to inline them all in the query string. In practice, this means that if you call {@code + * setForceNoValues(true)}, you are guaranteed that {@code getValues()} will return {@code null} + * and that the string returned by {@code getQueryString()} will contain no other bind markers + * than the ones specified by the user. + * + *

    If the native protocol version 1 is in use, the driver will default to not generating values + * since those are not supported by that version of the protocol. In practice, the driver will + * automatically call this method with {@code true} as argument prior to execution. Hence, calling + * this method when the protocol version 1 is in use is basically a no-op. + * + *

    Note that this method is mainly useful for debugging purpose. In general, the default + * behavior should be the correct and most efficient one. + * + * @param forceNoValues whether or not this builder may generate values. + * @return this statement. + */ + public RegularStatement setForceNoValues(boolean forceNoValues) { + this.forceNoValues = forceNoValues; + this.dirty = true; + return this; + } + + /** An utility class to create a BuiltStatement that encapsulate another one. */ + abstract static class ForwardingStatement extends BuiltStatement { + + T statement; + + ForwardingStatement(T statement) { + super(null, null, null); + this.statement = statement; } @Override public String getQueryString(CodecRegistry codecRegistry) { - maybeRebuildCache(codecRegistry); - return cache; - } - - /** - * Returns the {@code i}th value as the Java type matching its CQL type. - * - * @param i the index to retrieve. - * @param codecRegistry the codec registry that will be used if the statement must be - * rebuilt in order to determine if it has values, and Java objects - * must be inlined in the process (see {@link BuiltStatement} for - * more explanations on why this is so). - * @return the value of the {@code i}th value of this statement. - * @throws IllegalStateException if this statement does not have values. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - * @see #getObject(int) - */ - public Object getObject(int i, CodecRegistry codecRegistry) { - maybeRebuildCache(codecRegistry); - if (values == null || values.isEmpty()) - throw new IllegalStateException("This statement does not have values"); - if (i < 0 || i >= values.size()) - throw new ArrayIndexOutOfBoundsException(i); - return values.get(i); + return statement.getQueryString(codecRegistry); } - /** - * Returns the {@code i}th value as the Java type matching its CQL type. - *
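A small usage sketch of the `setForceNoValues` contract described above (the table `foo` and the value are taken from the class javadoc's own example; `ProtocolVersion.V4` is just one version to pass to `getValues`, and the printed strings are approximate): by default the builder may bind the value and expose it through `getValues()`, whereas forcing no values guarantees `getValues()` returns `null` and the value is inlined.

```
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;

import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.querybuilder.BuiltStatement;

public class ForceNoValuesSketch {
  public static void main(String[] args) {
    BuiltStatement s = select().all().from("foo").where(eq("k", "the key"));

    // Default behavior: the builder may bind the value, so the query string contains
    // a generated marker and getValues() returns the corresponding binary value.
    System.out.println(s.getQueryString()); // SELECT * FROM foo WHERE k=?;
    System.out.println(
        s.getValues(ProtocolVersion.V4, CodecRegistry.DEFAULT_INSTANCE) != null); // true

    // After forcing no values: getValues() is guaranteed to return null and the
    // value is inlined in the query string.
    s.setForceNoValues(true);
    System.out.println(s.getQueryString()); // SELECT * FROM foo WHERE k='the key';
    System.out.println(
        s.getValues(ProtocolVersion.V4, CodecRegistry.DEFAULT_INSTANCE) == null); // true
  }
}
```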

    - * This method calls {@link #getObject(int, CodecRegistry)} with - * {@link CodecRegistry#DEFAULT_INSTANCE}. - * It's safe to use if you don't use any custom codecs, or if your custom codecs are in - * the default registry; otherwise, use the other method and provide the registry that - * contains your codecs. - * - * @param i the index to retrieve. - * @return the value of the {@code i}th value of this statement. - * @throws IllegalStateException if this statement does not have values. - * @throws IndexOutOfBoundsException if {@code i} is not a valid index for this object. - */ - public Object getObject(int i) { - return getObject(i, CodecRegistry.DEFAULT_INSTANCE); + @Override + StringBuilder buildQueryString(List values, CodecRegistry codecRegistry) { + return statement.buildQueryString(values, codecRegistry); } - private void maybeRebuildCache(CodecRegistry codecRegistry) { - if (!dirty && cache != null) - return; - - StringBuilder sb; - values = null; - - if (hasBindMarkers || forceNoValues) { - sb = buildQueryString(null, codecRegistry); - } else { - values = new ArrayList(); - sb = buildQueryString(values, codecRegistry); - - if (values.size() > 65535) - throw new IllegalArgumentException("Too many values for built statement, the maximum allowed is 65535"); - - if (values.isEmpty()) - values = null; - } - - maybeAddSemicolon(sb); - - cache = sb.toString(); - dirty = false; + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return statement.getRoutingKey(protocolVersion, codecRegistry); } - static StringBuilder maybeAddSemicolon(StringBuilder sb) { - // Use the same test that String#trim() uses to determine - // if a character is a whitespace character. - int l = sb.length(); - while (l > 0 && sb.charAt(l - 1) <= ' ') - l -= 1; - if (l != sb.length()) - sb.setLength(l); - - if (l == 0 || sb.charAt(l - 1) != ';') - sb.append(';'); - return sb; + @Override + public String getKeyspace() { + return statement.getKeyspace(); } - abstract StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry); - + @Override boolean isCounterOp() { - return isCounterOp == null ? 
false : isCounterOp; - } - - void setCounterOp(boolean isCounterOp) { - this.isCounterOp = isCounterOp; + return statement.isCounterOp(); } + @Override boolean hasNonIdempotentOps() { - return hasNonIdempotentOps; + return statement.hasNonIdempotentOps(); } - void setNonIdempotentOps() { - hasNonIdempotentOps = true; + @Override + public RegularStatement setForceNoValues(boolean forceNoValues) { + statement.setForceNoValues(forceNoValues); + return this; } - void setDirty() { - dirty = true; + @Override + public Statement setConsistencyLevel(ConsistencyLevel consistency) { + statement.setConsistencyLevel(consistency); + return this; } - void checkForBindMarkers(Object value) { - dirty = true; - if (Utils.containsBindMarker(value)) - hasBindMarkers = true; + @Override + public ConsistencyLevel getConsistencyLevel() { + return statement.getConsistencyLevel(); } - void checkForBindMarkers(Utils.Appendeable value) { - dirty = true; - if (value != null && value.containsBindMarker()) - hasBindMarkers = true; + @Override + public Statement enableTracing() { + statement.enableTracing(); + return this; } - // TODO: Correctly document the InvalidTypeException - void maybeAddRoutingKey(String name, Object value) { - if (routingKeyValues == null || name == null || value == null || Utils.containsSpecialValue(value)) - return; - - for (int i = 0; i < partitionKey.size(); i++) { - if (Utils.handleId(name).equals(partitionKey.get(i).getName())) { - routingKeyValues.set(i, value); - return; - } - } + @Override + public Statement disableTracing() { + statement.disableTracing(); + return this; } @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - if (routingKeyValues == null) - return null; - ByteBuffer[] routingKeyParts = new ByteBuffer[partitionKey.size()]; - for (int i = 0; i < partitionKey.size(); i++) { - Object value = routingKeyValues.get(i); - if (value == null) - return null; - TypeCodec codec = codecRegistry.codecFor(partitionKey.get(i).getType(), value); - routingKeyParts[i] = codec.serialize(value, protocolVersion); - } - return routingKeyParts.length == 1 - ? routingKeyParts[0] - : Utils.compose(routingKeyParts); + public boolean isTracing() { + return statement.isTracing(); } @Override - public String getKeyspace() { - return keyspace; + public Statement setRetryPolicy(RetryPolicy policy) { + statement.setRetryPolicy(policy); + return this; } @Override - public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - maybeRebuildCache(codecRegistry); - return values == null ? 
null : Utils.convert(values.toArray(), protocolVersion, codecRegistry); + public RetryPolicy getRetryPolicy() { + return statement.getRetryPolicy(); } @Override - public boolean hasValues(CodecRegistry codecRegistry) { - maybeRebuildCache(codecRegistry); - return values != null; + public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return statement.getValues(protocolVersion, codecRegistry); } @Override - public Map getNamedValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - // Built statements never return named values - return null; + public boolean hasValues() { + return statement.hasValues(); } @Override - public boolean usesNamedValues() { - return false; + void checkForBindMarkers(Object value) { + statement.checkForBindMarkers(value); } @Override - public Boolean isIdempotent() { - // If a value was forced with setIdempotent, it takes priority - if (idempotent != null) - return idempotent; - - // Otherwise return the computed value - return !hasNonIdempotentOps(); + void checkForBindMarkers(Utils.Appendeable value) { + statement.checkForBindMarkers(value); } @Override public String toString() { - try { - if (forceNoValues) - return getQueryString(); - // 1) try first with all values inlined (will not work if some values require custom codecs, - // or if the required codecs are registered in a different CodecRegistry instance than the default one) - return maybeAddSemicolon(buildQueryString(null, CodecRegistry.DEFAULT_INSTANCE)).toString(); - } catch (RuntimeException e1) { - // 2) try next with bind markers for all values to avoid usage of custom codecs - try { - return maybeAddSemicolon(buildQueryString(new ArrayList(), CodecRegistry.DEFAULT_INSTANCE)).toString(); - } catch (RuntimeException e2) { - // Ugly but we have absolutely no context to get the registry from - return String.format("built query (could not generate with default codec registry: %s)", e2.getMessage()); - } - } - } - - /** - * Allows to force this builder to not generate values (through its {@code getValues()} method). - *

    - * By default (and unless the protocol version 1 is in use, see below) and - * for performance reasons, the query builder will not serialize all values - * provided to strings. This means that {@link #getQueryString(CodecRegistry)} - * may return a query string with bind markers (where and when is at the - * discretion of the builder) and {@link #getValues} will return the binary - * values for those markers. This method allows to force the builder to not - * generate binary values but rather to inline them all in the query - * string. In practice, this means that if you call {@code - * setForceNoValues(true)}, you are guaranteed that {@code getValues()} will - * return {@code null} and that the string returned by {@code - * getQueryString()} will contain no other bind markers than the ones - * specified by the user. - *

    - * If the native protocol version 1 is in use, the driver will default - * to not generating values since those are not supported by that version of - * the protocol. In practice, the driver will automatically call this method - * with {@code true} as argument prior to execution. Hence, calling this - * method when the protocol version 1 is in use is basically a no-op. - *

    - * Note that this method is mainly useful for debugging purpose. In general, - * the default behavior should be the correct and most efficient one. - * - * @param forceNoValues whether or not this builder may generate values. - * @return this statement. - */ - public RegularStatement setForceNoValues(boolean forceNoValues) { - this.forceNoValues = forceNoValues; - this.dirty = true; - return this; - } - - /** - * An utility class to create a BuiltStatement that encapsulate another one. - */ - abstract static class ForwardingStatement extends BuiltStatement { - - T statement; - - ForwardingStatement(T statement) { - super(null, null, null); - this.statement = statement; - } - - @Override - public String getQueryString(CodecRegistry codecRegistry) { - return statement.getQueryString(codecRegistry); - } - - @Override - StringBuilder buildQueryString(List values, CodecRegistry codecRegistry) { - return statement.buildQueryString(values, codecRegistry); - } - - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return statement.getRoutingKey(protocolVersion, codecRegistry); - } - - @Override - public String getKeyspace() { - return statement.getKeyspace(); - } - - @Override - boolean isCounterOp() { - return statement.isCounterOp(); - } - - @Override - boolean hasNonIdempotentOps() { - return statement.hasNonIdempotentOps(); - } - - @Override - public RegularStatement setForceNoValues(boolean forceNoValues) { - statement.setForceNoValues(forceNoValues); - return this; - } - - @Override - public Statement setConsistencyLevel(ConsistencyLevel consistency) { - statement.setConsistencyLevel(consistency); - return this; - } - - @Override - public ConsistencyLevel getConsistencyLevel() { - return statement.getConsistencyLevel(); - } - - @Override - public Statement enableTracing() { - statement.enableTracing(); - return this; - } - - @Override - public Statement disableTracing() { - statement.disableTracing(); - return this; - } - - @Override - public boolean isTracing() { - return statement.isTracing(); - } - - @Override - public Statement setRetryPolicy(RetryPolicy policy) { - statement.setRetryPolicy(policy); - return this; - } - - @Override - public RetryPolicy getRetryPolicy() { - return statement.getRetryPolicy(); - } - - @Override - public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return statement.getValues(protocolVersion, codecRegistry); - } - - @Override - public boolean hasValues() { - return statement.hasValues(); - } - - @Override - void checkForBindMarkers(Object value) { - statement.checkForBindMarkers(value); - } - - @Override - void checkForBindMarkers(Utils.Appendeable value) { - statement.checkForBindMarkers(value); - } - - @Override - public String toString() { - return statement.toString(); - } + return statement.toString(); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java index 8b1e83f1f9a..1d91a8a7005 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Clause.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,286 +18,314 @@ package com.datastax.driver.core.querybuilder; import com.datastax.driver.core.CodecRegistry; - +import com.google.common.collect.Lists; import java.util.List; public abstract class Clause extends Utils.Appendeable { - abstract String name(); + abstract String name(); - abstract Object firstValue(); + abstract Object firstValue(); - private static abstract class AbstractClause extends Clause { - final String name; - - private AbstractClause(String name) { - this.name = name; - } + private abstract static class AbstractClause extends Clause { + final String name; - @Override - String name() { - return name; - } + private AbstractClause(String name) { + this.name = name; } - static class SimpleClause extends AbstractClause { + @Override + String name() { + return name; + } + } - private final String op; - private final Object value; + static class SimpleClause extends AbstractClause { - SimpleClause(String name, String op, Object value) { - super(name); - this.op = op; - this.value = value; - } + private final String op; + private final Object value; - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - Utils.appendName(name, sb).append(op); - Utils.appendValue(value, codecRegistry, sb, variables); - } - - @Override - Object firstValue() { - return value; - } + SimpleClause(String name, String op, Object value) { + super(name); + this.op = op; + this.value = value; + } - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + Utils.appendName(name, sb).append(op); + Utils.appendValue(value, codecRegistry, sb, variables); } - static class InClause extends AbstractClause { + @Override + Object firstValue() { + return value; + } - private final List values; + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + } - InClause(String name, List values) { - super(name); - this.values = values; + static class IsNotNullClause extends AbstractClause { - if (values == null) - throw new IllegalArgumentException("Missing values for IN clause"); - if (values.size() > 65535) - throw new IllegalArgumentException("Too many values for IN clause, the maximum allowed is 65535"); - } + IsNotNullClause(String name) { + super(name); + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - - // We special case the case of just one bind marker because there is little - // reasons to do: - // ... IN (?) ... - // since in that case it's more elegant to use an equal. On the other side, - // it is a lot more useful to do: - // ... IN ? ... - // which binds the variable to the full list the IN is on. 
- if (values.size() == 1 && values.get(0) instanceof BindMarker) { - Utils.appendName(name, sb).append(" IN ").append(values.get(0)); - return; - } - - Utils.appendName(name, sb).append(" IN ("); - Utils.joinAndAppendValues(sb, codecRegistry, values, variables).append(')'); - } + @Override + void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry) { + Utils.appendName(name, sb).append(" IS NOT NULL"); + } - @Override - Object firstValue() { - return values.isEmpty() ? null : values.get(0); - } + @Override + Object firstValue() { + return null; + } - @Override - boolean containsBindMarker() { - for (Object value : values) - if (Utils.containsBindMarker(value)) - return true; - return false; - } + @Override + boolean containsBindMarker() { + return false; } + } - static class ContainsClause extends AbstractClause { + static class InClause extends AbstractClause { - private final Object value; + private final List values; - ContainsClause(String name, Object value) { - super(name); - this.value = value; + InClause(String name, Iterable values) { + super(name); + if (values == null) throw new IllegalArgumentException("Missing values for IN clause"); - if (value == null) - throw new IllegalArgumentException("Missing value for CONTAINS clause"); - } + this.values = Lists.newArrayList(values); - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - Utils.appendName(name, sb).append(" CONTAINS "); - Utils.appendValue(value, codecRegistry, sb, variables); - } + if (this.values.size() > 65535) + throw new IllegalArgumentException( + "Too many values for IN clause, the maximum allowed is 65535"); + } - @Override - Object firstValue() { - return value; - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + + // We special case the case of just one bind marker because there is little + // reasons to do: + // ... IN (?) ... + // since in that case it's more elegant to use an equal. On the other side, + // it is a lot more useful to do: + // ... IN ? ... + // which binds the variable to the full list the IN is on. + if (values.size() == 1 && values.get(0) instanceof BindMarker) { + Utils.appendName(name, sb).append(" IN ").append(values.iterator().next()); + return; + } + + Utils.appendName(name, sb).append(" IN ("); + Utils.joinAndAppendValues(sb, codecRegistry, values, variables).append(')'); + } - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + @Override + Object firstValue() { + return values.isEmpty() ? 
null : values.get(0); } + @Override + boolean containsBindMarker() { + for (Object value : values) if (Utils.containsBindMarker(value)) return true; + return false; + } + } - static class ContainsKeyClause extends AbstractClause { + static class ContainsClause extends AbstractClause { - private final Object value; + private final Object value; - ContainsKeyClause(String name, Object value) { - super(name); - this.value = value; + ContainsClause(String name, Object value) { + super(name); + this.value = value; - if (value == null) - throw new IllegalArgumentException("Missing value for CONTAINS KEY clause"); - } + if (value == null) throw new IllegalArgumentException("Missing value for CONTAINS clause"); + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - Utils.appendName(name, sb).append(" CONTAINS KEY "); - Utils.appendValue(value, codecRegistry, sb, variables); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + Utils.appendName(name, sb).append(" CONTAINS "); + Utils.appendValue(value, codecRegistry, sb, variables); + } - @Override - Object firstValue() { - return value; - } + @Override + Object firstValue() { + return value; + } - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(value); - } + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); } + } + static class ContainsKeyClause extends AbstractClause { - static class CompoundClause extends Clause { - private String op; - private final List names; - private final List values; + private final Object value; - CompoundClause(List names, String op, List values) { - assert names.size() == values.size(); - this.op = op; - this.names = names; - this.values = values; - } + ContainsKeyClause(String name, Object value) { + super(name); + this.value = value; - @Override - String name() { - // This is only used for routing key purpose, and so far CompoundClause - // are not allowed for the partitionKey anyway - return null; - } + if (value == null) + throw new IllegalArgumentException("Missing value for CONTAINS KEY clause"); + } - @Override - Object firstValue() { - // This is only used for routing key purpose, and so far CompoundClause - // are not allowed for the partitionKey anyway - return null; - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + Utils.appendName(name, sb).append(" CONTAINS KEY "); + Utils.appendValue(value, codecRegistry, sb, variables); + } - @Override - boolean containsBindMarker() { - for (Object value : values) - if (Utils.containsBindMarker(value)) - return true; - return false; - } + @Override + Object firstValue() { + return value; + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - sb.append("("); - for (int i = 0; i < names.size(); i++) { - if (i > 0) - sb.append(","); - Utils.appendName(names.get(i), sb); - } - sb.append(")").append(op).append("("); - for (int i = 0; i < values.size(); i++) { - if (i > 0) - sb.append(","); - Utils.appendValue(values.get(i), codecRegistry, sb, variables); - } - sb.append(")"); - } + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(value); + } + } + + static class CompoundClause extends Clause { + private String op; + private final List names; + private final List values; + + CompoundClause(Iterable names, String op, Iterable values) { + this.op = op; + this.names = Lists.newArrayList(names); + this.values = 
Lists.newArrayList(values); + if (this.names.size() != this.values.size()) + throw new IllegalArgumentException( + String.format( + "The number of names (%d) and values (%d) don't match", + this.names.size(), this.values.size())); } - static class CompoundInClause extends Clause { - private final List names; - private final List valueLists; - - public CompoundInClause(List names, List valueLists) { - if (valueLists == null) - throw new IllegalArgumentException("Missing values for IN clause"); - if (valueLists.size() > 65535) - throw new IllegalArgumentException("Too many values for IN clause, the maximum allowed is 65535"); - for (Object value : valueLists) { - if (value instanceof List) { - List tuple = (List) value; - if (tuple.size() != names.size()) { - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), tuple.size())); - } - } else if (!(value instanceof BindMarker)) { - throw new IllegalArgumentException(String.format("Wrong element type for values list, expected List or BindMarker, got %s", value.getClass().getName())); - } - } - this.names = names; - this.valueLists = valueLists; - } + @Override + String name() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } - @Override - String name() { - // This is only used for routing key purpose, and so far CompoundClause - // are not allowed for the partitionKey anyway - return null; - } + @Override + Object firstValue() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } - @Override - Object firstValue() { - // This is only used for routing key purpose, and so far CompoundClause - // are not allowed for the partitionKey anyway - return null; - } + @Override + boolean containsBindMarker() { + for (Object value : values) if (Utils.containsBindMarker(value)) return true; + return false; + } - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(valueLists); + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + sb.append("("); + for (int i = 0; i < names.size(); i++) { + if (i > 0) sb.append(","); + Utils.appendName(names.get(i), sb); + } + sb.append(")").append(op).append("("); + for (int i = 0; i < values.size(); i++) { + if (i > 0) sb.append(","); + Utils.appendValue(values.get(i), codecRegistry, sb, variables); + } + sb.append(")"); + } + } + + static class CompoundInClause extends Clause { + private final List names; + private final List valueLists; + + public CompoundInClause(Iterable names, Iterable valueLists) { + if (valueLists == null) throw new IllegalArgumentException("Missing values for IN clause"); + if (names == null) throw new IllegalArgumentException("Missing names for IN clause"); + + this.names = Lists.newArrayList(names); + this.valueLists = Lists.newArrayList(); + + for (Object value : valueLists) { + if (value instanceof Iterable) { + List tuple = Lists.newArrayList((Iterable) value); + if (tuple.size() != this.names.size()) { + throw new IllegalArgumentException( + String.format( + "The number of names (%d) and values (%d) don't match", + this.names.size(), tuple.size())); + } + this.valueLists.add(tuple); + } else if (!(value instanceof BindMarker)) { + throw new IllegalArgumentException( + String.format( + "Wrong element type for values list, expected List or BindMarker, got %s", + 
value.getClass().getName())); + } else { + this.valueLists.add(value); } + } + if (this.valueLists.size() > 65535) + throw new IllegalArgumentException( + "Too many values for IN clause, the maximum allowed is 65535"); + } + + @Override + String name() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } + + @Override + Object firstValue() { + // This is only used for routing key purpose, and so far CompoundClause + // are not allowed for the partitionKey anyway + return null; + } + + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(valueLists); + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + sb.append("("); + for (int i = 0; i < names.size(); i++) { + if (i > 0) sb.append(","); + Utils.appendName(names.get(i), sb); + } + sb.append(")").append(" IN ").append("("); + for (int i = 0; i < valueLists.size(); i++) { + if (i > 0) sb.append(","); + + Object elt = valueLists.get(i); + + if (elt instanceof BindMarker) { + sb.append(elt); + } else { + List tuple = (List) elt; + if (tuple.size() == 1 && tuple.get(0) instanceof BindMarker) { + // Special case when there is only one bind marker: "IN ?" instead of "IN (?)" + sb.append(tuple.get(0)); + } else { sb.append("("); - for (int i = 0; i < names.size(); i++) { - if (i > 0) - sb.append(","); - Utils.appendName(names.get(i), sb); - } - sb.append(")").append(" IN ").append("("); - for (int i = 0; i < valueLists.size(); i++) { - if (i > 0) - sb.append(","); - - Object elt = valueLists.get(i); - - if (elt instanceof BindMarker) { - sb.append(elt); - } else { - List tuple = (List) elt; - if (tuple.size() == 1 && tuple.get(0) instanceof BindMarker) { - // Special case when there is only one bind marker: "IN ?" instead of "IN (?)" - sb.append(tuple.get(0)); - } else { - sb.append("("); - Utils.joinAndAppendValues(sb, codecRegistry, (List) tuple, variables).append(')'); - } - } - } - sb.append(")"); + Utils.joinAndAppendValues(sb, codecRegistry, (List) tuple, variables).append(')'); + } } + } + sb.append(")"); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java index 90d0ae2df57..42e32d9035b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Delete.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
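As an aside on the clause changes above (CONTAINS split out from CONTAINS KEY, plus the new compound relations), here is an illustrative sketch of how such clauses are typically built through QueryBuilder. It is not part of the patch: the products table and its tags, features and sku columns are invented, and the multi-column variants added by the patch are exposed through further QueryBuilder overloads not shown here.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.contains;
import static com.datastax.driver.core.querybuilder.QueryBuilder.containsKey;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;

import com.datastax.driver.core.Statement;

class ClauseSketch {
  // SELECT * FROM products WHERE tags CONTAINS 'sale' (requires an index on the collection)
  static Statement byTag() {
    return select().all().from("products").where(contains("tags", "sale"));
  }

  // SELECT * FROM products WHERE features CONTAINS KEY 'color'
  static Statement byFeatureKey() {
    return select().all().from("products").where(containsKey("features", "color"));
  }

  // SELECT * FROM products WHERE sku IN ('a-1', 'b-2')
  static Statement bySku() {
    return select().all().from("products").where(in("sku", "a-1", "b-2"));
  }
}
```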
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,562 +21,542 @@ import com.datastax.driver.core.ColumnMetadata; import com.datastax.driver.core.Metadata; import com.datastax.driver.core.TableMetadata; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; -/** - * A built DELETE statement. - */ +/** A built DELETE statement. */ public class Delete extends BuiltStatement { - private final String table; - private final List columns; - private final Where where; - private final Options usings; - private final Conditions conditions; - private boolean ifExists; + private final String table; + private final List columns; + private final Where where; + private final Options usings; + private final Conditions conditions; + private boolean ifExists; + + Delete(String keyspace, String table, List columns) { + this(keyspace, table, null, null, columns); + } + + Delete(TableMetadata table, List columns) { + this( + Metadata.quoteIfNecessary(table.getKeyspace().getName()), + Metadata.quoteIfNecessary(table.getName()), + Arrays.asList(new Object[table.getPartitionKey().size()]), + table.getPartitionKey(), + columns); + } + + Delete( + String keyspace, + String table, + List routingKeyValues, + List partitionKey, + List columns) { + super(keyspace, partitionKey, routingKeyValues); + this.table = table; + this.columns = columns; + this.where = new Where(this); + this.usings = new Options(this); + this.conditions = new Conditions(this); + + // This is for JAVA-1089, if the query deletes an element in a list, the statement should be + // non-idempotent. 
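To illustrate the JAVA-1089 rule in the comment above (a sketch, not part of the patch): deleting a list element addresses a position rather than a value, so the builder flags the statement as non-idempotent, whereas deleting a whole column stays idempotent. The events table and its history list column are assumed.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;

import com.datastax.driver.core.querybuilder.BuiltStatement;

class DeleteIdempotenceSketch {
  static void compare() {
    // DELETE history FROM events WHERE id=42: safe to retry
    BuiltStatement byColumn = delete("history").from("events").where(eq("id", 42));

    // DELETE history[0] FROM events WHERE id=42: position-based, marked non-idempotent
    BuiltStatement byIndex = delete().listElt("history", 0).from("events").where(eq("id", 42));

    Boolean columnIdempotent = byColumn.isIdempotent(); // expected TRUE
    Boolean indexIdempotent = byIndex.isIdempotent();   // expected FALSE
  }
}
```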
+ if (!areIdempotent(columns)) { + setNonIdempotentOps(); + } + } + + @Override + StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); + + builder.append("DELETE"); + if (!columns.isEmpty()) + Utils.joinAndAppend(builder.append(" "), codecRegistry, ",", columns, variables); + + builder.append(" FROM "); + if (keyspace != null) Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); + } - Delete(String keyspace, String table, List columns) { - this(keyspace, table, null, null, columns); + if (!where.clauses.isEmpty()) { + builder.append(" WHERE "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); } - Delete(TableMetadata table, List columns) { - this(Metadata.quoteIfNecessary(table.getKeyspace().getName()), - Metadata.quoteIfNecessary(table.getName()), - Arrays.asList(new Object[table.getPartitionKey().size()]), - table.getPartitionKey(), - columns); + if (ifExists) { + builder.append(" IF EXISTS "); } - Delete(String keyspace, - String table, - List routingKeyValues, - List partitionKey, - List columns) { - super(keyspace, partitionKey, routingKeyValues); - this.table = table; - this.columns = columns; - this.where = new Where(this); - this.usings = new Options(this); - this.conditions = new Conditions(this); + if (!conditions.conditions.isEmpty()) { + builder.append(" IF "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", conditions.conditions, variables); + } - // This is for JAVA-1089, if the query deletes an element in a list, the statement should be non-idempotent. - if (!areIdempotent(columns)) { - setNonIdempotentOps(); - } + return builder; + } + + /** + * Adds a WHERE clause to this statement. + * + *

    This is a shorter/more readable version for {@code where().and(clause)}. + * + * @param clause the clause to add. + * @return the where clause of this query to which more clauses can be added. + */ + public Where where(Clause clause) { + return where.and(clause); + } + + /** + * Returns a Where statement for this query without adding any clause. + * + * @return the where clause of this query to which more clauses can be added. + */ + public Where where() { + return where; + } + + /** + * Adds a conditions clause (IF) to this statement. + * + *

    This is a shorter/more readable version for {@code onlyIf().and(condition)}. + * + *

    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @param condition the condition to add. + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf(Clause condition) { + return conditions.and(condition); + } + + /** + * Adds a conditions clause (IF) to this statement. + * + *

    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf() { + return conditions; + } + + /** + * Adds a new option for this DELETE statement. + * + * @param using the option to add. + * @return the options of this DELETE statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the options for this DELETE statement. + * + *

    Chain this with {@link Options#and(Using)} to add options. + * + * @return the options of this DELETE statement. + */ + public Options using() { + return usings; + } + + /** + * Sets the 'IF EXISTS' option for this DELETE statement. + * + *

    + * + *

    A delete with that option will report whether the statement actually resulted in data being + * deleted. The existence check and deletion are done transactionally in the sense that if + * multiple clients attempt to delete a given row with this option, then at most one may succeed. + * + *
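A minimal usage sketch of the option described here (editorial, assuming a users table keyed by id): wasApplied() on the result set reports whether the row existed and was actually removed.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;

import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Delete;

class DeleteIfExistsSketch {
  // DELETE FROM users WHERE id=<userId> IF EXISTS
  static boolean deleteUser(Session session, int userId) {
    Delete stmt = delete().from("users").where(eq("id", userId)).ifExists();
    return session.execute(stmt).wasApplied();
  }
}
```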

    Please keep in mind that using this option has a non negligible performance impact and + * should be avoided when possible. This will configure the statement as non-idempotent, see + * {@link com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @return this DELETE statement. + */ + public Delete ifExists() { + this.ifExists = true; + setNonIdempotentOps(); + return this; + } + + /** The WHERE clause of a DELETE statement. */ + public static class Where extends BuiltStatement.ForwardingStatement { + + private final List clauses = new ArrayList(); + + Where(Delete statement) { + super(statement); } - @Override - StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); + /** + * Adds the provided clause to this WHERE clause. + * + * @param clause the clause to add. + * @return this WHERE clause. + */ + public Where and(Clause clause) { + clauses.add(clause); + statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(clause)) { + statement.setNonIdempotentOps(); + } + checkForBindMarkers(clause); + return this; + } - builder.append("DELETE"); - if (!columns.isEmpty()) - Utils.joinAndAppend(builder.append(" "), codecRegistry, ",", columns, variables); + /** + * Adds an option to the DELETE statement this WHERE clause is part of. + * + * @param using the using clause to add. + * @return the options of the DELETE statement this WHERE clause is part of. + */ + public Options using(Using using) { + return statement.using(using); + } - builder.append(" FROM "); - if (keyspace != null) - Utils.appendName(keyspace, builder).append('.'); - Utils.appendName(table, builder); - if (!usings.usings.isEmpty()) { - builder.append(" USING "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); - } + /** + * Sets the 'IF EXISTS' option for the DELETE statement this WHERE clause is part of. + * + *

    + * + *

    A delete with that option will report whether the statement actually resulted in data + * being deleted. The existence check and deletion are done transactionally in the sense that if + * multiple clients attempt to delete a given row with this option, then at most one may + * succeed. + * + *

    Please keep in mind that using this option has a non negligible performance impact and + * should be avoided when possible. + * + * @return the DELETE statement this WHERE clause is part of. + */ + public Delete ifExists() { + return statement.ifExists(); + } - if (!where.clauses.isEmpty()) { - builder.append(" WHERE "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); - } + /** + * Adds a condition to the DELETE statement this WHERE clause is part of. + * + * @param condition the condition to add. + * @return the conditions for the DELETE statement this WHERE clause is part of. + */ + public Conditions onlyIf(Clause condition) { + return statement.onlyIf(condition); + } + } - if (ifExists) { - builder.append(" IF EXISTS "); - } + /** The options of a DELETE statement. */ + public static class Options extends BuiltStatement.ForwardingStatement { - if (!conditions.conditions.isEmpty()) { - builder.append(" IF "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", conditions.conditions, variables); - } + private final List usings = new ArrayList(); - return builder; + Options(Delete statement) { + super(statement); } /** - * Adds a WHERE clause to this statement. - *

    - * This is a shorter/more readable version for {@code where().and(clause)}. + * Adds the provided option. * - * @param clause the clause to add. - * @return the where clause of this query to which more clause can be added. + * @param using a DELETE option. + * @return this {@code Options} object. */ - public Where where(Clause clause) { - return where.and(clause); + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; } /** - * Returns a Where statement for this query without adding clause. + * Adds a where clause to the DELETE statement these options are part of. * - * @return the where clause of this query to which more clause can be added. + * @param clause clause to add. + * @return the WHERE clause of the DELETE statement these options are part of. */ - public Where where() { - return where; + public Where where(Clause clause) { + return statement.where(clause); + } + } + + /** An in-construction DELETE statement. */ + public static class Builder { + + List columns = new ArrayList(); + + Builder() {} + + Builder(String... columnNames) { + for (String columnName : columnNames) { + this.columns.add(new Selector(columnName)); + } } /** - * Adds a conditions clause (IF) to this statement. - *

    - * This is a shorter/more readable version for {@code onlyIf().and(condition)}. - *

    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. + * Adds the table to delete from. * - * @param condition the condition to add. - * @return the conditions of this query to which more conditions can be added. + * @param table the name of the table to delete from. + * @return a newly built DELETE statement that deletes from {@code table}. */ - public Conditions onlyIf(Clause condition) { - return conditions.and(condition); + public Delete from(String table) { + return from(null, table); } /** - * Adds a conditions clause (IF) to this statement. - *

    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. + * Adds the table to delete from. * - * @return the conditions of this query to which more conditions can be added. + * @param keyspace the name of the keyspace to delete from. + * @param table the name of the table to delete from. + * @return a newly built DELETE statement that deletes from {@code keyspace.table}. */ - public Conditions onlyIf() { - return conditions; + public Delete from(String keyspace, String table) { + return new Delete(keyspace, table, columns); } /** - * Adds a new options for this DELETE statement. + * Adds the table to delete from. * - * @param using the option to add. - * @return the options of this DELETE statement. + * @param table the table to delete from. + * @return a newly built DELETE statement that deletes from {@code table}. */ - public Options using(Using using) { - return usings.and(using); + public Delete from(TableMetadata table) { + return new Delete(table, columns); } + } + + /** An column selection clause for an in-construction DELETE statement. */ + public static class Selection extends Builder { /** - * Returns the options for this DELETE statement. - *

    - * Chain this with {@link Options#and(Using)} to add options. + * Deletes all columns (i.e. "DELETE FROM ...") * - * @return the options of this DELETE statement. + * @return an in-build DELETE statement. + * @throws IllegalStateException if some columns had already been selected for this builder. */ - public Options using() { - return usings; + public Builder all() { + if (!columns.isEmpty()) + throw new IllegalStateException( + String.format("Some columns (%s) have already been selected.", columns)); + + return this; } /** - * Sets the 'IF EXISTS' option for this DELETE statement. - *

    - *

    - * A delete with that option will report whether the statement actually - * resulted in data being deleted. The existence check and deletion are - * done transactionally in the sense that if multiple clients attempt to - * delete a given row with this option, then at most one may succeed. - *

    - *

    - * Please keep in mind that using this option has a non negligible - * performance impact and should be avoided when possible. - *

    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. + * Deletes the provided column. * - * @return this DELETE statement. + * @param columnName the column to select for deletion. + * @return this in-build DELETE Selection */ - public Delete ifExists() { - this.ifExists = true; - setNonIdempotentOps(); - return this; + public Selection column(String columnName) { + columns.add(new Selector(columnName)); + return this; } /** - * The WHERE clause of a DELETE statement. + * Deletes the provided list element. + * + * @param columnName the name of the list column. + * @param idx the index of the element to delete. + * @return this in-build DELETE Selection */ - public static class Where extends BuiltStatement.ForwardingStatement { - - private final List clauses = new ArrayList(); - - Where(Delete statement) { - super(statement); - } - - /** - * Adds the provided clause to this WHERE clause. - * - * @param clause the clause to add. - * @return this WHERE clause. - */ - public Where and(Clause clause) { - clauses.add(clause); - statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); - if (!hasNonIdempotentOps() && !Utils.isIdempotent(clause)) { - statement.setNonIdempotentOps(); - } - checkForBindMarkers(clause); - return this; - } - - /** - * Adds an option to the DELETE statement this WHERE clause is part of. - * - * @param using the using clause to add. - * @return the options of the DELETE statement this WHERE clause is part of. - */ - public Options using(Using using) { - return statement.using(using); - } - - /** - * Sets the 'IF EXISTS' option for the DELETE statement this WHERE clause - * is part of. - *

    - *

    - * A delete with that option will report whether the statement actually - * resulted in data being deleted. The existence check and deletion are - * done transactionally in the sense that if multiple clients attempt to - * delete a given row with this option, then at most one may succeed. - *

    - *

    - * Please keep in mind that using this option has a non negligible - * performance impact and should be avoided when possible. - *

    - * - * @return the DELETE statement this WHERE clause is part of. - */ - public Delete ifExists() { - return statement.ifExists(); - } - - /** - * Adds a condition to the DELETE statement this WHERE clause is part of. - * - * @param condition the condition to add. - * @return the conditions for the DELETE statement this WHERE clause is part of. - */ - public Conditions onlyIf(Clause condition) { - return statement.onlyIf(condition); - } + public Selection listElt(String columnName, int idx) { + columns.add(new ListElementSelector(columnName, idx)); + return this; } /** - * The options of a DELETE statement. + * Deletes the provided list element, specified as a bind marker. + * + * @param columnName the name of the list column. + * @param idx the index of the element to delete. + * @return this in-build DELETE Selection */ - public static class Options extends BuiltStatement.ForwardingStatement { - - private final List usings = new ArrayList(); - - Options(Delete statement) { - super(statement); - } - - /** - * Adds the provided option. - * - * @param using a DELETE option. - * @return this {@code Options} object. - */ - public Options and(Using using) { - usings.add(using); - checkForBindMarkers(using); - return this; - } - - /** - * Adds a where clause to the DELETE statement these options are part of. - * - * @param clause clause to add. - * @return the WHERE clause of the DELETE statement these options are part of. - */ - public Where where(Clause clause) { - return statement.where(clause); - } + public Selection listElt(String columnName, BindMarker idx) { + columns.add(new ListElementSelector(columnName, idx)); + return this; } /** - * An in-construction DELETE statement. + * Deletes the provided set element. + * + * @param columnName the name of the set column. + * @param element the element to delete. + * @return this in-build DELETE Selection */ - public static class Builder { - - List columns = new ArrayList(); - - Builder() { - } - - Builder(String... columnNames) { - for (String columnName : columnNames) { - this.columns.add(new Selector(columnName)); - } - } - - /** - * Adds the table to delete from. - * - * @param table the name of the table to delete from. - * @return a newly built DELETE statement that deletes from {@code table}. - */ - public Delete from(String table) { - return from(null, table); - } - - /** - * Adds the table to delete from. - * - * @param keyspace the name of the keyspace to delete from. - * @param table the name of the table to delete from. - * @return a newly built DELETE statement that deletes from {@code keyspace.table}. - */ - public Delete from(String keyspace, String table) { - return new Delete(keyspace, table, columns); - } - - /** - * Adds the table to delete from. - * - * @param table the table to delete from. - * @return a newly built DELETE statement that deletes from {@code table}. - */ - public Delete from(TableMetadata table) { - return new Delete(table, columns); - } + public Selection setElt(String columnName, Object element) { + columns.add(new SetElementSelector(columnName, element)); + return this; } /** - * An column selection clause for an in-construction DELETE statement. + * Deletes the provided set element, specified as a bind marker. + * + * @param columnName the name of the set column. + * @param element the element to delete. + * @return this in-build DELETE Selection */ - public static class Selection extends Builder { - - /** - * Deletes all columns (i.e. "DELETE FROM ...") - * - * @return an in-build DELETE statement. 
- * @throws IllegalStateException if some columns had already been selected for this builder. - */ - public Builder all() { - if (!columns.isEmpty()) - throw new IllegalStateException(String.format("Some columns (%s) have already been selected.", columns)); - - return this; - } - - /** - * Deletes the provided column. - * - * @param columnName the column to select for deletion. - * @return this in-build DELETE Selection - */ - public Selection column(String columnName) { - columns.add(new Selector(columnName)); - return this; - } - - /** - * Deletes the provided list element. - * - * @param columnName the name of the list column. - * @param idx the index of the element to delete. - * @return this in-build DELETE Selection - */ - public Selection listElt(String columnName, int idx) { - columns.add(new ListElementSelector(columnName, idx)); - return this; - } - - /** - * Deletes the provided list element, - * specified as a bind marker. - * - * @param columnName the name of the list column. - * @param idx the index of the element to delete. - * @return this in-build DELETE Selection - */ - public Selection listElt(String columnName, BindMarker idx) { - columns.add(new ListElementSelector(columnName, idx)); - return this; - } - - /** - * Deletes the provided set element. - * - * @param columnName the name of the set column. - * @param element the element to delete. - * @return this in-build DELETE Selection - */ - public Selection setElt(String columnName, Object element) { - columns.add(new SetElementSelector(columnName, element)); - return this; - } - - /** - * Deletes the provided set element, - * specified as a bind marker. - * - * @param columnName the name of the set column. - * @param element the element to delete. - * @return this in-build DELETE Selection - */ - public Selection setElt(String columnName, BindMarker element) { - columns.add(new SetElementSelector(columnName, element)); - return this; - } - - /** - * Deletes a map element given a key. - * - * @param columnName the name of the map column. - * @param key the key for the element to delete. - * @return this in-build DELETE Selection - */ - public Selection mapElt(String columnName, Object key) { - columns.add(new MapElementSelector(columnName, key)); - return this; - } + public Selection setElt(String columnName, BindMarker element) { + columns.add(new SetElementSelector(columnName, element)); + return this; } /** - * A selector in a DELETE selection clause. - * A selector can be either a column name, - * a list element, a set element or a map entry. + * Deletes a map element given a key. + * + * @param columnName the name of the map column. + * @param key the key for the element to delete. + * @return this in-build DELETE Selection */ - private static class Selector extends Utils.Appendeable { + public Selection mapElt(String columnName, Object key) { + columns.add(new MapElementSelector(columnName, key)); + return this; + } + } + + /** + * A selector in a DELETE selection clause. A selector can be either a column name, a list + * element, a set element or a map entry. 
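An illustrative sketch of the selection API summarized above, assuming a profiles table with a top_scores list and a features map; the set variant (setElt) follows the same pattern.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;

import com.datastax.driver.core.querybuilder.Delete;

class CollectionDeleteSketch {
  // DELETE top_scores[0], features['size'] FROM profiles WHERE id=?
  static Delete.Where buildDelete() {
    return delete()
        .listElt("top_scores", 0)
        .mapElt("features", "size")
        .from("profiles")
        .where(eq("id", bindMarker()));
  }
}
```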
+ */ + private static class Selector extends Utils.Appendeable { - private final String columnName; + private final String columnName; - Selector(String columnName) { - this.columnName = columnName; - } + Selector(String columnName) { + this.columnName = columnName; + } - @Override - void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry) { - Utils.appendName(columnName, sb); - } + @Override + void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry) { + Utils.appendName(columnName, sb); + } - @Override - boolean containsBindMarker() { - return false; - } + @Override + boolean containsBindMarker() { + return false; + } - @Override - public String toString() { - return columnName; - } + @Override + public String toString() { + return columnName; } + } - /** - * A selector representing a list index, a set element or a map key in a DELETE selection clause. - */ - private static class CollectionElementSelector extends Selector { + /** + * A selector representing a list index, a set element or a map key in a DELETE selection clause. + */ + private static class CollectionElementSelector extends Selector { - protected final Object key; + protected final Object key; - CollectionElementSelector(String columnName, Object key) { - super(columnName); - this.key = key; - } + CollectionElementSelector(String columnName, Object key) { + super(columnName); + this.key = key; + } + + @Override + void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry) { + super.appendTo(sb, values, codecRegistry); + sb.append('['); + Utils.appendValue(key, codecRegistry, sb, values); + sb.append(']'); + } - @Override - void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry) { - super.appendTo(sb, values, codecRegistry); - sb.append('['); - Utils.appendValue(key, codecRegistry, sb, values); - sb.append(']'); - } + @Override + boolean containsBindMarker() { + return Utils.containsBindMarker(key); + } + } - @Override - boolean containsBindMarker() { - return Utils.containsBindMarker(key); - } + private static class ListElementSelector extends CollectionElementSelector { + ListElementSelector(String columnName, Object key) { + super(columnName, key); } + } - private static class ListElementSelector extends CollectionElementSelector { + private static class SetElementSelector extends CollectionElementSelector { - ListElementSelector(String columnName, Object key) { - super(columnName, key); - } + SetElementSelector(String columnName, Object key) { + super(columnName, key); } + } - private static class SetElementSelector extends CollectionElementSelector { + private static class MapElementSelector extends CollectionElementSelector { - SetElementSelector(String columnName, Object key) { - super(columnName, key); - } + MapElementSelector(String columnName, Object key) { + super(columnName, key); } + } - private static class MapElementSelector extends CollectionElementSelector { + private boolean areIdempotent(List selectors) { + for (Selector sel : selectors) { + if (sel instanceof ListElementSelector) { + return false; + } + } + return true; + } + + /** + * Conditions for a DELETE statement. + * + *

    When provided some conditions, a deletion will not apply unless the provided conditions + * apply. + * + *
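For illustration (not part of the patch), a conditional deletion built with onlyIf(), assuming a documents table with a version column; the IF condition turns the delete into a lightweight transaction and wasApplied() reports the outcome.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.delete;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;

import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Delete;

class ConditionalDeleteSketch {
  // DELETE FROM documents WHERE id=<id> IF version=<expectedVersion>
  static boolean deleteIfUnchanged(Session session, int id, int expectedVersion) {
    Delete.Conditions stmt =
        delete().from("documents").where(eq("id", id)).onlyIf(eq("version", expectedVersion));
    return session.execute(stmt).wasApplied();
  }
}
```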

    Please keep in mind that provided conditions have a non negligible performance impact and + * should be avoided when possible. + */ + public static class Conditions extends BuiltStatement.ForwardingStatement { + + private final List conditions = new ArrayList(); + + Conditions(Delete statement) { + super(statement); + } - MapElementSelector(String columnName, Object key) { - super(columnName, key); - } + /** + * Adds the provided condition for the deletion. + * + *

    Note that while the query builder accept any type of {@code Clause} as conditions, + * Cassandra currently only allows equality ones. + * + * @param condition the condition to add. + * @return this {@code Conditions} clause. + */ + public Conditions and(Clause condition) { + statement.setNonIdempotentOps(); + conditions.add(condition); + checkForBindMarkers(condition); + return this; } - private boolean areIdempotent(List selectors) { - for (Selector sel : selectors) { - if (sel instanceof ListElementSelector) { - return false; - } - } - return true; + /** + * Adds a where clause to the DELETE statement these conditions are part of. + * + * @param clause clause to add. + * @return the WHERE clause of the DELETE statement these conditions are part of. + */ + public Where where(Clause clause) { + return statement.where(clause); } /** - * Conditions for a DELETE statement. - *

    - * When provided some conditions, a deletion will not apply unless the - * provided conditions applies. - *

    - *

    - * Please keep in mind that provided conditions have a non negligible - * performance impact and should be avoided when possible. - *

    + * Adds an option to the DELETE statement these conditions are part of. + * + * @param using the using clause to add. + * @return the options of the DELETE statement these conditions are part of. */ - public static class Conditions extends BuiltStatement.ForwardingStatement { - - private final List conditions = new ArrayList(); - - Conditions(Delete statement) { - super(statement); - } - - /** - * Adds the provided condition for the deletion. - *

    - * Note that while the query builder accept any type of {@code Clause} - * as conditions, Cassandra currently only allows equality ones. - * - * @param condition the condition to add. - * @return this {@code Conditions} clause. - */ - public Conditions and(Clause condition) { - statement.setNonIdempotentOps(); - conditions.add(condition); - checkForBindMarkers(condition); - return this; - } - - /** - * Adds a where clause to the DELETE statement these conditions are part of. - * - * @param clause clause to add. - * @return the WHERE clause of the DELETE statement these conditions are part of. - */ - public Where where(Clause clause) { - return statement.where(clause); - } - - /** - * Adds an option to the DELETE statement these conditions are part of. - * - * @param using the using clause to add. - * @return the options of the DELETE statement these conditions are part of. - */ - public Options using(Using using) { - return statement.using(using); - } + public Options using(Using using) { + return statement.using(using); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java index 34cf016f0c8..57df351f912 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Insert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,340 +17,352 @@ */ package com.datastax.driver.core.querybuilder; +import static com.google.common.base.Preconditions.checkState; + import com.datastax.driver.core.CodecRegistry; import com.datastax.driver.core.ColumnMetadata; import com.datastax.driver.core.Metadata; import com.datastax.driver.core.TableMetadata; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import static com.google.common.base.Preconditions.checkState; - -/** - * A built {@code INSERT} statement. - */ +/** A built {@code INSERT} statement. 
*/ public class Insert extends BuiltStatement { - private enum JsonDefault { - NULL, UNSET - } + private enum JsonDefault { + NULL, + UNSET + } - private final String table; - private final List names = new ArrayList(); - private final List values = new ArrayList(); - private final Options usings; - private boolean ifNotExists; - private Object json; - private JsonDefault jsonDefault; + private final String table; + private final List names = new ArrayList(); + private final List values = new ArrayList(); + private final Options usings; + private boolean ifNotExists; + private Object json; + private JsonDefault jsonDefault; - Insert(String keyspace, String table) { - this(keyspace, table, null, null); - } + Insert(String keyspace, String table) { + this(keyspace, table, null, null); + } - Insert(TableMetadata table) { - this(Metadata.quoteIfNecessary(table.getKeyspace().getName()), - Metadata.quoteIfNecessary(table.getName()), - Arrays.asList(new Object[table.getPartitionKey().size()]), - table.getPartitionKey()); - } + Insert(TableMetadata table) { + this( + Metadata.quoteIfNecessary(table.getKeyspace().getName()), + Metadata.quoteIfNecessary(table.getName()), + Arrays.asList(new Object[table.getPartitionKey().size()]), + table.getPartitionKey()); + } - Insert(String keyspace, - String table, - List routingKeyValues, - List partitionKey) { - super(keyspace, partitionKey, routingKeyValues); - this.table = table; - this.usings = new Options(this); - } + Insert( + String keyspace, + String table, + List routingKeyValues, + List partitionKey) { + super(keyspace, partitionKey, routingKeyValues); + this.table = table; + this.usings = new Options(this); + } - @Override - StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); + @Override + StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); - builder.append("INSERT INTO "); - if (keyspace != null) - Utils.appendName(keyspace, builder).append('.'); - Utils.appendName(table, builder); + builder.append("INSERT INTO "); + if (keyspace != null) Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); - builder.append(" "); - if (json != null) { - builder.append("JSON "); - Utils.appendValue(json, codecRegistry, builder, variables); - if (jsonDefault == JsonDefault.UNSET) - builder.append(" DEFAULT UNSET"); - else if (jsonDefault == JsonDefault.NULL) - builder.append(" DEFAULT NULL"); - } else { - builder.append("("); - Utils.joinAndAppendNames(builder, codecRegistry, names); - builder.append(") VALUES ("); - Utils.joinAndAppendValues(builder, codecRegistry, values, variables); - builder.append(')'); - } + builder.append(" "); + if (json != null) { + builder.append("JSON "); + Utils.appendValue(json, codecRegistry, builder, variables); + if (jsonDefault == JsonDefault.UNSET) builder.append(" DEFAULT UNSET"); + else if (jsonDefault == JsonDefault.NULL) builder.append(" DEFAULT NULL"); + } else { + builder.append("("); + Utils.joinAndAppendNames(builder, codecRegistry, names); + builder.append(") VALUES ("); + Utils.joinAndAppendValues(builder, codecRegistry, values, variables); + builder.append(')'); + } - if (ifNotExists) - builder.append(" IF NOT EXISTS"); + if (ifNotExists) builder.append(" IF NOT EXISTS"); - if (!usings.usings.isEmpty()) { - builder.append(" USING "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); - } - return builder; + if 
(!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); } + return builder; + } - /** - * Adds a column/value pair to the values inserted by this {@code INSERT} statement. - * - * @param name the name of the column to insert/update. - * @param value the value to insert/update for {@code name}. - * @return this {@code INSERT} statement. - * @throws IllegalStateException if this method is called and the {@link #json(Object)} - * method has been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - */ - public Insert value(String name, Object value) { - checkState(json == null && jsonDefault == null, "Cannot mix INSERT JSON syntax with regular INSERT syntax"); - names.add(name); - values.add(value); - checkForBindMarkers(value); - if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) - this.setNonIdempotentOps(); - maybeAddRoutingKey(name, value); - return this; - } + /** + * Adds a column/value pair to the values inserted by this {@code INSERT} statement. + * + * @param name the name of the column to insert/update. + * @param value the value to insert/update for {@code name}. + * @return this {@code INSERT} statement. + * @throws IllegalStateException if this method is called and the {@link #json(Object)} method has + * been called before, because it's not possible to mix {@code INSERT JSON} syntax with + * regular {@code INSERT} syntax. + */ + public Insert value(String name, Object value) { + checkState( + json == null && jsonDefault == null, + "Cannot mix INSERT JSON syntax with regular INSERT syntax"); + names.add(name); + values.add(value); + checkForBindMarkers(value); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) this.setNonIdempotentOps(); + maybeAddRoutingKey(name, value); + return this; + } - /** - * Adds multiple column/value pairs to the values inserted by this INSERT statement. - * - * @param names a list of column names to insert/update. - * @param values a list of values to insert/update. The {@code i}th - * value in {@code values} will be inserted for the {@code i}th column - * in {@code names}. - * @return this INSERT statement. - * @throws IllegalArgumentException if {@code names.length != values.length}. - * @throws IllegalStateException if this method is called and the {@link #json(Object)} - * method has been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - */ - public Insert values(String[] names, Object[] values) { - return values(Arrays.asList(names), Arrays.asList(values)); - } + /** + * Adds multiple column/value pairs to the values inserted by this INSERT statement. + * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th value in {@code values} will + * be inserted for the {@code i}th column in {@code names}. + * @return this INSERT statement. + * @throws IllegalArgumentException if {@code names.length != values.length}. + * @throws IllegalStateException if this method is called and the {@link #json(Object)} method has + * been called before, because it's not possible to mix {@code INSERT JSON} syntax with + * regular {@code INSERT} syntax. + */ + public Insert values(String[] names, Object[] values) { + return values(Arrays.asList(names), Arrays.asList(values)); + } - /** - * Adds multiple column/value pairs to the values inserted by this INSERT statement. 
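As a quick illustration of the array-based variant described here (editorial sketch, users table assumed):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;

import com.datastax.driver.core.querybuilder.Insert;

class InsertValuesSketch {
  // INSERT INTO users (id, name) VALUES (1, 'alice')
  static Insert buildInsert() {
    return insertInto("users").values(new String[] {"id", "name"}, new Object[] {1, "alice"});
  }
}
```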
- * - * @param names a list of column names to insert/update. - * @param values a list of values to insert/update. The {@code i}th - * value in {@code values} will be inserted for the {@code i}th column - * in {@code names}. - * @return this INSERT statement. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - * @throws IllegalStateException if this method is called and the {@link #json(Object)} - * method has been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - */ - public Insert values(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("Got %d names but %d values", names.size(), values.size())); - checkState(json == null && jsonDefault == null, "Cannot mix INSERT JSON syntax with regular INSERT syntax"); - this.names.addAll(names); - this.values.addAll(values); - for (int i = 0; i < names.size(); i++) { - Object value = values.get(i); - checkForBindMarkers(value); - maybeAddRoutingKey(names.get(i), value); - if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) - this.setNonIdempotentOps(); - } - return this; + /** + * Adds multiple column/value pairs to the values inserted by this INSERT statement. + * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th value in {@code values} will + * be inserted for the {@code i}th column in {@code names}. + * @return this INSERT statement. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + * @throws IllegalStateException if this method is called and the {@link #json(Object)} method has + * been called before, because it's not possible to mix {@code INSERT JSON} syntax with + * regular {@code INSERT} syntax. + */ + public Insert values(List names, List values) { + if (names.size() != values.size()) + throw new IllegalArgumentException( + String.format("Got %d names but %d values", names.size(), values.size())); + checkState( + json == null && jsonDefault == null, + "Cannot mix INSERT JSON syntax with regular INSERT syntax"); + this.names.addAll(names); + this.values.addAll(values); + for (int i = 0; i < names.size(); i++) { + Object value = values.get(i); + checkForBindMarkers(value); + maybeAddRoutingKey(names.get(i), value); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(value)) this.setNonIdempotentOps(); } + return this; + } - /** - * Inserts the provided object, using the {@code INSERT INTO ... JSON} syntax introduced - * in Cassandra 2.2. - *

    - * With INSERT statements, the new {@code JSON} keyword can be used to enable inserting a JSON - * structure as a single row. - *

    - * The provided object can be of the following types: - *

      - *
    1. A raw string. In this case, it will be appended to the query string as is. - * It should NOT be surrounded by single quotes. - * Its format should generally match that returned by a - * {@code SELECT JSON} statement on the same table. - * Note that it is not possible to insert function calls nor bind markers in a JSON string.
    2. - *
    3. A {@link QueryBuilder#bindMarker() bind marker}. In this case, the statement is meant to be prepared - * and no JSON string will be appended to the query string, only a bind marker for the whole JSON parameter.
    4. - *
    5. Any object that can be serialized to JSON. Such objects can be used provided that - * a matching {@link com.datastax.driver.core.TypeCodec codec} is registered with the - * {@link CodecRegistry} in use. This allows the usage of JSON libraries, such - * as the Java API for JSON processing, - * the popular Jackson library, or - * Google's Gson library, for instance.
    6. - *
    - *

    Case-sensitive column names

    - * When passing raw strings to this method, users are required to handle case-sensitive column names - * by surrounding them with double quotes. - *

    - * For example, to insert into a table with two columns named “myKey” and “value”, - * you would do the following: - *

    -     * insertInto("mytable").json("{\"\\\"myKey\\\"\": 0, \"value\": 0}");
    -     * 
    - * This will produce the following CQL: - *
    -     * INSERT INTO mytable JSON '{"\"myKey\"": 0, "value": 0}';
    -     * 
    - *

    Escaping quotes in column values

    - * When passing raw strings to this method, double quotes should be escaped with a backslash, - * but single quotes should be escaped in - * the CQL manner, i.e. by another single quote. For example, the column value - * {@code foo"'bar} should be inserted in the JSON string - * as {@code "foo\"''bar"}. - *

    - *

    Null values and tombstones

    - * Any columns which are omitted from the JSON string will be defaulted to a {@code NULL} value - * (which will result in a tombstone being created). - * - * @param json the JSON string, or a bind marker, or a JSON object handled by a specific {@link com.datastax.driver.core.TypeCodec codec}. - * @return this INSERT statement. - * @throws IllegalStateException if this method is called and any of the {@code value} or {@code values} - * methods have been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - * @see JSON Support for CQL - * @see JSON Support in Cassandra 2.2 - * @see Inserting JSON data - */ - public Insert json(Object json) { - checkState(values.isEmpty() && names.isEmpty(), "Cannot mix INSERT JSON syntax with regular INSERT syntax"); - this.json = json; - return this; - } + /** + * Inserts the provided object, using the {@code INSERT INTO ... JSON} syntax introduced in + * Cassandra 2.2. + * + *

    With INSERT statements, the new {@code JSON} keyword can be used to enable inserting a JSON + * structure as a single row. + * + *

    The provided object can be of the following types: + * + *

      + *
    1. A raw string. In this case, it will be appended to the query string as is. It + * should NOT be surrounded by single quotes. Its format should generally match + * that returned by a {@code SELECT JSON} statement on the same table. Note that it is not + * possible to insert function calls nor bind markers in a JSON string. + *
    2. A {@link QueryBuilder#bindMarker() bind marker}. In this case, the statement is meant to + * be prepared and no JSON string will be appended to the query string, only a bind marker + * for the whole JSON parameter. + *
    3. Any object that can be serialized to JSON. Such objects can be used provided that a + * matching {@link com.datastax.driver.core.TypeCodec codec} is registered with the {@link + * CodecRegistry} in use. This allows the usage of JSON libraries, such as the Java API for JSON processing, the popular + * Jackson library, or Google's Gson library, for instance. + *
    + * </ol>
    + *
    + * <p><b>Case-sensitive column names</b>

    + * + * When passing raw strings to this method, users are required to handle case-sensitive column + * names by surrounding them with double quotes. + * + *

    For example, to insert into a table with two columns named “myKey” and “value”, you would do + * the following: + * + *

    + * <pre>
    + * insertInto("mytable").json("{\"\\\"myKey\\\"\": 0, \"value\": 0}");
    + * </pre>
    + *
    + * This will produce the following CQL:
    + *
    + * <pre>
    + * INSERT INTO mytable JSON '{"\"myKey\"": 0, "value": 0}';
    + * </pre>
    + *
    + * <p><b>Escaping quotes in column values</b>

    + * + * When passing raw strings to this method, double quotes should be escaped with a backslash, but + * single quotes should be escaped in the CQL manner, i.e. by another single quote. For example, + * the column value {@code foo"'bar} should be inserted in the JSON string as {@code + * "foo\"''bar"}. + * + *

    + *
    + * <p><b>Null values and tombstones</b>

    + * + * Any columns which are omitted from the JSON string will be defaulted to a {@code NULL} value + * (which will result in a tombstone being created). + * + * @param json the JSON string, or a bind marker, or a JSON object handled by a specific {@link + * com.datastax.driver.core.TypeCodec codec}. + * @return this INSERT statement. + * @throws IllegalStateException if this method is called and any of the {@code value} or {@code + * values} methods have been called before, because it's not possible to mix {@code INSERT + * JSON} syntax with regular {@code INSERT} syntax. + * @see JSON Support for CQL + * @see JSON + * Support in Cassandra 2.2 + * @see Inserting + * JSON data + */ + public Insert json(Object json) { + checkState( + values.isEmpty() && names.isEmpty(), + "Cannot mix INSERT JSON syntax with regular INSERT syntax"); + this.json = json; + return this; + } - /** - * Appends a {@code DEFAULT UNSET} clause to this {@code INSERT INTO ... JSON} statement. - *
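An editorial sketch of the two most common json() usages described above: a raw JSON string, and a bind marker for a prepared statement combined with the DEFAULT UNSET option covered just below. The users table is assumed and error handling is omitted.

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Insert;

class InsertJsonSketch {
  static void insertRows(Session session) {
    // INSERT INTO users JSON '{"id": 1, "name": "alice"}'
    Insert literal = insertInto("users").json("{\"id\": 1, \"name\": \"alice\"}");
    session.execute(literal);

    // Prepared variant: INSERT INTO users JSON ? DEFAULT UNSET (DEFAULT UNSET needs Cassandra 3.10+)
    Insert toPrepare = insertInto("users").json(bindMarker()).defaultUnset();
    PreparedStatement ps = session.prepare(toPrepare);
    session.execute(ps.bind("{\"id\": 2, \"name\": \"bob\"}"));
  }
}
```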

    - * Support for {@code DEFAULT UNSET} has been introduced in Cassandra 3.10. - * - * @return this {@code INSERT} statement. - * @throws IllegalStateException if this method is called and any of the {@code value} or {@code values} - * methods have been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - */ - public Insert defaultUnset() { - checkState(values.isEmpty() && names.isEmpty(), "Cannot mix INSERT JSON syntax with regular INSERT syntax"); - this.jsonDefault = JsonDefault.UNSET; - return this; - } + /** + * Appends a {@code DEFAULT UNSET} clause to this {@code INSERT INTO ... JSON} statement. + * + *

    Support for {@code DEFAULT UNSET} has been introduced in Cassandra 3.10. + * + * @return this {@code INSERT} statement. + * @throws IllegalStateException if this method is called and any of the {@code value} or {@code + * values} methods have been called before, because it's not possible to mix {@code INSERT + * JSON} syntax with regular {@code INSERT} syntax. + */ + public Insert defaultUnset() { + checkState( + values.isEmpty() && names.isEmpty(), + "Cannot mix INSERT JSON syntax with regular INSERT syntax"); + this.jsonDefault = JsonDefault.UNSET; + return this; + } - /** - * Appends a {@code DEFAULT NULL} clause to this {@code INSERT INTO ... JSON} statement. - *

    - * Support for {@code DEFAULT NULL} has been introduced in Cassandra 3.10. - * - * @return this {@code INSERT} statement. - * @throws IllegalStateException if this method is called and any of the {@code value} or {@code values} - * methods have been called before, because it's not possible - * to mix {@code INSERT JSON} syntax with regular {@code INSERT} syntax. - */ - public Insert defaultNull() { - checkState(values.isEmpty() && names.isEmpty(), "Cannot mix INSERT JSON syntax with regular INSERT syntax"); - this.jsonDefault = JsonDefault.NULL; - return this; - } + /** + * Appends a {@code DEFAULT NULL} clause to this {@code INSERT INTO ... JSON} statement. + * + *

    Support for {@code DEFAULT NULL} has been introduced in Cassandra 3.10. + * + * @return this {@code INSERT} statement. + * @throws IllegalStateException if this method is called and any of the {@code value} or {@code + * values} methods have been called before, because it's not possible to mix {@code INSERT + * JSON} syntax with regular {@code INSERT} syntax. + */ + public Insert defaultNull() { + checkState( + values.isEmpty() && names.isEmpty(), + "Cannot mix INSERT JSON syntax with regular INSERT syntax"); + this.jsonDefault = JsonDefault.NULL; + return this; + } - /** - * Adds a new options for this {@code INSERT} statement. - * - * @param using the option to add. - * @return the options of this {@code INSERT} statement. - */ - public Options using(Using using) { - return usings.and(using); + /** + * Adds a new options for this {@code INSERT} statement. + * + * @param using the option to add. + * @return the options of this {@code INSERT} statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** + * Returns the options for this {@code INSERT} statement. + * + *

    Chain this with {@link Options#and(Using)} to add options. + * + * @return the options of this {@code INSERT} statement. + */ + public Options using() { + return usings; + } + + /** + * Sets the 'IF NOT EXISTS' option for this {@code INSERT} statement. + * + *

    An insert with that option will not succeed unless the row does not exist at the time the + * insertion is executed. The existence check and insertions are done transactionally in the sense + * that if multiple clients attempt to create a given row with this option, then at most one may + * succeed. + * + *

    Please keep in mind that using this option has a non negligible performance impact and + * should be avoided when possible. + * + *

    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @return this {@code INSERT} statement. + */ + public Insert ifNotExists() { + this.setNonIdempotentOps(); + this.ifNotExists = true; + return this; + } + + /** The options of an {@code INSERT} statement. */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Insert st) { + super(st); } /** - * Returns the options for this {@code INSERT} statement. - *
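A rough sketch of combining `ifNotExists()` with a `USING` option (hypothetical table and values; the CQL in the comment is approximate):

```java
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class ConditionalInsertExample {
  public static void main(String[] args) {
    // Only insert if the row does not already exist, and expire it after one hour.
    // Note that the IF NOT EXISTS condition marks the statement as non-idempotent.
    Insert.Options insert =
        QueryBuilder.insertInto("ks", "users")
            .value("id", 1)
            .value("name", "alice")
            .ifNotExists()
            .using(QueryBuilder.ttl(3600));
    // Roughly: INSERT INTO ks.users (id,name) VALUES (1,'alice') IF NOT EXISTS USING TTL 3600;
    System.out.println(insert);
  }
}
```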

    - * Chain this with {@link Options#and(Using)} to add options. + * Adds the provided option. * - * @return the options of this {@code INSERT} statement. + * @param using an {@code INSERT} option. + * @return this {@code Options} object. */ - public Options using() { - return usings; + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; } /** - * Sets the 'IF NOT EXISTS' option for this {@code INSERT} statement. - *

    - * An insert with that option will not succeed unless the row does not - * exist at the time the insertion is executed. The existence check and - * insertions are done transactionally in the sense that if multiple - * clients attempt to create a given row with this option, then at most one - * may succeed. - *

    - * Please keep in mind that using this option has a non negligible - * performance impact and should be avoided when possible. - *

    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. + * Adds a column/value pair to the values inserted by this {@code INSERT} statement. * - * @return this {@code INSERT} statement. + * @param name the name of the column to insert/update. + * @param value the value to insert/update for {@code name}. + * @return the {@code INSERT} statement those options are part of. */ - public Insert ifNotExists() { - this.setNonIdempotentOps(); - this.ifNotExists = true; - return this; + public Insert value(String name, Object value) { + return statement.value(name, value); } /** - * The options of an {@code INSERT} statement. + * Adds multiple column/value pairs to the values inserted by this {@code INSERT} statement. + * + * @param names a list of column names to insert/update. + * @param values a list of values to insert/update. The {@code i}th value in {@code values} will + * be inserted for the {@code i}th column in {@code names}. + * @return the {@code INSERT} statement those options are part of. + * @throws IllegalArgumentException if {@code names.length != values.length}. */ - public static class Options extends BuiltStatement.ForwardingStatement { - - private final List usings = new ArrayList(); - - Options(Insert st) { - super(st); - } - - /** - * Adds the provided option. - * - * @param using an {@code INSERT} option. - * @return this {@code Options} object. - */ - public Options and(Using using) { - usings.add(using); - checkForBindMarkers(using); - return this; - } - - /** - * Adds a column/value pair to the values inserted by this {@code INSERT} statement. - * - * @param name the name of the column to insert/update. - * @param value the value to insert/update for {@code name}. - * @return the {@code INSERT} statement those options are part of. - */ - public Insert value(String name, Object value) { - return statement.value(name, value); - } - - /** - * Adds multiple column/value pairs to the values inserted by this {@code INSERT} statement. - * - * @param names a list of column names to insert/update. - * @param values a list of values to insert/update. The {@code i}th - * value in {@code values} will be inserted for the {@code i}th column - * in {@code names}. - * @return the {@code INSERT} statement those options are part of. - * @throws IllegalArgumentException if {@code names.length != values.length}. - */ - public Insert values(String[] names, Object[] values) { - return statement.values(names, values); - } + public Insert values(String[] names, Object[] values) { + return statement.values(names, values); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java index fd695f735bd..d6fc90f9579 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Ordering.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,27 +18,26 @@ package com.datastax.driver.core.querybuilder; import com.datastax.driver.core.CodecRegistry; - import java.util.List; public class Ordering extends Utils.Appendeable { - private final String name; - private final boolean isDesc; + private final String name; + private final boolean isDesc; - Ordering(String name, boolean isDesc) { - this.name = name; - this.isDesc = isDesc; - } + Ordering(String name, boolean isDesc) { + this.name = name; + this.isDesc = isDesc; + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - Utils.appendName(name, sb); - sb.append(isDesc ? " DESC" : " ASC"); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + Utils.appendName(name, sb); + sb.append(isDesc ? " DESC" : " ASC"); + } - @Override - boolean containsBindMarker() { - return false; - } + @Override + boolean containsBindMarker() { + return false; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java index 5400266976c..1062b66bf94 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/QueryBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,1280 +22,1306 @@ import com.datastax.driver.core.RegularStatement; import com.datastax.driver.core.TableMetadata; import com.datastax.driver.core.exceptions.InvalidQueryException; - -import java.util.*; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; /** * Builds CQL3 query via a fluent API. - *

    - * The queries built by this builder will provide a value for the - * {@link com.datastax.driver.core.Statement#getRoutingKey} method only when a - * {@link com.datastax.driver.core.TableMetadata} is provided to the builder. - * It is thus advised to do so if a {@link com.datastax.driver.core.policies.TokenAwarePolicy} - * is in use. - *

    - * The provider builders perform very little validation of the built query. - * There is thus no guarantee that a built query is valid, and it is - * definitively possible to create invalid queries. - *

    - * Note that it could be convenient to use an 'import static' to bring the static methods of - * this class into scope. + * + *

    The queries built by this builder will provide a value for the {@link + * com.datastax.driver.core.Statement#getRoutingKey} method only when a {@link + * com.datastax.driver.core.TableMetadata} is provided to the builder. It is thus advised to do so + * if a {@link com.datastax.driver.core.policies.TokenAwarePolicy} is in use. + * + *

    The provider builders perform very little validation of the built query. There is thus no + * guarantee that a built query is valid, and it is definitively possible to create invalid queries. + * + *

    Note that it could be convenient to use an 'import static' to bring the static methods of this + * class into scope. */ public final class QueryBuilder { - private QueryBuilder() { - } - - /** - * Starts building a new {@code SELECT} query that selects the provided names. - *

    - * Note that {@code select(c1, c2)} is just a shortcut for {@code select().column(c1).column(c2)}. - * - * @param columns the columns names that should be selected by the query. - * @return an in-construction {@code SELECT} query (you will need to provide at - * least a {@code FROM} clause to complete the query). - */ - public static Select.Builder select(String... columns) { - return select((Object[]) columns); - } - - /** - * Starts building a new {@code SELECT} query that selects the provided names. - *

    - * Note that {@code select(c1, c2)} is just a shortcut for {@code select().column(c1).column(c2)}. - * - * @param columns the columns names that should be selected by the query. - * @return an in-construction {@code SELECT} query (you will need to provide at - * least a {@code FROM} clause to complete the query). - */ - public static Select.Builder select(Object... columns) { - return new Select.Builder(Arrays.asList(columns)); - } - - /** - * Starts building a new {@code SELECT} query. - * - * @return an in-construction {@code SELECT} query (you will need to provide a - * column selection and at least a {@code FROM} clause to complete the query). - */ - public static Select.Selection select() { - // Note: the fact we return Select.Selection as return type is on purpose. - return new Select.SelectionOrAlias(); - } - - /** - * Starts building a new {@code INSERT} query. - * - * @param table the name of the table in which to insert. - * @return an in-construction {@code INSERT} query. - */ - public static Insert insertInto(String table) { - return new Insert(null, table); - } - - /** - * Starts building a new {@code INSERT} query. - * - * @param keyspace the name of the keyspace to use. - * @param table the name of the table to insert into. - * @return an in-construction {@code INSERT} query. - */ - public static Insert insertInto(String keyspace, String table) { - return new Insert(keyspace, table); - } - - /** - * Starts building a new {@code INSERT} query. - * - * @param table the name of the table to insert into. - * @return an in-construction {@code INSERT} query. - */ - public static Insert insertInto(TableMetadata table) { - return new Insert(table); - } - - /** - * Starts building a new {@code UPDATE} query. - * - * @param table the name of the table to update. - * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} - * clause needs to be provided to complete the query). - */ - public static Update update(String table) { - return new Update(null, table); - } - - /** - * Starts building a new {@code UPDATE} query. - * - * @param keyspace the name of the keyspace to use. - * @param table the name of the table to update. - * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} - * clause needs to be provided to complete the query). - */ - public static Update update(String keyspace, String table) { - return new Update(keyspace, table); - } - - /** - * Starts building a new {@code UPDATE} query. - * - * @param table the name of the table to update. - * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} - * clause needs to be provided to complete the query). - */ - public static Update update(TableMetadata table) { - return new Update(table); - } - - /** - * Starts building a new {@code DELETE} query that deletes the provided names. - * - * @param columns the columns names that should be deleted by the query. - * @return an in-construction {@code DELETE} query (At least a {@code FROM} and a {@code WHERE} - * clause needs to be provided to complete the query). - */ - public static Delete.Builder delete(String... columns) { - return new Delete.Builder(columns); - } - - /** - * Starts building a new {@code DELETE} query. - * - * @return an in-construction {@code DELETE} query (you will need to provide a - * column selection and at least a {@code FROM} and a {@code WHERE} clause to complete the - * query). 
- */ - public static Delete.Selection delete() { - return new Delete.Selection(); - } - - /** - * Builds a new {@code BATCH} query on the provided statements. - *

    - * This method will build a logged batch (this is the default in CQL3). To - * create unlogged batches, use {@link #unloggedBatch}. Also note that - * for convenience, if the provided statements are counter statements, this - * method will create a {@code COUNTER} batch even though COUNTER batches are never - * logged (so for counters, using this method is effectively equivalent to - * using {@link #unloggedBatch}). - * - * @param statements the statements to batch. - * @return a new {@code RegularStatement} that batch {@code statements}. - */ - public static Batch batch(RegularStatement... statements) { - return new Batch(statements, true); - } - - /** - * Builds a new {@code UNLOGGED BATCH} query on the provided statements. - *

- * Compared to logged batches (the default), unlogged batches don't - * use the distributed batch log server side and as such are not - * guaranteed to be atomic. In other words, if an unlogged batch - * times out, some of the batched statements may have been persisted - * while some have not. Unlogged batches will however be slightly - * faster than logged batches. - *

    - * If the statements added to the batch are counter statements, the - * resulting batch will be a {@code COUNTER} one. - * - * @param statements the statements to batch. - * @return a new {@code RegularStatement} that batch {@code statements} without - * using the batch log. - */ - public static Batch unloggedBatch(RegularStatement... statements) { - return new Batch(statements, false); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(String table) { - return new Truncate(null, table); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - * @param keyspace the name of the keyspace to use. - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(String keyspace, String table) { - return new Truncate(keyspace, table); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - * @param table the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(TableMetadata table) { - return new Truncate(table); - } - - /** - * Quotes a column name to make it case sensitive. - * - * @param columnName the column name to quote. - * @return the quoted column name. - * @see Metadata#quote(String) - */ - public static String quote(String columnName) { - return Metadata.quote(columnName); - } - - /** - * The token of a column name. - * - * @param columnName the column name to take the token of. - * @return {@code "token(" + columnName + ")"}. - */ - public static String token(String columnName) { - StringBuilder sb = new StringBuilder(); - sb.append("token("); - Utils.appendName(columnName, sb); - sb.append(')'); - return sb.toString(); - } - - /** - * The token of column names. - *
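A short sketch contrasting the two batch flavours described above; all table and column names are made up for illustration:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class BatchExample {
  public static void main(String[] args) {
    // Logged batch: atomic, goes through the distributed batch log.
    Batch logged =
        QueryBuilder.batch(
            QueryBuilder.insertInto("ks", "users").value("id", 1).value("name", "alice"),
            QueryBuilder.update("ks", "users_by_name")
                .with(set("id", 1))
                .where(eq("name", "alice")));

    // Unlogged batch: no atomicity guarantee, but slightly faster.
    Batch unlogged =
        QueryBuilder.unloggedBatch(
            QueryBuilder.insertInto("ks", "events").value("id", 1).value("day", 1),
            QueryBuilder.insertInto("ks", "events").value("id", 1).value("day", 2));

    System.out.println(logged);
    System.out.println(unlogged);
  }
}
```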

    - * This variant is most useful when the partition key is composite. - * - * @param columnNames the column names to take the token of. - * @return a string representing the token of the provided column names. - */ - public static String token(String... columnNames) { - StringBuilder sb = new StringBuilder(); - sb.append("token("); - Utils.joinAndAppendNames(sb, null, Arrays.asList(columnNames)); - sb.append(')'); - return sb.toString(); - } - - /** - * Returns a generic {@code token} function call. - * - * @param values the arguments of the {@code token} function. - * @return {@code token} function call. - */ - public static Object token(Object... values) { - return new Utils.FCall("token", values); - } - - /** - * Creates an "equal" {@code WHERE} clause stating the provided column must be - * equal to the provided value. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause eq(String name, Object value) { - return new Clause.SimpleClause(name, "=", value); - } - - /** - * Creates an "equal" {@code WHERE} clause for a group of clustering columns. - *

    - * For instance, {@code eq(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} - * will generate the CQL {@code WHERE} clause {@code (a, b) = (2, 'test') }. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.6. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - */ - public static Clause eq(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); - - return new Clause.CompoundClause(names, "=", values); - } - - /** - * Creates a "like" {@code WHERE} clause stating that the provided column must be equal to the provided value. - * - * @param name the column name. - * @param value the value. - * @return the corresponding where clause. - */ - public static Clause like(String name, Object value) { - return new Clause.SimpleClause(name, " LIKE ", value); - } - - /** - * Create an "in" {@code WHERE} clause stating the provided column must be equal - * to one of the provided values. - * - * @param name the column name - * @param values the values - * @return the corresponding where clause. - */ - public static Clause in(String name, Object... values) { - return new Clause.InClause(name, Arrays.asList(values)); - } - - /** - * Create an "in" {@code WHERE} clause stating the provided column must be equal - * to one of the provided values. - * - * @param name the column name - * @param values the values - * @return the corresponding where clause. - */ - public static Clause in(String name, List values) { - return new Clause.InClause(name, values); - } - - /** - * Creates an "in" {@code WHERE} clause for a group of clustering columns (a.k.a. "multi-column IN restriction"). - *

    - * For instance, {@code in(Arrays.asList("a", "b"), Arrays.asList(Arrays.asList(1, "foo"), Arrays.asList(2, "bar")))} - * will generate the CQL {@code WHERE} clause {@code (a, b) IN ((1, 'foo'), (2, 'bar'))}. - *

    - * Each element in {@code values} must be either a {@link List list} containing exactly as many values - * as there are columns to match in {@code names}, - * or a {@link #bindMarker() bind marker} – in which case, that marker is to be considered as - * a placeholder for one whole tuple of values to match. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.9. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if the size of any tuple in {@code values} is not equal to {@code names.size()}, - * or if {@code values} contains elements that are neither {@link List lists} nor {@link #bindMarker() bind markers}. - */ - public static Clause in(List names, List values) { - return new Clause.CompoundInClause(names, values); - } - - /** - * Creates a "contains" {@code WHERE} clause stating the provided column must contain - * the value provided. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause contains(String name, Object value) { - return new Clause.ContainsClause(name, value); - } - - /** - * Creates a "contains key" {@code WHERE} clause stating the provided column must contain - * the key provided. - * - * @param name the column name - * @param key the key - * @return the corresponding where clause. - */ - public static Clause containsKey(String name, Object key) { - return new Clause.ContainsKeyClause(name, key); - } - - /** - * Creates a "lesser than" {@code WHERE} clause stating the provided column must be less than - * the provided value. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause lt(String name, Object value) { - return new Clause.SimpleClause(name, "<", value); - } - - /** - * Creates a "lesser than" {@code WHERE} clause for a group of clustering columns. - *
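To make the multi-column `IN` restriction concrete, a sketch under the same assumptions as the javadoc example (hypothetical table `t`):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;

import com.datastax.driver.core.querybuilder.QueryBuilder;
import java.util.Arrays;

public class MultiColumnInExample {
  public static void main(String[] args) {
    // Literal tuples: (a, b) IN ((1, 'foo'), (2, 'bar'))
    System.out.println(
        QueryBuilder.select()
            .from("t")
            .where(
                in(
                    Arrays.asList("a", "b"),
                    Arrays.asList(Arrays.asList(1, "foo"), Arrays.asList(2, "bar")))));

    // Bind markers: (a, b) IN (?, ?), where each marker stands for one whole (a, b) tuple.
    System.out.println(
        QueryBuilder.select()
            .from("t")
            .where(in(Arrays.asList("a", "b"), Arrays.asList(bindMarker(), bindMarker()))));
  }
}
```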

    - * For instance, {@code lt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} - * will generate the CQL {@code WHERE} clause {@code (a, b) < (2, 'test') }. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.6. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - */ - public static Clause lt(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); - - return new Clause.CompoundClause(names, "<", values); - } - - /** - * Creates a "lesser than or equal" {@code WHERE} clause stating the provided column must - * be lesser than or equal to the provided value. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause lte(String name, Object value) { - return new Clause.SimpleClause(name, "<=", value); - } - - /** - * Creates a "lesser than or equal" {@code WHERE} clause for a group of clustering columns. - *

- * For instance, {@code lte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} - * will generate the CQL {@code WHERE} clause {@code (a, b) <= (2, 'test') }. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.6. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - */ - public static Clause lte(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); - - return new Clause.CompoundClause(names, "<=", values); - } - - /** - * Creates a "greater than" {@code WHERE} clause stating the provided column must - * be greater to the provided value. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause gt(String name, Object value) { - return new Clause.SimpleClause(name, ">", value); - } - - /** - * Creates a "greater than" {@code WHERE} clause for a group of clustering columns. - *

    - * For instance, {@code gt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} - * will generate the CQL {@code WHERE} clause {@code (a, b) > (2, 'test') }. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.6. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - */ - public static Clause gt(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); - - return new Clause.CompoundClause(names, ">", values); - } - - /** - * Creates a "greater than or equal" {@code WHERE} clause stating the provided - * column must be greater than or equal to the provided value. - * - * @param name the column name - * @param value the value - * @return the corresponding where clause. - */ - public static Clause gte(String name, Object value) { - return new Clause.SimpleClause(name, ">=", value); - } - - /** - * Creates a "greater than or equal" {@code WHERE} clause for a group of clustering columns. - *

- * For instance, {@code gte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} - * will generate the CQL {@code WHERE} clause {@code (a, b) >= (2, 'test') }. - *

    - * Please note that this variant is only supported starting with Cassandra 2.0.6. - * - * @param names the column names - * @param values the values - * @return the corresponding where clause. - * @throws IllegalArgumentException if {@code names.size() != values.size()}. - */ - public static Clause gte(List names, List values) { - if (names.size() != values.size()) - throw new IllegalArgumentException(String.format("The number of names (%d) and values (%d) don't match", names.size(), values.size())); - - return new Clause.CompoundClause(names, ">=", values); - } - - /** - * Ascending ordering for the provided column. - * - * @param columnName the column name - * @return the corresponding ordering - */ - public static Ordering asc(String columnName) { - return new Ordering(columnName, false); - } - - /** - * Descending ordering for the provided column. - * - * @param columnName the column name - * @return the corresponding ordering - */ - public static Ordering desc(String columnName) { - return new Ordering(columnName, true); - } - - /** - * Option to set the timestamp for a modification query (insert, update or delete). - * - * @param timestamp the timestamp (in microseconds) to use. - * @return the corresponding option - * @throws IllegalArgumentException if {@code timestamp < 0}. - */ - public static Using timestamp(long timestamp) { - if (timestamp < 0) - throw new IllegalArgumentException("Invalid timestamp, must be positive"); - - return new Using.WithValue("TIMESTAMP", timestamp); - } - - /** - * Option to prepare the timestamp (in microseconds) for a modification query (insert, update or delete). - * - * @param marker bind marker to use for the timestamp. - * @return the corresponding option. - */ - public static Using timestamp(BindMarker marker) { - return new Using.WithMarker("TIMESTAMP", marker); - } - - /** - * Option to set the ttl for a modification query (insert, update or delete). - * - * @param ttl the ttl (in seconds) to use. - * @return the corresponding option - * @throws IllegalArgumentException if {@code ttl < 0}. - */ - public static Using ttl(int ttl) { - if (ttl < 0) - throw new IllegalArgumentException("Invalid ttl, must be positive"); - - return new Using.WithValue("TTL", ttl); - } - - /** - * Option to prepare the ttl (in seconds) for a modification query (insert, update or delete). - * - * @param marker bind marker to use for the ttl. - * @return the corresponding option - */ - public static Using ttl(BindMarker marker) { - return new Using.WithMarker("TTL", marker); - } - - /** - * Simple "set" assignment of a value to a column. - *
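A sketch combining the comparison clauses and orderings above in a single read; the `ks.events` table and its columns are assumptions for illustration, and the CQL in the comment is approximate:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.desc;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gte;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class SelectOrderingExample {
  public static void main(String[] args) {
    // Roughly: SELECT * FROM ks.events WHERE sensor_id=42 AND day>=20200101
    //          ORDER BY day DESC LIMIT 100;
    Statement select =
        QueryBuilder.select()
            .from("ks", "events")
            .where(eq("sensor_id", 42))
            .and(gte("day", 20200101))
            .orderBy(desc("day"))
            .limit(100);
    System.out.println(select);
  }
}
```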

    - * This will generate: - *

    -     * name = value
    -     * 
    - * The column name will only be quoted if it contains special characters, as in: - *
    -     * "a name that contains spaces" = value
    -     * 
    - * Otherwise, if you want to force case sensitivity, use - * {@link #quote(String)}: - *
    -     * set(quote("aCaseSensitiveName"), value)
    -     * 
    - * This method won't work to set UDT fields; use {@link #set(Object, Object)} with a - * {@link #path(String...) path} instead: - *
    -     * set(path("udt", "field"), value)
    -     * 
    - * - * @param name the column name - * @param value the value to assign - * @return the correspond assignment (to use in an update query) - */ - public static Assignment set(String name, Object value) { - return new Assignment.SetAssignment(name, value); - } - - /** - * Advanced "set" assignment of a value to a column or a - * {@link com.datastax.driver.core.UserType UDT} field. - *

    - * This method is seldom preferable to {@link #set(String, Object)}; it is only useful: - *

      - * - when assigning values to individual fields of a UDT (see {@link #path(String...)}):
      -     * set(path("udt", "field"), value)
      - * - if you wish to pass a "raw" string that will get appended as-is to the query (see {@link #raw(String)}).
      - *   There is no practical usage for this at the time of writing, but it will serve as a workaround if new
      - *   features are added to Cassandra and you're using an older driver version that is not yet aware of them:
      -     * set(raw("some custom string"), value)
      - *
    - * If the runtime type of {@code name} is {@code String}, this method is equivalent to {@link #set(String, Object)}. - * - * @param name the column or UDT field name - * @param value the value to assign - * @return the correspond assignment (to use in an update query) - */ - public static Assignment set(Object name, Object value) { - return new Assignment.SetAssignment(name, value); - } - - /** - * Incrementation of a counter column. - *
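A sketch of the UDT-field assignment described above (hypothetical `users` table with a non-frozen `address` UDT column; per CASSANDRA-7423 this requires a recent enough server):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.path;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class UdtFieldUpdateExample {
  public static void main(String[] args) {
    // Roughly: UPDATE users SET address.city='Paris' WHERE id=1;
    Statement update =
        QueryBuilder.update("users")
            .with(set(path("address", "city"), "Paris"))
            .where(eq("id", 1));
    System.out.println(update);
  }
}
```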

    - * This will generate: {@code name = name + 1}. - * - * @param name the column name to increment - * @return the correspond assignment (to use in an update query) - */ - public static Assignment incr(String name) { - return incr(name, 1L); - } - - /** - * Incrementation of a counter column by a provided value. - *

    - * This will generate: {@code name = name + value}. - * - * @param name the column name to increment - * @param value the value by which to increment - * @return the correspond assignment (to use in an update query) - */ - public static Assignment incr(String name, long value) { - return new Assignment.CounterAssignment(name, value, true); - } - - /** - * Incrementation of a counter column by a provided value. - *

    - * This will generate: {@code name = name + value}. - * - * @param name the column name to increment - * @param value a bind marker representing the value by which to increment - * @return the correspond assignment (to use in an update query) - */ - public static Assignment incr(String name, BindMarker value) { - return new Assignment.CounterAssignment(name, value, true); - } - - /** - * Decrementation of a counter column. - *

    - * This will generate: {@code name = name - 1}. - * - * @param name the column name to decrement - * @return the correspond assignment (to use in an update query) - */ - public static Assignment decr(String name) { - return decr(name, 1L); - } - - /** - * Decrementation of a counter column by a provided value. - *

    - * This will generate: {@code name = name - value}. - * - * @param name the column name to decrement - * @param value the value by which to decrement - * @return the correspond assignment (to use in an update query) - */ - public static Assignment decr(String name, long value) { - return new Assignment.CounterAssignment(name, value, false); - } - - /** - * Decrementation of a counter column by a provided value. - *

    - * This will generate: {@code name = name - value}. - * - * @param name the column name to decrement - * @param value a bind marker representing the value by which to decrement - * @return the correspond assignment (to use in an update query) - */ - public static Assignment decr(String name, BindMarker value) { - return new Assignment.CounterAssignment(name, value, false); - } - - /** - * Prepend a value to a list column. - *
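A minimal counter-update sketch using the increment assignment above (hypothetical `page_views` counter table):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.incr;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class CounterUpdateExample {
  public static void main(String[] args) {
    // Roughly: UPDATE page_views SET hits=hits+10 WHERE page='home';
    Statement update =
        QueryBuilder.update("page_views")
            .with(incr("hits", 10L))
            .where(eq("page", "home"));
    System.out.println(update);
  }
}
```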

    - * This will generate: {@code name = [ value ] + name}. - * - * @param name the column name (must be of type list). - * @param value the value to prepend. Using a BindMarker here is not supported. - * To use a BindMarker use {@code QueryBuilder#prependAll} with a - * singleton list. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment prepend(String name, Object value) { - if (value instanceof BindMarker) { - throw new InvalidQueryException("binding a value in prepend() is not supported, use prependAll() and bind a singleton list"); - } - return prependAll(name, Collections.singletonList(value)); - } - - /** - * Prepend a list of values to a list column. - *

    - * This will generate: {@code name = list + name}. - * - * @param name the column name (must be of type list). - * @param list the list of values to prepend. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment prependAll(String name, List list) { - return new Assignment.ListPrependAssignment(name, list); - } - - /** - * Prepend a list of values to a list column. - *

    - * This will generate: {@code name = list + name}. - * - * @param name the column name (must be of type list). - * @param list a bind marker representing the list of values to prepend. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment prependAll(String name, BindMarker list) { - return new Assignment.ListPrependAssignment(name, list); - } - - /** - * Append a value to a list column. - *

    - * This will generate: {@code name = name + [value]}. - * - * @param name the column name (must be of type list). - * @param value the value to append. Using a BindMarker here is not supported. - * To use a BindMarker use {@code QueryBuilder#appendAll} with a - * singleton list. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment append(String name, Object value) { - if (value instanceof BindMarker) { - throw new InvalidQueryException("Binding a value in append() is not supported, use appendAll() and bind a singleton list"); - } - return appendAll(name, Collections.singletonList(value)); - } - - /** - * Append a list of values to a list column. - *

    - * This will generate: {@code name = name + list}. - * - * @param name the column name (must be of type list). - * @param list the list of values to append - * @return the correspond assignment (to use in an update query) - */ - public static Assignment appendAll(String name, List list) { - return new Assignment.CollectionAssignment(name, list, true, false); - } - - /** - * Append a list of values to a list column. - *

    - * This will generate: {@code name = name + list}. - * - * @param name the column name (must be of type list). - * @param list a bind marker representing the list of values to append - * @return the correspond assignment (to use in an update query) - */ - public static Assignment appendAll(String name, BindMarker list) { - return new Assignment.CollectionAssignment(name, list, true, false); - } - - /** - * Discard a value from a list column. - *

    - * This will generate: {@code name = name - [value]}. - * - * @param name the column name (must be of type list). - * @param value the value to discard. Using a BindMarker here is not supported. - * To use a BindMarker use {@code QueryBuilder#discardAll} with a singleton list. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment discard(String name, Object value) { - if (value instanceof BindMarker) { - throw new InvalidQueryException("Binding a value in discard() is not supported, use discardAll() and bind a singleton list"); - } - return discardAll(name, Collections.singletonList(value)); - } - - /** - * Discard a list of values to a list column. - *

    - * This will generate: {@code name = name - list}. - * - * @param name the column name (must be of type list). - * @param list the list of values to discard - * @return the correspond assignment (to use in an update query) - */ - public static Assignment discardAll(String name, List list) { - return new Assignment.CollectionAssignment(name, list, false); - } - - /** - * Discard a list of values to a list column. - *

    - * This will generate: {@code name = name - list}. - * - * @param name the column name (must be of type list). - * @param list a bind marker representing the list of values to discard - * @return the correspond assignment (to use in an update query) - */ - public static Assignment discardAll(String name, BindMarker list) { - return new Assignment.CollectionAssignment(name, list, false); - } - - /** - * Sets a list column value by index. - *

    - * This will generate: {@code name[idx] = value}. - * - * @param name the column name (must be of type list). - * @param idx the index to set - * @param value the value to set - * @return the correspond assignment (to use in an update query) - */ - public static Assignment setIdx(String name, int idx, Object value) { - return new Assignment.ListSetIdxAssignment(name, idx, value); - } - - /** - * Adds a value to a set column. - *

    - * This will generate: {@code name = name + {value}}. - * - * @param name the column name (must be of type set). - * @param value the value to add. Using a BindMarker here is not supported. - * To use a BindMarker use {@code QueryBuilder#addAll} with a - * singleton set. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment add(String name, Object value) { - if (value instanceof BindMarker) { - throw new InvalidQueryException("Binding a value in add() is not supported, use addAll() and bind a singleton list"); - } - return addAll(name, Collections.singleton(value)); - } - - /** - * Adds a set of values to a set column. - *

    - * This will generate: {@code name = name + set}. - * - * @param name the column name (must be of type set). - * @param set the set of values to append - * @return the correspond assignment (to use in an update query) - */ - public static Assignment addAll(String name, Set set) { - return new Assignment.CollectionAssignment(name, set, true); - } - - /** - * Adds a set of values to a set column. - *

    - * This will generate: {@code name = name + set}. - * - * @param name the column name (must be of type set). - * @param set a bind marker representing the set of values to append - * @return the correspond assignment (to use in an update query) - */ - public static Assignment addAll(String name, BindMarker set) { - return new Assignment.CollectionAssignment(name, set, true); - } - - /** - * Remove a value from a set column. - *

    - * This will generate: {@code name = name - {value}}. - * - * @param name the column name (must be of type set). - * @param value the value to remove. Using a BindMarker here is not supported. - * To use a BindMarker use {@code QueryBuilder#removeAll} with a singleton set. - * @return the correspond assignment (to use in an update query) - */ - public static Assignment remove(String name, Object value) { - if (value instanceof BindMarker) { - throw new InvalidQueryException("Binding a value in remove() is not supported, use removeAll() and bind a singleton set"); - } - return removeAll(name, Collections.singleton(value)); - } - - /** - * Remove a set of values from a set column. - *

    - * This will generate: {@code name = name - set}. - * - * @param name the column name (must be of type set). - * @param set the set of values to remove - * @return the correspond assignment (to use in an update query) - */ - public static Assignment removeAll(String name, Set set) { - return new Assignment.CollectionAssignment(name, set, false); - } - - /** - * Remove a set of values from a set column. - *

    - * This will generate: {@code name = name - set}. - * - * @param name the column name (must be of type set). - * @param set a bind marker representing the set of values to remove - * @return the correspond assignment (to use in an update query) - */ - public static Assignment removeAll(String name, BindMarker set) { - return new Assignment.CollectionAssignment(name, set, false); - } - - /** - * Puts a new key/value pair to a map column. - *

    - * This will generate: {@code name[key] = value}. - * - * @param name the column name (must be of type map). - * @param key the key to put - * @param value the value to put - * @return the correspond assignment (to use in an update query) - */ - public static Assignment put(String name, Object key, Object value) { - return new Assignment.MapPutAssignment(name, key, value); - } - - /** - * Puts a map of new key/value pairs to a map column. - *

    - * This will generate: {@code name = name + map}. - * - * @param name the column name (must be of type map). - * @param map the map of key/value pairs to put - * @return the correspond assignment (to use in an update query) - */ - public static Assignment putAll(String name, Map map) { - return new Assignment.CollectionAssignment(name, map, true); - } - - /** - * Puts a map of new key/value pairs to a map column. - *

    - * This will generate: {@code name = name + map}. - * - * @param name the column name (must be of type map). - * @param map a bind marker representing the map of key/value pairs to put - * @return the correspond assignment (to use in an update query) - */ - public static Assignment putAll(String name, BindMarker map) { - return new Assignment.CollectionAssignment(name, map, true); - } - - /** - * An object representing an anonymous bind marker (a question mark). - *
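A sketch combining the list, set and map assignments above in one update (hypothetical `users` table with an `emails` list, a `tags` set and a `prefs` map column):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.add;
import static com.datastax.driver.core.querybuilder.QueryBuilder.appendAll;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.put;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import java.util.Arrays;

public class CollectionUpdateExample {
  public static void main(String[] args) {
    // Roughly: UPDATE users SET emails=emails+['a@x.org','b@x.org'], tags=tags+{'vip'},
    //          prefs['lang']='fr' WHERE id=1;
    Statement update =
        QueryBuilder.update("users")
            .with(appendAll("emails", Arrays.asList("a@x.org", "b@x.org")))
            .and(add("tags", "vip"))
            .and(put("prefs", "lang", "fr"))
            .where(eq("id", 1));
    System.out.println(update);
  }
}
```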

    - * This can be used wherever a value is expected. For instance, one can do: - *

    -     * {@code
    -     *     Insert i = QueryBuilder.insertInto("test").value("k", 0)
    -     *                                               .value("c", QueryBuilder.bindMarker());
    -     *     PreparedState p = session.prepare(i.toString());
    -     * }
    -     * 
    - * - * @return a new bind marker. - */ - public static BindMarker bindMarker() { - return BindMarker.ANONYMOUS; - } - - /** - * An object representing a named bind marker. - *

    - * This can be used wherever a value is expected. For instance, one can do: - *

    -     * {@code
    -     *     Insert i = QueryBuilder.insertInto("test").value("k", 0)
    -     *                                               .value("c", QueryBuilder.bindMarker("c_val"));
    -     *     PreparedState p = session.prepare(i.toString());
    -     * }
    -     * 
    - *

- * Please note that named bind markers are only supported starting with Cassandra 2.0.1. - * - * @param name the name for the bind marker. - * @return an object representing a bind marker named {@code name}. - */ - public static BindMarker bindMarker(String name) { - return new BindMarker(name); - } - - /** - * Protects a value from any interpretation by the query builder. - *
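A slightly fuller sketch of the named bind marker flow hinted at in the javadoc above; the `test` table and the surrounding `Session` wiring are assumptions:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class BindMarkerExample {
  static void insertRow(Session session) {
    // Roughly: INSERT INTO test (k, c) VALUES (0, :c_val);
    Insert insert =
        QueryBuilder.insertInto("test").value("k", 0).value("c", bindMarker("c_val"));
    PreparedStatement prepared = session.prepare(insert);
    BoundStatement bound = prepared.bind().setString("c_val", "hello");
    session.execute(bound);
  }
}
```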

    - * The following table exemplify the behavior of this function: - * - * - * - * - * - * - * - * - *
    Examples of use
Code | Resulting query string
{@code select().from("t").where(eq("c", "C'est la vie!")); } | {@code "SELECT * FROM t WHERE c='C''est la vie!';"}
{@code select().from("t").where(eq("c", raw("C'est la vie!"))); } | {@code "SELECT * FROM t WHERE c=C'est la vie!;"}
{@code select().from("t").where(eq("c", raw("'C'est la vie!'"))); } | {@code "SELECT * FROM t WHERE c='C'est la vie!';"}
{@code select().from("t").where(eq("c", "now()")); } | {@code "SELECT * FROM t WHERE c='now()';"}
{@code select().from("t").where(eq("c", raw("now()"))); } | {@code "SELECT * FROM t WHERE c=now();"}
- * Note: the 2nd and 3rd examples in this table are not valid CQL3 queries. - *

    - * The use of that method is generally discouraged since it lead to security risks. However, - * if you know what you are doing, it allows to escape the interpretations done by the - * QueryBuilder. - * - * @param str the raw value to use as a string - * @return the value but protected from being interpreted/escaped by the query builder. - */ - public static Object raw(String str) { - return new Utils.RawString(str); - } - - /** - * Creates a function call. - * - * @param name the name of the function to call. - * @param parameters the parameters for the function. - * @return the function call. - */ - public static Object fcall(String name, Object... parameters) { - return new Utils.FCall(name, parameters); - } - - /** - * Creates a Cast of a column using the given dataType. - * - * @param column the column to cast. - * @param dataType the data type to cast to. - * @return the casted column. - */ - public static Object cast(Object column, DataType dataType) { - return new Utils.Cast(column, dataType); - } - - /** - * Creates a {@code now()} function call. - * - * @return the function call. - */ - public static Object now() { - return new Utils.FCall("now"); - } - - /** - * Creates a {@code uuid()} function call. - * - * @return the function call. - */ - public static Object uuid() { - return new Utils.FCall("uuid"); - } - - /** - * Declares that the name in argument should be treated as a column name. - *

    - * This mainly meant for use with {@link Select.Selection#fcall} when a - * function should apply to a column name, not a string value. - * - * @param name the name of the column. - * @return the name as a column name. - */ - public static Object column(String name) { - return new Utils.CName(name); - } - - /** - * Creates a path composed of the given path {@code segments}. - *

    - * All provided path segments will be concatenated together with dots. - * If any segment contains an identifier that needs quoting, - * caller code is expected to call {@link #quote(String)} prior to - * invoking this method. - *

    - * This method is currently only useful when accessing individual fields of a - * {@link com.datastax.driver.core.UserType user-defined type} (UDT), - * which is only possible since CASSANDRA-7423. - *

    - * Note that currently nested UDT fields are not supported and - * will be rejected by the server as a - * {@link com.datastax.driver.core.exceptions.SyntaxError syntax error}. - * - * @param segments the segments of the path to create. - * @return the segments concatenated as a single path. - * @see CASSANDRA-7423 - */ - public static Object path(String... segments) { - return new Utils.Path(segments); - } - - /** - * Creates a {@code fromJson()} function call. - *

    - * Support for JSON functions has been added in Cassandra 2.2. - * The {@code fromJson()} function is similar to {@code INSERT JSON} statements, - * but applies to a single column value instead of the entire row, and - * converts a JSON object into the normal Cassandra column value. - *

    - * It may be used in {@code INSERT} and {@code UPDATE} statements, - * but NOT in the selection clause of a {@code SELECT} statement. - *

    - * The provided object can be of the following types: - *

      - * 1. A raw string. In this case, it will be appended to the query string as is. It should NOT be
      - *    surrounded by single quotes. Its format should generally match that returned by a
      - *    {@code SELECT JSON} statement on the same table. Note that it is not possible to insert
      - *    function calls nor bind markers in a JSON string.
      - * 2. A {@link QueryBuilder#bindMarker() bind marker}. In this case, the statement is meant to be
      - *    prepared and no JSON string will be appended to the query string, only a bind marker for the
      - *    whole JSON parameter.
      - * 3. Any object that can be serialized to JSON. Such objects can be used provided that a matching
      - *    {@link com.datastax.driver.core.TypeCodec codec} is registered with the
      - *    {@link com.datastax.driver.core.CodecRegistry CodecRegistry} in use. This allows the usage of
      - *    JSON libraries, such as the Java API for JSON processing, the popular Jackson library, or
      - *    Google's Gson library, for instance.
      - *

    - * When passing raw strings to this method, the following rules apply: - *

      - *
      - * 1. String values should be enclosed in double quotes.
      - * 2. Double quotes appearing inside strings should be escaped with a backslash, but single quotes
      - *    should be escaped in the CQL manner, i.e. by another single quote. For example, the column
      - *    value {@code foo"'bar} should be inserted in the JSON string as {@code "foo\"''bar"}.
      - *

    - * Support for JSON functions has been added in Cassandra 2.2. - * The {@code toJson()} function is similar to {@code SELECT JSON} statements, - * but applies to a single column value instead of the entire row, - * and produces a JSON-encoded string representing the normal Cassandra column value. - *

    - * It may only be used in the selection clause of a {@code SELECT} statement. - * - * @param column the column to retrieve JSON from. - * @return the function call. - * @see JSON Support for CQL - * @see JSON Support in Cassandra 2.2 - */ - public static Object toJson(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("toJson", column); - } - - /** - * Creates an alias for a given column. - *
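A sketch tying `fromJson()` and `toJson()` together; the `users` table and its `address` column are assumptions, and the generated CQL in the comments is approximate:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.fromJson;
import static com.datastax.driver.core.querybuilder.QueryBuilder.toJson;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class ColumnJsonExample {
  public static void main(String[] args) {
    // Write a single column from a JSON snippet (fromJson is allowed in INSERT/UPDATE only).
    Statement insert =
        QueryBuilder.insertInto("users")
            .value("id", 1)
            .value("address", fromJson("{\"street\": \"1 Main St\"}"));

    // Read it back as JSON (toJson is only allowed in the selection clause of a SELECT),
    // roughly: SELECT toJson(address) FROM users WHERE id=1;
    Statement select = QueryBuilder.select(toJson("address")).from("users").where(eq("id", 1));

    System.out.println(insert);
    System.out.println(select);
  }
}
```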

    - * This is most useful when used with the method {@link #select(Object...)}. - * - * @param column The column to create an alias for. - * @param alias The column alias. - * @return a column alias. - */ - public static Object alias(Object column, String alias) { - return new Utils.Alias(column, alias); - } - - /** - * Creates a {@code count(x)} built-in function call. - * - * @return the function call. - */ - public static Object count(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("count", column); - } - - /** - * Creates a {@code max(x)} built-in function call. - * - * @return the function call. - */ - public static Object max(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("max", column); - } - - /** - * Creates a {@code min(x)} built-in function call. - * - * @return the function call. - */ - public static Object min(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("min", column); - } - - /** - * Creates a {@code sum(x)} built-in function call. - * - * @return the function call. - */ - public static Object sum(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("sum", column); - } - - /** - * Creates an {@code avg(x)} built-in function call. - * - * @return the function call. - */ - public static Object avg(Object column) { - // consider a String literal as a column name for user convenience, - // as CQL literals are not allowed here. - if (column instanceof String) - column = column(((String) column)); - return new Utils.FCall("avg", column); - } - + private QueryBuilder() {} + + /** + * Starts building a new {@code SELECT} query that selects the provided names. + * + *

    Note that {@code select(c1, c2)} is just a shortcut for {@code + * select().column(c1).column(c2)}. + * + * @param columns the columns names that should be selected by the query. + * @return an in-construction {@code SELECT} query (you will need to provide at least a {@code + * FROM} clause to complete the query). + */ + public static Select.Builder select(String... columns) { + return select((Object[]) columns); + } + + /** + * Starts building a new {@code SELECT} query that selects the provided names. + * + *

    Note that {@code select(c1, c2)} is just a shortcut for {@code + * select().column(c1).column(c2)}. + * + * @param columns the columns names that should be selected by the query. + * @return an in-construction {@code SELECT} query (you will need to provide at least a {@code + * FROM} clause to complete the query). + */ + public static Select.Builder select(Object... columns) { + return new Select.Builder(Arrays.asList(columns)); + } + + /** + * Starts building a new {@code SELECT} query. + * + * @return an in-construction {@code SELECT} query (you will need to provide a column selection + * and at least a {@code FROM} clause to complete the query). + */ + public static Select.Selection select() { + // Note: the fact we return Select.Selection as return type is on purpose. + return new Select.SelectionOrAlias(); + } + + /** + * Starts building a new {@code INSERT} query. + * + * @param table the name of the table in which to insert. + * @return an in-construction {@code INSERT} query. + */ + public static Insert insertInto(String table) { + return new Insert(null, table); + } + + /** + * Starts building a new {@code INSERT} query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to insert into. + * @return an in-construction {@code INSERT} query. + */ + public static Insert insertInto(String keyspace, String table) { + return new Insert(keyspace, table); + } + + /** + * Starts building a new {@code INSERT} query. + * + * @param table the name of the table to insert into. + * @return an in-construction {@code INSERT} query. + */ + public static Insert insertInto(TableMetadata table) { + return new Insert(table); + } + + /** + * Starts building a new {@code UPDATE} query. + * + * @param table the name of the table to update. + * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} + * clause needs to be provided to complete the query). + */ + public static Update update(String table) { + return new Update(null, table); + } + + /** + * Starts building a new {@code UPDATE} query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to update. + * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} + * clause needs to be provided to complete the query). + */ + public static Update update(String keyspace, String table) { + return new Update(keyspace, table); + } + + /** + * Starts building a new {@code UPDATE} query. + * + * @param table the name of the table to update. + * @return an in-construction {@code UPDATE} query (at least a {@code SET} and a {@code WHERE} + * clause needs to be provided to complete the query). + */ + public static Update update(TableMetadata table) { + return new Update(table); + } + + /** + * Starts building a new {@code DELETE} query that deletes the provided names. + * + * @param columns the columns names that should be deleted by the query. + * @return an in-construction {@code DELETE} query (At least a {@code FROM} and a {@code WHERE} + * clause needs to be provided to complete the query). + */ + public static Delete.Builder delete(String... columns) { + return new Delete.Builder(columns); + } + + /** + * Starts building a new {@code DELETE} query. + * + * @return an in-construction {@code DELETE} query (you will need to provide a column selection + * and at least a {@code FROM} and a {@code WHERE} clause to complete the query). 
+ */ + public static Delete.Selection delete() { + return new Delete.Selection(); + } + + /** + * Builds a new {@code BATCH} query on the provided statements. + * + *
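For illustration, a minimal sketch of how the entry points above combine into statements (the `ks.users` table, its columns and the class name are made up; batches are covered further below):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: ks.users(id int PRIMARY KEY, name text) is a made-up table.
public class BuilderEntryPointsSketch {
  public static void main(String[] args) {
    System.out.println(select("id", "name").from("ks", "users").where(eq("id", 42)));
    // roughly: SELECT id,name FROM ks.users WHERE id=42;
    System.out.println(insertInto("ks", "users").value("id", 42).value("name", "alice"));
    // roughly: INSERT INTO ks.users (id,name) VALUES (42,'alice');
    System.out.println(update("ks", "users").with(set("name", "bob")).where(eq("id", 42)));
    // roughly: UPDATE ks.users SET name='bob' WHERE id=42;
    System.out.println(delete().from("ks", "users").where(eq("id", 42)));
    // roughly: DELETE FROM ks.users WHERE id=42;
  }
}
```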

    This method will build a logged batch (this is the default in CQL3). To create unlogged + * batches, use {@link #unloggedBatch}. Also note that for convenience, if the provided statements + * are counter statements, this method will create a {@code COUNTER} batch even though COUNTER + * batches are never logged (so for counters, using this method is effectively equivalent to using + * {@link #unloggedBatch}). + * + * @param statements the statements to batch. + * @return a new {@code RegularStatement} that batch {@code statements}. + */ + public static Batch batch(RegularStatement... statements) { + return new Batch(statements, true); + } + + /** + * Builds a new {@code UNLOGGED BATCH} query on the provided statements. + * + *

Compared to logged batches (the default), unlogged batches don't use the distributed batch log + * server side and as such are not guaranteed to be atomic. In other words, if an unlogged batch + * times out, some of the batched statements may have been persisted while others have not. Unlogged + * batches will however be slightly faster than logged batches. + * + *

    If the statements added to the batch are counter statements, the resulting batch will be a + * {@code COUNTER} one. + * + * @param statements the statements to batch. + * @return a new {@code RegularStatement} that batch {@code statements} without using the batch + * log. + */ + public static Batch unloggedBatch(RegularStatement... statements) { + return new Batch(statements, false); + } + + /** + * Creates a new {@code TRUNCATE} query. + * + * @param table the name of the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(String table) { + return new Truncate(null, table); + } + + /** + * Creates a new {@code TRUNCATE} query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(String keyspace, String table) { + return new Truncate(keyspace, table); + } + + /** + * Creates a new {@code TRUNCATE} query. + * + * @param table the table to truncate. + * @return the truncation query. + */ + public static Truncate truncate(TableMetadata table) { + return new Truncate(table); + } + + /** + * Quotes a column name to make it case sensitive. + * + * @param columnName the column name to quote. + * @return the quoted column name. + * @see Metadata#quote(String) + */ + public static String quote(String columnName) { + return Metadata.quote(columnName); + } + + /** + * The token of a column name. + * + * @param columnName the column name to take the token of. + * @return {@code "token(" + columnName + ")"}. + */ + public static String token(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.appendName(columnName, sb); + sb.append(')'); + return sb.toString(); + } + + /** + * The token of column names. + * + *
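A minimal sketch contrasting logged and unlogged batches, again against a made-up `ks.users` table:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: ks.users is a made-up table; the two updates hit the same partition on purpose.
public class BatchSketch {
  public static void main(String[] args) {
    System.out.println(batch(
        update("ks", "users").with(set("name", "alice")).where(eq("id", 1)),
        update("ks", "users").with(set("email", "alice@example.com")).where(eq("id", 1))));
    // roughly: BEGIN BATCH UPDATE ...;UPDATE ...;APPLY BATCH;
    System.out.println(unloggedBatch(
        insertInto("ks", "users").value("id", 2).value("name", "bob"),
        insertInto("ks", "users").value("id", 3).value("name", "carol")));
    // roughly: BEGIN UNLOGGED BATCH INSERT ...;INSERT ...;APPLY BATCH;
  }
}
```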

    This variant is most useful when the partition key is composite. + * + * @param columnNames the column names to take the token of. + * @return a string representing the token of the provided column names. + */ + public static String token(String... columnNames) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.joinAndAppendNames(sb, null, Arrays.asList(columnNames)); + sb.append(')'); + return sb.toString(); + } + + /** + * Returns a generic {@code token} function call. + * + * @param values the arguments of the {@code token} function. + * @return {@code token} function call. + */ + public static Object token(Object... values) { + return new Utils.FCall("token", values); + } + + /** + * Creates an "equal" {@code WHERE} clause stating the provided column must be equal to the + * provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause eq(String name, Object value) { + return new Clause.SimpleClause(name, "=", value); + } + + /** + * Creates an "equal" {@code WHERE} clause for a group of clustering columns. + * + *
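A minimal sketch of `token()` used as a column name in a where clause, here driving a token-range scan with the `gt`/`lte` clauses described further below (table and class names are made up):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: scanning a made-up ks.users table by token ranges, one range per execution.
public class TokenRangeSketch {
  public static void main(String[] args) {
    System.out.println(select().from("ks", "users")
        .where(gt(token("id"), bindMarker()))
        .and(lte(token("id"), bindMarker())));
    // roughly: SELECT * FROM ks.users WHERE token(id)>? AND token(id)<=?;
  }
}
```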

    For instance, {@code eq(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) = (2, 'test') }. + * + *

    Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause eq(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, "=", values); + } + + /** + * Creates a "not equal" {@code WHERE} clause stating the provided column must be different from + * the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause ne(String name, Object value) { + return new Clause.SimpleClause(name, "!=", value); + } + + /** + * Creates an "not equal" {@code WHERE} clause for a group of clustering columns. + * + *

For instance, {@code ne(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) != (2, 'test') }. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause ne(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, "!=", values); + } + + /** + * Creates an "IS NOT NULL" {@code WHERE} clause for the provided column. + * + * @param name the column name + * @return the corresponding where clause. + */ + public static Clause notNull(String name) { + return new Clause.IsNotNullClause(name); + } + + /** + * Creates a "like" {@code WHERE} clause stating that the provided column must match the + * provided value. + * + * @param name the column name. + * @param value the value. + * @return the corresponding where clause. + */ + public static Clause like(String name, Object value) { + return new Clause.SimpleClause(name, " LIKE ", value); + } + + /** + * Creates an "in" {@code WHERE} clause stating the provided column must be equal to one of the + * provided values. + * + * @param name the column name + * @param values the values + * @return the corresponding where clause. + */ + public static Clause in(String name, Object... values) { + return new Clause.InClause(name, Arrays.asList(values)); + } + + /** + * Creates an "in" {@code WHERE} clause stating the provided column must be equal to one of the + * provided values. + * + * @param name the column name + * @param values the values + * @return the corresponding where clause. + */ + public static Clause in(String name, Iterable values) { + return new Clause.InClause(name, values); + } + + /** + * Creates an "in" {@code WHERE} clause for a group of clustering columns (a.k.a. "multi-column IN + * restriction"). + * + *

    For instance, {@code in(Arrays.asList("a", "b"), Arrays.asList(Arrays.asList(1, "foo"), + * Arrays.asList(2, "bar")))} will generate the CQL {@code WHERE} clause {@code (a, b) IN ((1, + * 'foo'), (2, 'bar'))}. + * + *

    Each element in {@code values} must be either an {@link Iterable iterable} containing + * exactly as many values as there are columns to match in {@code names}, or a {@link + * #bindMarker() bind marker} – in which case, that marker is to be considered as a placeholder + * for one whole tuple of values to match. + * + *

    Please note that this variant is only supported starting with Cassandra 2.0.9. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if the size of any tuple in {@code values} is not equal to + * {@code names.size()}, or if {@code values} contains elements that are neither {@link List + * lists} nor {@link #bindMarker() bind markers}. + */ + public static Clause in(Iterable names, Iterable values) { + return new Clause.CompoundInClause(names, values); + } + + /** + * Creates a "contains" {@code WHERE} clause stating the provided column must contain the value + * provided. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause contains(String name, Object value) { + return new Clause.ContainsClause(name, value); + } + + /** + * Creates a "contains key" {@code WHERE} clause stating the provided column must contain the key + * provided. + * + * @param name the column name + * @param key the key + * @return the corresponding where clause. + */ + public static Clause containsKey(String name, Object key) { + return new Clause.ContainsKeyClause(name, key); + } + + /** + * Creates a "lesser than" {@code WHERE} clause stating the provided column must be less than the + * provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause lt(String name, Object value) { + return new Clause.SimpleClause(name, "<", value); + } + + /** + * Creates a "lesser than" {@code WHERE} clause for a group of clustering columns. + * + *
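A minimal sketch of `contains`/`containsKey` against a made-up table with collection columns (such queries also need suitable indexes server side):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: made-up ks.users table with a set<text> column "emails"
// and a map<text,text> column "prefs".
public class ContainsSketch {
  public static void main(String[] args) {
    System.out.println(select().from("ks", "users")
        .where(contains("emails", "alice@example.com")));
    // roughly: SELECT * FROM ks.users WHERE emails CONTAINS 'alice@example.com';
    System.out.println(select().from("ks", "users")
        .where(containsKey("prefs", "theme")));
    // roughly: SELECT * FROM ks.users WHERE prefs CONTAINS KEY 'theme';
  }
}
```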

    For instance, {@code lt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) < (2, 'test') }. + * + *

    Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause lt(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, "<", values); + } + + /** + * Creates a "lesser than or equal" {@code WHERE} clause stating the provided column must be + * lesser than or equal to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause lte(String name, Object value) { + return new Clause.SimpleClause(name, "<=", value); + } + + /** + * Creates a "lesser than or equal" {@code WHERE} clause for a group of clustering columns. + * + *

For instance, {@code lte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) <= (2, 'test') }. + * + *

Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause lte(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, "<=", values); + } + + /** + * Creates a "greater than" {@code WHERE} clause stating the provided column must be greater than + * the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause gt(String name, Object value) { + return new Clause.SimpleClause(name, ">", value); + } + + /** + * Creates a "greater than" {@code WHERE} clause for a group of clustering columns. + * + *

    For instance, {@code gt(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) > (2, 'test') }. + * + *

    Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause gt(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, ">", values); + } + + /** + * Creates a "greater than or equal" {@code WHERE} clause stating the provided column must be + * greater than or equal to the provided value. + * + * @param name the column name + * @param value the value + * @return the corresponding where clause. + */ + public static Clause gte(String name, Object value) { + return new Clause.SimpleClause(name, ">=", value); + } + + /** + * Creates a "greater than or equal" {@code WHERE} clause for a group of clustering columns. + * + *

For instance, {@code gte(Arrays.asList("a", "b"), Arrays.asList(2, "test"))} will generate + * the CQL {@code WHERE} clause {@code (a, b) >= (2, 'test') }. + * + *

    Please note that this variant is only supported starting with Cassandra 2.0.6. + * + * @param names the column names + * @param values the values + * @return the corresponding where clause. + * @throws IllegalArgumentException if {@code names.size() != values.size()}. + */ + public static Clause gte(Iterable names, Iterable values) { + return new Clause.CompoundClause(names, ">=", values); + } + + /** + * Ascending ordering for the provided column. + * + * @param columnName the column name + * @return the corresponding ordering + */ + public static Ordering asc(String columnName) { + return new Ordering(columnName, false); + } + + /** + * Descending ordering for the provided column. + * + * @param columnName the column name + * @return the corresponding ordering + */ + public static Ordering desc(String columnName) { + return new Ordering(columnName, true); + } + + /** + * Option to set the timestamp for a modification query (insert, update or delete). + * + * @param timestamp the timestamp (in microseconds) to use. + * @return the corresponding option + * @throws IllegalArgumentException if {@code timestamp < 0}. + */ + public static Using timestamp(long timestamp) { + if (timestamp < 0) throw new IllegalArgumentException("Invalid timestamp, must be positive"); + + return new Using.WithValue("TIMESTAMP", timestamp); + } + + /** + * Option to prepare the timestamp (in microseconds) for a modification query (insert, update or + * delete). + * + * @param marker bind marker to use for the timestamp. + * @return the corresponding option. + */ + public static Using timestamp(BindMarker marker) { + return new Using.WithMarker("TIMESTAMP", marker); + } + + /** + * Option to set the ttl for a modification query (insert, update or delete). + * + * @param ttl the ttl (in seconds) to use. + * @return the corresponding option + * @throws IllegalArgumentException if {@code ttl < 0}. + */ + public static Using ttl(int ttl) { + if (ttl < 0) throw new IllegalArgumentException("Invalid ttl, must be positive"); + + return new Using.WithValue("TTL", ttl); + } + + /** + * Option to prepare the ttl (in seconds) for a modification query (insert, update or delete). + * + * @param marker bind marker to use for the ttl. + * @return the corresponding option + */ + public static Using ttl(BindMarker marker) { + return new Using.WithMarker("TTL", marker); + } + + /** + * Simple "set" assignment of a value to a column. + * + *
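A minimal sketch of the `USING` options above attached to an insert (made-up table; the literal timestamp is just a placeholder value in microseconds):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: made-up ks.users table; TTL is in seconds, TIMESTAMP in microseconds.
public class UsingOptionsSketch {
  public static void main(String[] args) {
    System.out.println(insertInto("ks", "users")
        .value("id", 1)
        .value("name", "alice")
        .using(ttl(86400))
        .and(timestamp(1234567890000000L)));
    // roughly: INSERT INTO ks.users (id,name) VALUES (1,'alice') USING TTL 86400 AND TIMESTAMP 1234567890000000;
  }
}
```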

    This will generate: + * + *

    +   * name = value
    +   * 
    + * + * The column name will only be quoted if it contains special characters, as in: + * + *
    +   * "a name that contains spaces" = value
    +   * 
    + * + * Otherwise, if you want to force case sensitivity, use {@link #quote(String)}: + * + *
    +   * set(quote("aCaseSensitiveName"), value)
    +   * 
    + * + * This method won't work to set UDT fields; use {@link #set(Object, Object)} with a {@link + * #path(String...) path} instead: + * + *
    +   * set(path("udt", "field"), value)
    +   * 
    + * + * @param name the column name + * @param value the value to assign + * @return the correspond assignment (to use in an update query) + */ + public static Assignment set(String name, Object value) { + return new Assignment.SetAssignment(name, value); + } + + /** + * Advanced "set" assignment of a value to a column or a {@link com.datastax.driver.core.UserType + * UDT} field. + * + *

    This method is seldom preferable to {@link #set(String, Object)}; it is only useful: + * + *

      + *
    • when assigning values to individual fields of a UDT (see {@link #path(String...)}): + *
      +   * set(path("udt", "field"), value)
      +   * 
      + *
• if you wish to pass a "raw" string that will get appended as-is to the query (see {@link + * #raw(String)}). There is no practical usage for this at the time of writing, but it will + * serve as a workaround if new features are added to Cassandra and you're using an older + * driver version that is not yet aware of them: + *
      +   * set(raw("some custom string"), value)
      +   * 
      + *
    + * + * If the runtime type of {@code name} is {@code String}, this method is equivalent to {@link + * #set(String, Object)}. + * + * @param name the column or UDT field name + * @param value the value to assign + * @return the correspond assignment (to use in an update query) + */ + public static Assignment set(Object name, Object value) { + return new Assignment.SetAssignment(name, value); + } + + /** + * Incrementation of a counter column. + * + *

    This will generate: {@code name = name + 1}. + * + * @param name the column name to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name) { + return incr(name, 1L); + } + + /** + * Incrementation of a counter column by a provided value. + * + *

    This will generate: {@code name = name + value}. + * + * @param name the column name to increment + * @param value the value by which to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name, long value) { + return new Assignment.CounterAssignment(name, value, true); + } + + /** + * Incrementation of a counter column by a provided value. + * + *

    This will generate: {@code name = name + value}. + * + * @param name the column name to increment + * @param value a bind marker representing the value by which to increment + * @return the correspond assignment (to use in an update query) + */ + public static Assignment incr(String name, BindMarker value) { + return new Assignment.CounterAssignment(name, value, true); + } + + /** + * Decrementation of a counter column. + * + *

    This will generate: {@code name = name - 1}. + * + * @param name the column name to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name) { + return decr(name, 1L); + } + + /** + * Decrementation of a counter column by a provided value. + * + *

    This will generate: {@code name = name - value}. + * + * @param name the column name to decrement + * @param value the value by which to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name, long value) { + return new Assignment.CounterAssignment(name, value, false); + } + + /** + * Decrementation of a counter column by a provided value. + * + *

    This will generate: {@code name = name - value}. + * + * @param name the column name to decrement + * @param value a bind marker representing the value by which to decrement + * @return the correspond assignment (to use in an update query) + */ + public static Assignment decr(String name, BindMarker value) { + return new Assignment.CounterAssignment(name, value, false); + } + + /** + * Prepend a value to a list column. + * + *
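A minimal sketch of counter increments and decrements against a made-up counter table:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: made-up ks.page_views table with a counter column "views" keyed by "page".
public class CounterSketch {
  public static void main(String[] args) {
    System.out.println(update("ks", "page_views")
        .with(incr("views"))      // views = views + 1
        .where(eq("page", "/home")));
    System.out.println(update("ks", "page_views")
        .with(decr("views", 5L))  // views = views - 5
        .where(eq("page", "/home")));
  }
}
```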

    This will generate: {@code name = [ value ] + name}. + * + * @param name the column name (must be of type list). + * @param value the value to prepend. Using a BindMarker here is not supported. To use a + * BindMarker use {@code QueryBuilder#prependAll} with a singleton list. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prepend(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException( + "binding a value in prepend() is not supported, use prependAll() and bind a singleton list"); + } + return prependAll(name, Collections.singletonList(value)); + } + + /** + * Prepend a list of values to a list column. + * + *

    This will generate: {@code name = list + name}. + * + * @param name the column name (must be of type list). + * @param list the list of values to prepend. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prependAll(String name, List list) { + return new Assignment.ListPrependAssignment(name, list); + } + + /** + * Prepend a list of values to a list column. + * + *

    This will generate: {@code name = list + name}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to prepend. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment prependAll(String name, BindMarker list) { + return new Assignment.ListPrependAssignment(name, list); + } + + /** + * Append a value to a list column. + * + *

    This will generate: {@code name = name + [value]}. + * + * @param name the column name (must be of type list). + * @param value the value to append. Using a BindMarker here is not supported. To use a BindMarker + * use {@code QueryBuilder#appendAll} with a singleton list. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment append(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException( + "Binding a value in append() is not supported, use appendAll() and bind a singleton list"); + } + return appendAll(name, Collections.singletonList(value)); + } + + /** + * Append a list of values to a list column. + * + *

    This will generate: {@code name = name + list}. + * + * @param name the column name (must be of type list). + * @param list the list of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment appendAll(String name, List list) { + return new Assignment.CollectionAssignment(name, list, true, false); + } + + /** + * Append a list of values to a list column. + * + *

    This will generate: {@code name = name + list}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment appendAll(String name, BindMarker list) { + return new Assignment.CollectionAssignment(name, list, true, false); + } + + /** + * Discard a value from a list column. + * + *

This will generate: {@code name = name - [value]}. + * + * @param name the column name (must be of type list). + * @param value the value to discard. Using a BindMarker here is not supported. To use a + * BindMarker use {@code QueryBuilder#discardAll} with a singleton list. + * @return the corresponding assignment (to use in an update query) + */ + public static Assignment discard(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException( + "Binding a value in discard() is not supported, use discardAll() and bind a singleton list"); + } + return discardAll(name, Collections.singletonList(value)); + } + + /** + * Discard a list of values from a list column. + * + *

This will generate: {@code name = name - list}. + * + * @param name the column name (must be of type list). + * @param list the list of values to discard + * @return the corresponding assignment (to use in an update query) + */ + public static Assignment discardAll(String name, List list) { + return new Assignment.CollectionAssignment(name, list, false); + } + + /** + * Discard a list of values from a list column. + * + *

This will generate: {@code name = name - list}. + * + * @param name the column name (must be of type list). + * @param list a bind marker representing the list of values to discard + * @return the corresponding assignment (to use in an update query) + */ + public static Assignment discardAll(String name, BindMarker list) { + return new Assignment.CollectionAssignment(name, list, false); + } + + /** + * Sets a list column value by index. + * + *

    This will generate: {@code name[idx] = value}. + * + * @param name the column name (must be of type list). + * @param idx the index to set + * @param value the value to set + * @return the correspond assignment (to use in an update query) + */ + public static Assignment setIdx(String name, int idx, Object value) { + return new Assignment.ListSetIdxAssignment(name, idx, value); + } + + /** + * Adds a value to a set column. + * + *
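A minimal sketch of the list assignments above, one operation per statement (made-up table and column names):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import java.util.Arrays;

// Sketch only: made-up ks.playlists table with a list<text> column "songs" keyed by "id".
public class ListOpsSketch {
  public static void main(String[] args) {
    System.out.println(update("ks", "playlists")
        .with(prependAll("songs", Arrays.asList("intro", "overture"))) // songs = ['intro','overture'] + songs
        .where(eq("id", 1)));
    System.out.println(update("ks", "playlists")
        .with(append("songs", "finale"))                               // songs = songs + ['finale']
        .where(eq("id", 1)));
    System.out.println(update("ks", "playlists")
        .with(setIdx("songs", 0, "new intro"))                         // songs[0] = 'new intro'
        .where(eq("id", 1)));
  }
}
```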

    This will generate: {@code name = name + {value}}. + * + * @param name the column name (must be of type set). + * @param value the value to add. Using a BindMarker here is not supported. To use a BindMarker + * use {@code QueryBuilder#addAll} with a singleton set. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment add(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException( + "Binding a value in add() is not supported, use addAll() and bind a singleton list"); + } + return addAll(name, Collections.singleton(value)); + } + + /** + * Adds a set of values to a set column. + * + *

    This will generate: {@code name = name + set}. + * + * @param name the column name (must be of type set). + * @param set the set of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment addAll(String name, Set set) { + return new Assignment.CollectionAssignment(name, set, true); + } + + /** + * Adds a set of values to a set column. + * + *

    This will generate: {@code name = name + set}. + * + * @param name the column name (must be of type set). + * @param set a bind marker representing the set of values to append + * @return the correspond assignment (to use in an update query) + */ + public static Assignment addAll(String name, BindMarker set) { + return new Assignment.CollectionAssignment(name, set, true); + } + + /** + * Remove a value from a set column. + * + *

    This will generate: {@code name = name - {value}}. + * + * @param name the column name (must be of type set). + * @param value the value to remove. Using a BindMarker here is not supported. To use a BindMarker + * use {@code QueryBuilder#removeAll} with a singleton set. + * @return the correspond assignment (to use in an update query) + */ + public static Assignment remove(String name, Object value) { + if (value instanceof BindMarker) { + throw new InvalidQueryException( + "Binding a value in remove() is not supported, use removeAll() and bind a singleton set"); + } + return removeAll(name, Collections.singleton(value)); + } + + /** + * Remove a set of values from a set column. + * + *

    This will generate: {@code name = name - set}. + * + * @param name the column name (must be of type set). + * @param set the set of values to remove + * @return the correspond assignment (to use in an update query) + */ + public static Assignment removeAll(String name, Set set) { + return new Assignment.CollectionAssignment(name, set, false); + } + + /** + * Remove a set of values from a set column. + * + *

    This will generate: {@code name = name - set}. + * + * @param name the column name (must be of type set). + * @param set a bind marker representing the set of values to remove + * @return the correspond assignment (to use in an update query) + */ + public static Assignment removeAll(String name, BindMarker set) { + return new Assignment.CollectionAssignment(name, set, false); + } + + /** + * Puts a new key/value pair to a map column. + * + *
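A minimal sketch of the set assignments above, one operation per statement (made-up table):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import java.util.Collections;

// Sketch only: made-up ks.users table with a set<text> column "emails" keyed by "id".
public class SetOpsSketch {
  public static void main(String[] args) {
    System.out.println(update("ks", "users")
        .with(add("emails", "alice@example.com"))                           // emails = emails + {...}
        .where(eq("id", 1)));
    System.out.println(update("ks", "users")
        .with(removeAll("emails", Collections.singleton("old@example.com"))) // emails = emails - {...}
        .where(eq("id", 1)));
  }
}
```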

    This will generate: {@code name[key] = value}. + * + * @param name the column name (must be of type map). + * @param key the key to put + * @param value the value to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment put(String name, Object key, Object value) { + return new Assignment.MapPutAssignment(name, key, value); + } + + /** + * Puts a map of new key/value pairs to a map column. + * + *

    This will generate: {@code name = name + map}. + * + * @param name the column name (must be of type map). + * @param map the map of key/value pairs to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment putAll(String name, Map map) { + return new Assignment.CollectionAssignment(name, map, true); + } + + /** + * Puts a map of new key/value pairs to a map column. + * + *

    This will generate: {@code name = name + map}. + * + * @param name the column name (must be of type map). + * @param map a bind marker representing the map of key/value pairs to put + * @return the correspond assignment (to use in an update query) + */ + public static Assignment putAll(String name, BindMarker map) { + return new Assignment.CollectionAssignment(name, map, true); + } + + /** + * An object representing an anonymous bind marker (a question mark). + * + *
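A minimal sketch of the map assignments above, one operation per statement (made-up table):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

import java.util.HashMap;
import java.util.Map;

// Sketch only: made-up ks.users table with a map<text,text> column "prefs" keyed by "id".
public class MapOpsSketch {
  public static void main(String[] args) {
    Map<String, String> extras = new HashMap<String, String>();
    extras.put("lang", "en");
    extras.put("tz", "UTC");
    System.out.println(update("ks", "users")
        .with(put("prefs", "theme", "dark")) // prefs['theme'] = 'dark'
        .where(eq("id", 1)));
    System.out.println(update("ks", "users")
        .with(putAll("prefs", extras))       // prefs = prefs + {'lang':'en','tz':'UTC'}
        .where(eq("id", 1)));
  }
}
```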

    This can be used wherever a value is expected. For instance, one can do: + * + *

    {@code
    +   * Insert i = QueryBuilder.insertInto("test").value("k", 0)
    +   *                                           .value("c", QueryBuilder.bindMarker());
    +   * PreparedStatement p = session.prepare(i.toString());
    +   * }
    + * + * @return a new bind marker. + */ + public static BindMarker bindMarker() { + return BindMarker.ANONYMOUS; + } + + /** + * An object representing a named bind marker. + * + *

    This can be used wherever a value is expected. For instance, one can do: + * + *

    {@code
    +   * Insert i = QueryBuilder.insertInto("test").value("k", 0)
    +   *                                           .value("c", QueryBuilder.bindMarker("c_val"));
    +   * PreparedStatement p = session.prepare(i.toString());
    +   * }
    + * + *

Please note that named bind markers are only supported starting with Cassandra 2.0.1. + * + * @param name the name for the bind marker. + * @return an object representing a bind marker named {@code name}. + */ + public static BindMarker bindMarker(String name) { + return new BindMarker(name); + } + + /** + * Protects a value from any interpretation by the query builder. + * + *

The following table exemplifies the behavior of this function:
Examples of use
Code | Resulting query string
{@code select().from("t").where(eq("c", "C'est la vie!")); } | {@code "SELECT * FROM t WHERE c='C''est la vie!';"}
{@code select().from("t").where(eq("c", raw("C'est la vie!"))); } | {@code "SELECT * FROM t WHERE c=C'est la vie!;"}
{@code select().from("t").where(eq("c", raw("'C'est la vie!'"))); } | {@code "SELECT * FROM t WHERE c='C'est la vie!';"}
{@code select().from("t").where(eq("c", "now()")); } | {@code "SELECT * FROM t WHERE c='now()';"}
{@code select().from("t").where(eq("c", raw("now()"))); } | {@code "SELECT * FROM t WHERE c=now();"}
Note: the 2nd and 3rd examples in this table are not valid CQL3 queries.

The use of that method is generally discouraged since it leads to security risks. However, if + * you know what you are doing, it allows you to escape the interpretations done by the QueryBuilder. + * + * @param str the raw value to use as a string + * @return the value but protected from being interpreted/escaped by the query builder. + */ + public static Object raw(String str) { + return new Utils.RawString(str); + } + + /** + * Creates a function call. + * + * @param name the name of the function to call. + * @param parameters the parameters for the function. + * @return the function call. + */ + public static Object fcall(String name, Object... parameters) { + return new Utils.FCall(name, parameters); + } + + /** + * Creates a Cast of a column using the given dataType. + * + * @param column the column to cast. + * @param dataType the data type to cast to. + * @return the casted column. + */ + public static Object cast(Object column, DataType dataType) { + return new Utils.Cast(column, dataType); + } + + /** + * Creates a {@code now()} function call. + * + * @return the function call. + */ + public static Object now() { + return new Utils.FCall("now"); + } + + /** + * Creates a {@code uuid()} function call. + * + * @return the function call. + */ + public static Object uuid() { + return new Utils.FCall("uuid"); + } + + /** + * Declares that the name in argument should be treated as a column name. + * + *

This is mainly meant for use with {@link Select.Selection#fcall} when a function should apply + * to a column name, not a string value. + * + * @param name the name of the column. + * @return the name as a column name. + */ + public static Object column(String name) { + return new Utils.CName(name); + } + + /** + * Creates a path composed of the given path {@code segments}. + * + *

    All provided path segments will be concatenated together with dots. If any segment contains + * an identifier that needs quoting, caller code is expected to call {@link #quote(String)} prior + * to invoking this method. + * + *

    This method is currently only useful when accessing individual fields of a {@link + * com.datastax.driver.core.UserType user-defined type} (UDT), which is only possible since + * CASSANDRA-7423. + * + *

    Note that currently nested UDT fields are not supported and will be rejected by the server + * as a {@link com.datastax.driver.core.exceptions.SyntaxError syntax error}. + * + * @param segments the segments of the path to create. + * @return the segments concatenated as a single path. + * @see CASSANDRA-7423 + */ + public static Object path(String... segments) { + return new Utils.Path(segments); + } + + /** + * Creates a {@code fromJson()} function call. + * + *

    Support for JSON functions has been added in Cassandra 2.2. The {@code fromJson()} function + * is similar to {@code INSERT JSON} statements, but applies to a single column value instead of + * the entire row, and converts a JSON object into the normal Cassandra column value. + * + *

    It may be used in {@code INSERT} and {@code UPDATE} statements, but NOT in the selection + * clause of a {@code SELECT} statement. + * + *

    The provided object can be of the following types: + * + *

      + *
    1. A raw string. In this case, it will be appended to the query string as is. It + * should NOT be surrounded by single quotes. Its format should generally match + * that returned by a {@code SELECT JSON} statement on the same table. Note that it is not + * possible to insert function calls nor bind markers in a JSON string. + *
    2. A {@link QueryBuilder#bindMarker() bind marker}. In this case, the statement is meant to + * be prepared and no JSON string will be appended to the query string, only a bind marker + * for the whole JSON parameter. + *
    3. Any object that can be serialized to JSON. Such objects can be used provided that a + * matching {@link com.datastax.driver.core.TypeCodec codec} is registered with the {@link + * com.datastax.driver.core.CodecRegistry CodecRegistry} in use. This allows the usage of + * JSON libraries, such as the Java API for + * JSON processing, the popular Jackson library, or Google's Gson library, for instance. + *
    + * + *

    When passing raw strings to this method, the following rules apply: + * + *

      + *
    1. String values should be enclosed in double quotes. + *
    2. Double quotes appearing inside strings should be escaped with a backslash, but single + * quotes should be escaped in the CQL manner, i.e. by another single quote. For example, + * the column value {@code foo"'bar} should be inserted in the JSON string as {@code + * "foo\"''bar"}. + *
    + * + * @param json the JSON string, or a bind marker, or a JSON object handled by a specific {@link + * com.datastax.driver.core.TypeCodec codec}. + * @return the function call. + * @see JSON Support for CQL + * @see JSON + * Support in Cassandra 2.2 + */ + public static Object fromJson(Object json) { + return fcall("fromJson", json); + } + + /** + * Creates a {@code toJson()} function call. This is a shortcut for {@code fcall("toJson", + * QueryBuilder.column(name))}. + * + *

    Support for JSON functions has been added in Cassandra 2.2. The {@code toJson()} function is + * similar to {@code SELECT JSON} statements, but applies to a single column value instead of the + * entire row, and produces a JSON-encoded string representing the normal Cassandra column value. + * + *

    It may only be used in the selection clause of a {@code SELECT} statement. + * + * @param column the column to retrieve JSON from. + * @return the function call. + * @see JSON Support for CQL + * @see JSON + * Support in Cassandra 2.2 + */ + public static Object toJson(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("toJson", column); + } + + /** + * Creates an alias for a given column. + * + *
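A minimal sketch of `fromJson()`/`toJson()` usage, using a named bind marker so the JSON payload can be bound at execution time (made-up table and column names; JSON functions require Cassandra 2.2 or later):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: made-up ks.users table with an "address" column whose value is written from JSON.
public class JsonFunctionsSketch {
  public static void main(String[] args) {
    System.out.println(insertInto("ks", "users")
        .value("id", 1)
        .value("address", fromJson(bindMarker("address_json"))));
    // roughly: INSERT INTO ks.users (id,address) VALUES (1,fromJson(:address_json));
    System.out.println(select(column("id"), toJson("address")).from("ks", "users"));
    // roughly: SELECT id,toJson(address) FROM ks.users;
  }
}
```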

    This is most useful when used with the method {@link #select(Object...)}. + * + * @param column The column to create an alias for. + * @param alias The column alias. + * @return a column alias. + */ + public static Object alias(Object column, String alias) { + return new Utils.Alias(column, alias); + } + + /** + * Creates a {@code count(x)} built-in function call. + * + * @return the function call. + */ + public static Object count(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("count", column); + } + + /** + * Creates a {@code max(x)} built-in function call. + * + * @return the function call. + */ + public static Object max(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("max", column); + } + + /** + * Creates a {@code min(x)} built-in function call. + * + * @return the function call. + */ + public static Object min(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("min", column); + } + + /** + * Creates a {@code sum(x)} built-in function call. + * + * @return the function call. + */ + public static Object sum(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("sum", column); + } + + /** + * Creates an {@code avg(x)} built-in function call. + * + * @return the function call. + */ + public static Object avg(Object column) { + // consider a String literal as a column name for user convenience, + // as CQL literals are not allowed here. + if (column instanceof String) column = column(((String) column)); + return new Utils.FCall("avg", column); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java index d070a04888c..c92fb3fa6a7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Select.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,829 +17,869 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; - +import com.datastax.driver.core.AbstractTableMetadata; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.ColumnMetadata; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.MaterializedViewMetadata; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.TableMetadata; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -/** - * A built SELECT statement. - */ +/** A built SELECT statement. */ public class Select extends BuiltStatement { - private static final List COUNT_ALL = Collections.singletonList(new Utils.FCall("count", new Utils.RawString("*"))); - - private final String table; - private final boolean isDistinct; - private final boolean isJson; - private final List columnNames; - private final Where where; - private List orderings; - private List groupByColumnNames; - private Object limit; - private Object perPartitionLimit; - private boolean allowFiltering; - - Select(String keyspace, String table, List columnNames, boolean isDistinct, boolean isJson) { - this(keyspace, table, null, null, columnNames, isDistinct, isJson); - } - - Select(TableMetadata table, List columnNames, boolean isDistinct, boolean isJson) { - this(Metadata.quoteIfNecessary(table.getKeyspace().getName()), - Metadata.quoteIfNecessary(table.getName()), - Arrays.asList(new Object[table.getPartitionKey().size()]), - table.getPartitionKey(), - columnNames, - isDistinct, - isJson); - } - - Select(String keyspace, - String table, - List routingKeyValues, - List partitionKey, - List columnNames, - boolean isDistinct, - boolean isJson) { - super(keyspace, partitionKey, routingKeyValues); - this.table = table; - this.columnNames = columnNames; - this.isDistinct = isDistinct; - this.isJson = isJson; - this.where = new Where(this); + private static final List COUNT_ALL = + Collections.singletonList(new Utils.FCall("count", new Utils.RawString("*"))); + + private final String table; + private final boolean isDistinct; + private final boolean isJson; + private final List columnNames; + private final Where where; + private List orderings; + private List groupByColumnNames; + private Object limit; + private Object perPartitionLimit; + private boolean allowFiltering; + + Select( + String keyspace, String table, List columnNames, boolean isDistinct, boolean isJson) { + this(keyspace, table, null, null, columnNames, isDistinct, isJson); + } + + Select( + AbstractTableMetadata table, List columnNames, boolean isDistinct, boolean isJson) { + this( + Metadata.quoteIfNecessary(table.getKeyspace().getName()), + Metadata.quoteIfNecessary(table.getName()), + Arrays.asList(new Object[table.getPartitionKey().size()]), + table.getPartitionKey(), + columnNames, + isDistinct, + isJson); + } + + Select( + String keyspace, + String table, + List routingKeyValues, + List partitionKey, + List columnNames, + boolean isDistinct, + boolean isJson) { + super(keyspace, partitionKey, routingKeyValues); + this.table = table; + this.columnNames = columnNames; + this.isDistinct = isDistinct; + this.isJson = isJson; + this.where = new 
Where(this); + } + + @Override + StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); + + builder.append("SELECT "); + + if (isJson) builder.append("JSON "); + + if (isDistinct) builder.append("DISTINCT "); + + if (columnNames == null) { + builder.append('*'); + } else { + Utils.joinAndAppendNames(builder, codecRegistry, columnNames); } + builder.append(" FROM "); + if (keyspace != null) Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); - @Override - StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); + if (!where.clauses.isEmpty()) { + builder.append(" WHERE "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); + } - builder.append("SELECT "); + if (groupByColumnNames != null) { + builder.append(" GROUP BY "); + Utils.joinAndAppendNames(builder, codecRegistry, groupByColumnNames); + } - if (isJson) - builder.append("JSON "); + if (orderings != null) { + builder.append(" ORDER BY "); + Utils.joinAndAppend(builder, codecRegistry, ",", orderings, variables); + } - if (isDistinct) - builder.append("DISTINCT "); + if (perPartitionLimit != null) { + builder.append(" PER PARTITION LIMIT ").append(perPartitionLimit); + } - if (columnNames == null) { - builder.append('*'); - } else { - Utils.joinAndAppendNames(builder, codecRegistry, columnNames); - } - builder.append(" FROM "); - if (keyspace != null) - Utils.appendName(keyspace, builder).append('.'); - Utils.appendName(table, builder); + if (limit != null) { + builder.append(" LIMIT ").append(limit); + } - if (!where.clauses.isEmpty()) { - builder.append(" WHERE "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); - } + if (allowFiltering) { + builder.append(" ALLOW FILTERING"); + } - if (groupByColumnNames != null) { - builder.append(" GROUP BY "); - Utils.joinAndAppendNames(builder, codecRegistry, groupByColumnNames); - } + return builder; + } + + /** + * Adds a {@code WHERE} clause to this statement. + * + *

    This is a shorter/more readable version for {@code where().and(clause)}. + * + * @param clause the clause to add. + * @return the where clause of this query to which more clause can be added. + */ + public Where where(Clause clause) { + return where.and(clause); + } + + /** + * Returns a {@code WHERE} statement for this query without adding clause. + * + * @return the where clause of this query to which more clause can be added. + */ + public Where where() { + return where; + } + + /** + * Adds an {@code ORDER BY} clause to this statement. + * + * @param orderings the orderings to define for this query. + * @return this statement. + * @throws IllegalStateException if an {@code ORDER BY} clause has already been provided. + */ + public Select orderBy(Ordering... orderings) { + if (this.orderings != null) + throw new IllegalStateException("An ORDER BY clause has already been provided"); + + if (orderings.length == 0) + throw new IllegalArgumentException( + "Invalid ORDER BY argument, the orderings must not be empty."); + + this.orderings = Arrays.asList(orderings); + for (Ordering ordering : orderings) checkForBindMarkers(ordering); + return this; + } + + /** + * Adds a {@code GROUP BY} clause to this statement. + * + *

    Note: support for {@code GROUP BY} clause is only available from Cassandra 3.10 onwards. + * + * @param columns the columns to group by. + * @return this statement. + * @throws IllegalStateException if a {@code GROUP BY} clause has already been provided. + */ + public Select groupBy(Object... columns) { + if (this.groupByColumnNames != null) + throw new IllegalStateException("A GROUP BY clause has already been provided"); + + this.groupByColumnNames = Arrays.asList(columns); + return this; + } + + /** + * Adds a {@code LIMIT} clause to this statement. + * + * @param limit the limit to set. + * @return this statement. + * @throws IllegalArgumentException if {@code limit <= 0}. + * @throws IllegalStateException if a {@code LIMIT} clause has already been provided. + */ + public Select limit(int limit) { + if (limit <= 0) + throw new IllegalArgumentException("Invalid LIMIT value, must be strictly positive"); + + if (this.limit != null) + throw new IllegalStateException("A LIMIT value has already been provided"); + + this.limit = limit; + setDirty(); + return this; + } + + /** + * Adds a prepared {@code LIMIT} clause to this statement. + * + * @param marker the marker to use for the limit. + * @return this statement. + * @throws IllegalStateException if a {@code LIMIT} clause has already been provided. + */ + public Select limit(BindMarker marker) { + if (this.limit != null) + throw new IllegalStateException("A LIMIT value has already been provided"); + + this.limit = marker; + checkForBindMarkers(marker); + return this; + } + + /** + * Adds a {@code PER PARTITION LIMIT} clause to this statement. + * + *

    Note: support for {@code PER PARTITION LIMIT} clause is only available from Cassandra 3.6 + * onwards. + * + * @param perPartitionLimit the limit to set per partition. + * @return this statement. + * @throws IllegalArgumentException if {@code perPartitionLimit <= 0}. + * @throws IllegalStateException if a {@code PER PARTITION LIMIT} clause has already been + * provided. + * @throws IllegalStateException if this statement is a {@code SELECT DISTINCT} statement. + */ + public Select perPartitionLimit(int perPartitionLimit) { + if (perPartitionLimit <= 0) + throw new IllegalArgumentException( + "Invalid PER PARTITION LIMIT value, must be strictly positive"); + + if (this.perPartitionLimit != null) + throw new IllegalStateException("A PER PARTITION LIMIT value has already been provided"); + if (isDistinct) + throw new IllegalStateException( + "PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries"); + + this.perPartitionLimit = perPartitionLimit; + setDirty(); + return this; + } + + /** + * Adds a prepared {@code PER PARTITION LIMIT} clause to this statement. + * + *

    Note: support for {@code PER PARTITION LIMIT} clause is only available from Cassandra 3.6 + * onwards. + * + * @param marker the marker to use for the limit per partition. + * @return this statement. + * @throws IllegalStateException if a {@code PER PARTITION LIMIT} clause has already been + * provided. + * @throws IllegalStateException if this statement is a {@code SELECT DISTINCT} statement. + */ + public Select perPartitionLimit(BindMarker marker) { + if (this.perPartitionLimit != null) + throw new IllegalStateException("A PER PARTITION LIMIT value has already been provided"); + if (isDistinct) + throw new IllegalStateException( + "PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries"); + + this.perPartitionLimit = marker; + checkForBindMarkers(marker); + return this; + } + + /** + * Adds an {@code ALLOW FILTERING} directive to this statement. + * + * @return this statement. + */ + public Select allowFiltering() { + allowFiltering = true; + return this; + } + + /** The {@code WHERE} clause of a {@code SELECT} statement. */ + public static class Where extends BuiltStatement.ForwardingStatement { - - private final List clauses = new ArrayList(); - - Where(Select statement) { - super(statement); - } - - /** - * Adds the provided clause to this {@code WHERE} clause. - * - * @param clause the clause to add. - * @return this {@code WHERE} clause. - */ - public Where and(Clause clause) { - clauses.add(clause); - statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); - checkForBindMarkers(clause); - return this; - } - - /** - * Adds an ORDER BY clause to the {@code SELECT} statement this {@code WHERE} clause if - * part of. - * - * @param orderings the orderings to add. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalStateException if an {@code ORDER BY} clause has already been provided. - */ - public Select orderBy(Ordering... orderings) { - return statement.orderBy(orderings); - } - - /** - * Adds a {@code GROUP BY} clause to this statement. - *
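A minimal sketch of these `SELECT` options chained together (the `ks.sensor_data` table, with primary key `((sensor_id), day, ts)` and a `value` column, is made up; version requirements are noted inline):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;

// Sketch only: made-up ks.sensor_data table, PRIMARY KEY ((sensor_id), day, ts), column "value".
public class SelectOptionsSketch {
  public static void main(String[] args) {
    // Latest 10 rows per sensor across the whole table (PER PARTITION LIMIT, Cassandra 3.6+):
    System.out.println(select().from("ks", "sensor_data").perPartitionLimit(10));
    // One sensor's rows, newest first, capped at 100 (ORDER BY + LIMIT):
    System.out.println(select().from("ks", "sensor_data")
        .where(eq("sensor_id", 1))
        .orderBy(desc("day"), desc("ts"))
        .limit(100));
    // One aggregate per (sensor_id, day) group (GROUP BY, Cassandra 3.10+):
    System.out.println(select(column("sensor_id"), column("day"), max("value"))
        .from("ks", "sensor_data")
        .where(eq("sensor_id", 1))
        .groupBy("sensor_id", "day"));
    // Filtering on a non-key column requires ALLOW FILTERING:
    System.out.println(select().from("ks", "sensor_data")
        .where(gt("value", 100.0))
        .allowFiltering());
  }
}
```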

    - * Note: support for {@code GROUP BY} clause is only available from - * Cassandra 3.10 onwards. - * - * @param columns the columns to group by. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalStateException if a {@code GROUP BY} clause has already been provided. - */ - public Select groupBy(Object... columns) { - return statement.groupBy(columns); - } - - /** - * Adds a {@code LIMIT} clause to the {@code SELECT} statement this - * {@code WHERE} clause is part of. - * - * @param limit the limit to set. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalArgumentException if {@code limit <= 0}. - * @throws IllegalStateException if a {@code LIMIT} clause has already been - * provided. - */ - public Select limit(int limit) { - return statement.limit(limit); - } - - /** - * Adds a bind marker for the {@code LIMIT} clause to the {@code SELECT} statement this - * {@code WHERE} clause is part of. - * - * @param limit the bind marker to use as limit. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalStateException if a {@code LIMIT} clause has already been - * provided. - */ - public Select limit(BindMarker limit) { - return statement.limit(limit); - } - - /** - * Adds a {@code PER PARTITION LIMIT} clause to the {@code SELECT} statement this - * {@code WHERE} clause is part of. - *

    - * Note: support for {@code PER PARTITION LIMIT} clause is only available from - * Cassandra 3.6 onwards. - * - * @param perPartitionLimit the limit to set per partition. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalArgumentException if {@code perPartitionLimit <= 0}. - * @throws IllegalStateException if a {@code PER PARTITION LIMIT} clause has already been provided. - * @throws IllegalStateException if this statement is a {@code SELECT DISTINCT} statement. - */ - public Select perPartitionLimit(int perPartitionLimit) { - return statement.perPartitionLimit(perPartitionLimit); - } - - /** - * Adds a bind marker for the {@code PER PARTITION LIMIT} clause to the {@code SELECT} statement this - * {@code WHERE} clause is part of. - *

    - * Note: support for {@code PER PARTITION LIMIT} clause is only available from - * Cassandra 3.6 onwards. - * - * @param limit the bind marker to use as limit per partition. - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - * @throws IllegalStateException if a {@code PER PARTITION LIMIT} clause has already been provided. - * @throws IllegalStateException if this statement is a {@code SELECT DISTINCT} statement. - */ - public Select perPartitionLimit(BindMarker limit) { - return statement.perPartitionLimit(limit); - } - - /** - * Adds an {@code ALLOW FILTERING} directive to the {@code SELECT} statement this - * {@code WHERE} clause is part of. - * - * @return the {@code SELECT} statement this {@code WHERE} clause is part of. - */ - public Select allowFiltering() { - return statement.allowFiltering(); - } - } - - /** - * An in-construction SELECT statement. - */ - public static class Builder { - - List columnNames; - boolean isDistinct; - boolean isJson; - - Builder() { - } - - Builder(List columnNames) { - this.columnNames = columnNames; - } - - /** - * Uses DISTINCT selection. - * - * @return this in-build SELECT statement. - */ - public Builder distinct() { - this.isDistinct = true; - return this; - } - - /** - * Uses JSON selection. - *

    - * Cassandra 2.2 introduced JSON support to SELECT statements: - * the {@code JSON} keyword can be used to return each row as a single JSON encoded map. - * - * @return this in-build SELECT statement. - * @see JSON Support for CQL - * @see JSON Support in Cassandra 2.2 - * @see Data retrieval using JSON - */ - public Builder json() { - this.isJson = true; - return this; - } - - /** - * Adds the table to select from. - * - * @param table the name of the table to select from. - * @return a newly built SELECT statement that selects from {@code table}. - */ - public Select from(String table) { - return from(null, table); - } - - /** - * Adds the table to select from. - * - * @param keyspace the name of the keyspace to select from. - * @param table the name of the table to select from. - * @return a newly built SELECT statement that selects from {@code keyspace.table}. - */ - public Select from(String keyspace, String table) { - return new Select(keyspace, table, columnNames, isDistinct, isJson); - } - - /** - * Adds the table to select from. - * - * @param table the table to select from. - * @return a newly built SELECT statement that selects from {@code table}. - */ - public Select from(TableMetadata table) { - return new Select(table, columnNames, isDistinct, isJson); - } - } - - /** - * An Selection clause for an in-construction SELECT statement. - */ - public static abstract class Selection extends Builder { - - /** - * Uses DISTINCT selection. - * - * @return this in-build SELECT statement. - */ - @Override - public Selection distinct() { - this.isDistinct = true; - return this; - } - - /** - * Uses JSON selection. - *
<p/>
    - * Cassandra 2.2 introduced JSON support to SELECT statements: - * the {@code JSON} keyword can be used to return each row as a single JSON encoded map. - * - * @return this in-build SELECT statement. - * @see JSON Support for CQL - * @see JSON Support in Cassandra 2.2 - * @see Data retrieval using JSON - */ - @Override - public Selection json() { - this.isJson = true; - return this; - } - - /** - * Selects all columns (i.e. "SELECT * ...") - * - * @return an in-build SELECT statement. - * @throws IllegalStateException if some columns had already been selected for this builder. - */ - public abstract Builder all(); - - /** - * Selects the count of all returned rows (i.e. "SELECT count(*) ..."). - * - * @return an in-build SELECT statement. - * @throws IllegalStateException if some columns had already been selected for this builder. - */ - public abstract Builder countAll(); - - /** - * Selects the provided column. - * - * @param name the new column name to add. - * @return this in-build SELECT statement - */ - public abstract SelectionOrAlias column(String name); - - /** - * Selects the write time of provided column. - *
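For reference, the `countAll()` and `distinct()` builders described above are typically used as sketched below; `ks.users` and `user_id` (assumed here to be the partition key) are hypothetical:

```java
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class CountAllSketch {
  public static void main(String[] args) {
    // SELECT count(*) FROM ks.users
    Statement countAll = QueryBuilder.select().countAll().from("ks", "users");

    // SELECT DISTINCT user_id FROM ks.users
    Statement distinctKeys =
        QueryBuilder.select().distinct().column("user_id").from("ks", "users");

    System.out.println(countAll);
    System.out.println(distinctKeys);
  }
}
```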
<p/>
    - * This is a shortcut for {@code fcall("writetime", QueryBuilder.column(name))}. - * - * @param name the name of the column to select the write time of. - * @return this in-build SELECT statement - */ - public abstract SelectionOrAlias writeTime(String name); - - /** - * Selects the ttl of provided column. - *
<p/>
    - * This is a shortcut for {@code fcall("ttl", QueryBuilder.column(name))}. - * - * @param name the name of the column to select the ttl of. - * @return this in-build SELECT statement - */ - public abstract SelectionOrAlias ttl(String name); - - /** - * Creates a function call. - *
<p/>
    - * Please note that the parameters are interpreted as values, and so - * {@code fcall("textToBlob", "foo")} will generate the string - * {@code "textToBlob('foo')"}. If you want to generate - * {@code "textToBlob(foo)"}, i.e. if the argument must be interpreted - * as a column name (in a select clause), you will need to use the - * {@link QueryBuilder#column} method, and so - * {@code fcall("textToBlob", QueryBuilder.column(foo)}. - * - * @param name the name of the function. - * @param parameters the parameters for the function call. - * @return this in-build SELECT statement - */ - public abstract SelectionOrAlias fcall(String name, Object... parameters); - - /** - * Creates a cast of an expression to a given CQL type. - * - * @param column the expression to cast. It can be a complex expression like a - * {@link QueryBuilder#fcall(String, Object...) function call}. - * @param targetType the target CQL type to cast to. Use static methods such as {@link DataType#text()}. - * @return this in-build SELECT statement. - */ - public SelectionOrAlias cast(Object column, DataType targetType) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Selects the provided raw expression. - *
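A sketch of how the selection helpers above combine: an aliased column, the `writetime`/`ttl` shortcuts, a function call whose argument is wrapped with `QueryBuilder.column` so it is treated as a column name rather than a literal, and a `CAST`. The keyspace, table and column names are hypothetical, and `textToBlob` simply mirrors the example used in the Javadoc:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.column;

import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class SelectionFunctionsSketch {
  public static void main(String[] args) {
    // Roughly: SELECT name AS n, writetime(name), ttl(name), textToBlob(name),
    //          CAST(score AS text) FROM ks.players
    Statement select =
        QueryBuilder.select()
            .column("name").as("n")
            .writeTime("name")
            .ttl("name")
            // column(...) makes the argument a column reference, not the string literal 'name'
            .fcall("textToBlob", column("name"))
            .cast(column("score"), DataType.text())
            .from("ks", "players");
    System.out.println(select);
  }
}
```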
<p/>
    - * The provided string will be appended to the query as-is, without any form of escaping or quoting. - * - * @param rawString the raw expression to add. - * @return this in-build SELECT statement - */ - public SelectionOrAlias raw(String rawString) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Selects the provided path. - *
<p/>
    - * All given path {@code segments} will be concatenated together with dots. - * If any segment contains an identifier that needs quoting, - * caller code is expected to call {@link QueryBuilder#quote(String)} prior to - * invoking this method. - *
<p/>
    - * This method is currently only useful when accessing individual fields of a - * {@link com.datastax.driver.core.UserType user-defined type} (UDT), - * which is only possible since CASSANDRA-7423. - *
<p/>
    - * Note that currently nested UDT fields are not supported and - * will be rejected by the server as a - * {@link com.datastax.driver.core.exceptions.SyntaxError syntax error}. - * - * @param segments the segments of the path to create. - * @return this in-build SELECT statement - * @see CASSANDRA-7423 - */ - public SelectionOrAlias path(String... segments) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates a {@code toJson()} function call. - * This is a shortcut for {@code fcall("toJson", QueryBuilder.column(name))}. - *
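The `raw` and `path` selections documented above can be sketched as follows; the keyspace, table, UDT column and function expression are hypothetical, and the `raw` string is appended verbatim, so it must already be valid CQL:

```java
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class PathAndRawSketch {
  public static void main(String[] args) {
    // Roughly: SELECT address.city, blobAsBigint(timestampAsBlob(updated_at)) FROM ks.users
    Statement select =
        QueryBuilder.select()
            .path("address", "city")                          // UDT field access (CASSANDRA-7423)
            .raw("blobAsBigint(timestampAsBlob(updated_at))") // no quoting or escaping applied
            .from("ks", "users");
    System.out.println(select);
  }
}
```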
<p/>
    - * Support for JSON functions has been added in Cassandra 2.2. - * The {@code toJson()} function is similar to {@code SELECT JSON} statements, - * but applies to a single column value instead of the entire row, - * and produces a JSON-encoded string representing the normal Cassandra column value. - *
<p/>
    - * It may only be used in the selection clause of a {@code SELECT} statement. - * - * @param column the column to retrieve JSON from. - * @return the function call. - * @see JSON Support for CQL - * @see JSON Support in Cassandra 2.2 - */ - public SelectionOrAlias toJson(String column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates a {@code count(x)} built-in function call. - * - * @return the function call. - */ - public SelectionOrAlias count(Object column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates a {@code max(x)} built-in function call. - * - * @return the function call. - */ - public SelectionOrAlias max(Object column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates a {@code min(x)} built-in function call. - * - * @return the function call. - */ - public SelectionOrAlias min(Object column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates a {@code sum(x)} built-in function call. - * - * @return the function call. - */ - public SelectionOrAlias sum(Object column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - /** - * Creates an {@code avg(x)} built-in function call. - * - * @return the function call. - */ - public SelectionOrAlias avg(Object column) { - // This method should be abstract like others here. But adding an abstract method is not binary-compatible, - // so we add this dummy implementation to make Clirr happy. - throw new UnsupportedOperationException("Not implemented. This should only happen if you've written your own implementation of Selection"); - } - - } - - /** - * An Selection clause for an in-construction SELECT statement. - *
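A sketch of the `toJson` and aggregate selections described above, assuming a hypothetical `ks.players` table and Cassandra 2.2 or later (where these functions were introduced):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.column;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class AggregatesSketch {
  public static void main(String[] args) {
    // SELECT count(id), min(score), max(score), avg(score) FROM ks.players
    Statement aggregates =
        QueryBuilder.select()
            .count(column("id"))
            .min(column("score"))
            .max(column("score"))
            .avg(column("score"))
            .from("ks", "players");

    // SELECT toJson(address) FROM ks.players  (JSON-encodes a single column value)
    Statement asJson = QueryBuilder.select().toJson("address").from("ks", "players");

    System.out.println(aggregates);
    System.out.println(asJson);
  }
}
```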
<p/>
    - * This only differs from {@link Selection} in that you can add an - * alias for the previously selected item through {@link SelectionOrAlias#as}. - */ - public static class SelectionOrAlias extends Selection { - - private Object previousSelection; - - /** - * Adds an alias for the just selected item. - * - * @param alias the name of the alias to use. - * @return this in-build SELECT statement - */ - public Selection as(String alias) { - assert previousSelection != null; - Object a = new Utils.Alias(previousSelection, alias); - previousSelection = null; - return addName(a); - } - - // We don't return SelectionOrAlias on purpose - private Selection addName(Object name) { - if (columnNames == null) - columnNames = new ArrayList(); - - columnNames.add(name); - return this; - } - - private SelectionOrAlias queueName(Object name) { - if (previousSelection != null) - addName(previousSelection); - - previousSelection = name; - return this; - } - - @Override - public Builder all() { - if (columnNames != null) - throw new IllegalStateException(String.format("Some columns (%s) have already been selected.", columnNames)); - if (previousSelection != null) - throw new IllegalStateException(String.format("Some columns ([%s]) have already been selected.", previousSelection)); - - return (Builder) this; - } - - @Override - public Builder countAll() { - if (columnNames != null) - throw new IllegalStateException(String.format("Some columns (%s) have already been selected.", columnNames)); - if (previousSelection != null) - throw new IllegalStateException(String.format("Some columns ([%s]) have already been selected.", previousSelection)); - - columnNames = COUNT_ALL; - return (Builder) this; - } - - @Override - public SelectionOrAlias column(String name) { - return queueName(name); - } - - @Override - public SelectionOrAlias writeTime(String name) { - return queueName(new Utils.FCall("writetime", new Utils.CName(name))); - } - - @Override - public SelectionOrAlias ttl(String name) { - return queueName(new Utils.FCall("ttl", new Utils.CName(name))); - } - - @Override - public SelectionOrAlias fcall(String name, Object... parameters) { - return queueName(new Utils.FCall(name, parameters)); - } - - @Override - public SelectionOrAlias cast(Object column, DataType targetType) { - return queueName(QueryBuilder.cast(column, targetType)); - } - - @Override - public SelectionOrAlias raw(String rawString) { - return queueName(QueryBuilder.raw(rawString)); - } - - @Override - public SelectionOrAlias path(String... 
segments) { - return queueName(QueryBuilder.path(segments)); - } - - @Override - public SelectionOrAlias toJson(String name) { - return queueName(QueryBuilder.toJson(name)); - } - - @Override - public SelectionOrAlias count(Object column) { - return queueName(QueryBuilder.count(column)); - } - - @Override - public SelectionOrAlias max(Object column) { - return queueName(QueryBuilder.max(column)); - } - - @Override - public SelectionOrAlias min(Object column) { - return queueName(QueryBuilder.min(column)); - } - - @Override - public SelectionOrAlias sum(Object column) { - return queueName(QueryBuilder.sum(column)); - } - - @Override - public SelectionOrAlias avg(Object column) { - return queueName(QueryBuilder.avg(column)); - } - - @Override - public Select from(String keyspace, String table) { - if (previousSelection != null) - addName(previousSelection); - previousSelection = null; - return super.from(keyspace, table); - } - - @Override - public Select from(TableMetadata table) { - if (previousSelection != null) - addName(previousSelection); - previousSelection = null; - return super.from(table); - } + public SelectionOrAlias count(Object column) { + // This method should be abstract like others here. But adding an abstract method is not + // binary-compatible, + // so we add this dummy implementation to make Clirr happy. + throw new UnsupportedOperationException( + "Not implemented. This should only happen if you've written your own implementation of Selection"); + } + + /** + * Creates a {@code max(x)} built-in function call. + * + * @return the function call. + */ + public SelectionOrAlias max(Object column) { + // This method should be abstract like others here. But adding an abstract method is not + // binary-compatible, + // so we add this dummy implementation to make Clirr happy. + throw new UnsupportedOperationException( + "Not implemented. This should only happen if you've written your own implementation of Selection"); + } + + /** + * Creates a {@code min(x)} built-in function call. + * + * @return the function call. + */ + public SelectionOrAlias min(Object column) { + // This method should be abstract like others here. But adding an abstract method is not + // binary-compatible, + // so we add this dummy implementation to make Clirr happy. + throw new UnsupportedOperationException( + "Not implemented. This should only happen if you've written your own implementation of Selection"); + } + + /** + * Creates a {@code sum(x)} built-in function call. + * + * @return the function call. + */ + public SelectionOrAlias sum(Object column) { + // This method should be abstract like others here. But adding an abstract method is not + // binary-compatible, + // so we add this dummy implementation to make Clirr happy. + throw new UnsupportedOperationException( + "Not implemented. This should only happen if you've written your own implementation of Selection"); + } + + /** + * Creates an {@code avg(x)} built-in function call. + * + * @return the function call. + */ + public SelectionOrAlias avg(Object column) { + // This method should be abstract like others here. But adding an abstract method is not + // binary-compatible, + // so we add this dummy implementation to make Clirr happy. + throw new UnsupportedOperationException( + "Not implemented. This should only happen if you've written your own implementation of Selection"); + } + } + + /** + * An Selection clause for an in-construction SELECT statement. + * + *
<p>
    This only differs from {@link Selection} in that you can add an alias for the previously + * selected item through {@link SelectionOrAlias#as}. + */ + public static class SelectionOrAlias extends Selection { + + private Object previousSelection; + + /** + * Adds an alias for the just selected item. + * + * @param alias the name of the alias to use. + * @return this in-build SELECT statement + */ + public Selection as(String alias) { + assert previousSelection != null; + Object a = new Utils.Alias(previousSelection, alias); + previousSelection = null; + return addName(a); + } + + // We don't return SelectionOrAlias on purpose + private Selection addName(Object name) { + if (columnNames == null) columnNames = new ArrayList(); + + columnNames.add(name); + return this; + } + + private SelectionOrAlias queueName(Object name) { + if (previousSelection != null) addName(previousSelection); + + previousSelection = name; + return this; + } + + @Override + public Builder all() { + if (columnNames != null) + throw new IllegalStateException( + String.format("Some columns (%s) have already been selected.", columnNames)); + if (previousSelection != null) + throw new IllegalStateException( + String.format("Some columns ([%s]) have already been selected.", previousSelection)); + + return (Builder) this; + } + + @Override + public Builder countAll() { + if (columnNames != null) + throw new IllegalStateException( + String.format("Some columns (%s) have already been selected.", columnNames)); + if (previousSelection != null) + throw new IllegalStateException( + String.format("Some columns ([%s]) have already been selected.", previousSelection)); + + columnNames = COUNT_ALL; + return (Builder) this; + } + + @Override + public SelectionOrAlias column(String name) { + return queueName(name); + } + + @Override + public SelectionOrAlias writeTime(String name) { + return queueName(new Utils.FCall("writetime", new Utils.CName(name))); + } + + @Override + public SelectionOrAlias ttl(String name) { + return queueName(new Utils.FCall("ttl", new Utils.CName(name))); + } + + @Override + public SelectionOrAlias fcall(String name, Object... parameters) { + return queueName(new Utils.FCall(name, parameters)); + } + + @Override + public SelectionOrAlias cast(Object column, DataType targetType) { + return queueName(QueryBuilder.cast(column, targetType)); + } + + @Override + public SelectionOrAlias raw(String rawString) { + return queueName(QueryBuilder.raw(rawString)); + } + + @Override + public SelectionOrAlias path(String... 
segments) { + return queueName(QueryBuilder.path(segments)); + } + + @Override + public SelectionOrAlias toJson(String name) { + return queueName(QueryBuilder.toJson(name)); + } + + @Override + public SelectionOrAlias count(Object column) { + return queueName(QueryBuilder.count(column)); + } + + @Override + public SelectionOrAlias max(Object column) { + return queueName(QueryBuilder.max(column)); + } + + @Override + public SelectionOrAlias min(Object column) { + return queueName(QueryBuilder.min(column)); + } + + @Override + public SelectionOrAlias sum(Object column) { + return queueName(QueryBuilder.sum(column)); + } + + @Override + public SelectionOrAlias avg(Object column) { + return queueName(QueryBuilder.avg(column)); + } + + @Override + public Select from(String keyspace, String table) { + if (previousSelection != null) addName(previousSelection); + previousSelection = null; + return super.from(keyspace, table); + } + + @Override + public Select from(TableMetadata table) { + if (previousSelection != null) addName(previousSelection); + previousSelection = null; + return super.from(table); + } + + @Override + public Select from(MaterializedViewMetadata view) { + if (previousSelection != null) { + addName(previousSelection); + } + previousSelection = null; + return super.from(view); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Truncate.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Truncate.java index b76b8d4535a..0dd49546c04 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Truncate.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Truncate.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,45 +21,43 @@ import com.datastax.driver.core.ColumnMetadata; import com.datastax.driver.core.Metadata; import com.datastax.driver.core.TableMetadata; - import java.util.Arrays; import java.util.List; -/** - * A built TRUNCATE statement. - */ +/** A built TRUNCATE statement. 
*/ public class Truncate extends BuiltStatement { - private final String table; - - Truncate(String keyspace, String table) { - this(keyspace, table, null, null); - } - - Truncate(TableMetadata table) { - this(Metadata.quoteIfNecessary(table.getKeyspace().getName()), - Metadata.quoteIfNecessary(table.getName()), - Arrays.asList(new Object[table.getPartitionKey().size()]), - table.getPartitionKey()); - } - - Truncate(String keyspace, - String table, - List routingKeyValues, - List partitionKey) { - super(keyspace, partitionKey, routingKeyValues); - this.table = table; - } - - @Override - protected StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); - - builder.append("TRUNCATE "); - if (keyspace != null) - Utils.appendName(keyspace, builder).append('.'); - Utils.appendName(table, builder); - - return builder; - } + private final String table; + + Truncate(String keyspace, String table) { + this(keyspace, table, null, null); + } + + Truncate(TableMetadata table) { + this( + Metadata.quoteIfNecessary(table.getKeyspace().getName()), + Metadata.quoteIfNecessary(table.getName()), + Arrays.asList(new Object[table.getPartitionKey().size()]), + table.getPartitionKey()); + } + + Truncate( + String keyspace, + String table, + List routingKeyValues, + List partitionKey) { + super(keyspace, partitionKey, routingKeyValues); + this.table = table; + } + + @Override + protected StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); + + builder.append("TRUNCATE "); + if (keyspace != null) Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + + return builder; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Update.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Update.java index 5d3ffbbd2c2..7014b4a4a93 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Update.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Update.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,426 +22,411 @@ import com.datastax.driver.core.Metadata; import com.datastax.driver.core.TableMetadata; import com.datastax.driver.core.querybuilder.Assignment.CounterAssignment; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; -/** - * A built UPDATE statement. - */ +/** A built UPDATE statement. 
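Usage of the `Truncate` builder in this hunk is a one-liner; a sketch with a hypothetical keyspace and table:

```java
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class TruncateSketch {
  public static void main(String[] args) {
    // TRUNCATE ks.events
    Statement truncate = QueryBuilder.truncate("ks", "events");
    System.out.println(truncate);
  }
}
```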
*/ public class Update extends BuiltStatement { - private final String table; - private final Assignments assignments; - private final Where where; - private final Options usings; - private final Conditions conditions; - private boolean ifExists; + private final String table; + private final Assignments assignments; + private final Where where; + private final Options usings; + private final Conditions conditions; + private boolean ifExists; + + Update(String keyspace, String table) { + this(keyspace, table, null, null); + } + + Update(TableMetadata table) { + this( + Metadata.quoteIfNecessary(table.getKeyspace().getName()), + Metadata.quoteIfNecessary(table.getName()), + Arrays.asList(new Object[table.getPartitionKey().size()]), + table.getPartitionKey()); + } + + Update( + String keyspace, + String table, + List routingKeyValues, + List partitionKey) { + super(keyspace, partitionKey, routingKeyValues); + this.table = table; + this.assignments = new Assignments(this); + this.where = new Where(this); + this.usings = new Options(this); + this.conditions = new Conditions(this); + this.ifExists = false; + } + + @Override + StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { + StringBuilder builder = new StringBuilder(); + + builder.append("UPDATE "); + if (keyspace != null) Utils.appendName(keyspace, builder).append('.'); + Utils.appendName(table, builder); + + if (!usings.usings.isEmpty()) { + builder.append(" USING "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); + } - Update(String keyspace, String table) { - this(keyspace, table, null, null); + if (!assignments.assignments.isEmpty()) { + builder.append(" SET "); + Utils.joinAndAppend(builder, codecRegistry, ",", assignments.assignments, variables); } - Update(TableMetadata table) { - this(Metadata.quoteIfNecessary(table.getKeyspace().getName()), - Metadata.quoteIfNecessary(table.getName()), - Arrays.asList(new Object[table.getPartitionKey().size()]), - table.getPartitionKey()); + if (!where.clauses.isEmpty()) { + builder.append(" WHERE "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); } - Update(String keyspace, - String table, - List routingKeyValues, - List partitionKey) { - super(keyspace, partitionKey, routingKeyValues); - this.table = table; - this.assignments = new Assignments(this); - this.where = new Where(this); - this.usings = new Options(this); - this.conditions = new Conditions(this); - this.ifExists = false; + if (!conditions.conditions.isEmpty()) { + builder.append(" IF "); + Utils.joinAndAppend(builder, codecRegistry, " AND ", conditions.conditions, variables); } - @Override - StringBuilder buildQueryString(List variables, CodecRegistry codecRegistry) { - StringBuilder builder = new StringBuilder(); + if (ifExists) { + builder.append(" IF EXISTS"); + } - builder.append("UPDATE "); - if (keyspace != null) - Utils.appendName(keyspace, builder).append('.'); - Utils.appendName(table, builder); + return builder; + } + + /** + * Adds an assignment to this UPDATE statement. + * + *
<p>
    This is a shorter/more readable version for {@code with().and(assignment)}. + * + * @param assignment the assignment to add. + * @return the Assignments of this UPDATE statement. + */ + public Assignments with(Assignment assignment) { + return assignments.and(assignment); + } + + /** + * Returns the assignments of this UPDATE statement. + * + * @return the assignments of this UPDATE statement. + */ + public Assignments with() { + return assignments; + } + + /** + * Adds a WHERE clause to this statement. + * + *
<p>
    This is a shorter/more readable version for {@code where().and(clause)}. + * + * @param clause the clause to add. + * @return the where clause of this query to which more clause can be added. + */ + public Where where(Clause clause) { + return where.and(clause); + } + + /** + * Returns a Where statement for this query without adding clause. + * + * @return the where clause of this query to which more clause can be added. + */ + public Where where() { + return where; + } + + /** + * Adds a conditions clause (IF) to this statement. + * + *
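A minimal sketch of the `with`/`where` chaining documented above for `Update`; the keyspace, table and columns are hypothetical, and bind markers are left in place as they would be for a statement that gets prepared later:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class UpdateSketch {
  public static void main(String[] args) {
    // UPDATE ks.users SET email=? WHERE id=?
    Statement update =
        QueryBuilder.update("ks", "users")
            .with(set("email", bindMarker()))
            .where(eq("id", bindMarker()));
    System.out.println(update);
  }
}
```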
<p>
    This is a shorter/more readable version for {@code onlyIf().and(condition)}. + * + *
<p>
    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @param condition the condition to add. + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf(Clause condition) { + return conditions.and(condition); + } + + /** + * Adds a conditions clause (IF) to this statement. + * + * @return the conditions of this query to which more conditions can be added. + */ + public Conditions onlyIf() { + return conditions; + } + + /** + * Adds a new options for this UPDATE statement. + * + * @param using the option to add. + * @return the options of this UPDATE statement. + */ + public Options using(Using using) { + return usings.and(using); + } + + /** The assignments of an UPDATE statement. */ + public static class Assignments extends BuiltStatement.ForwardingStatement { + + private final List assignments = new ArrayList(); + + Assignments(Update statement) { + super(statement); + } - if (!usings.usings.isEmpty()) { - builder.append(" USING "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", usings.usings, variables); - } + /** + * Adds a new assignment for this UPDATE statement. + * + * @param assignment the new Assignment to add. + * @return these Assignments. + */ + public Assignments and(Assignment assignment) { + statement.setCounterOp(assignment instanceof CounterAssignment); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(assignment)) + statement.setNonIdempotentOps(); + assignments.add(assignment); + checkForBindMarkers(assignment); + return this; + } - if (!assignments.assignments.isEmpty()) { - builder.append(" SET "); - Utils.joinAndAppend(builder, codecRegistry, ",", assignments.assignments, variables); - } + /** + * Adds a where clause to the UPDATE statement those assignments are part of. + * + * @param clause the clause to add. + * @return the where clause of the UPDATE statement those assignments are part of. + */ + public Where where(Clause clause) { + return statement.where(clause); + } - if (!where.clauses.isEmpty()) { - builder.append(" WHERE "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", where.clauses, variables); - } + /** + * Adds an option to the UPDATE statement those assignments are part of. + * + * @param using the using clause to add. + * @return the options of the UPDATE statement those assignments are part of. + */ + public Options using(Using using) { + return statement.using(using); + } - if (!conditions.conditions.isEmpty()) { - builder.append(" IF "); - Utils.joinAndAppend(builder, codecRegistry, " AND ", conditions.conditions, variables); - } + /** + * Adds a condition to the UPDATE statement those assignments are part of. + * + *
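The `onlyIf` condition described above turns the update into a lightweight transaction and, as the Javadoc notes, marks the statement non-idempotent; a sketch with hypothetical names:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class ConditionalUpdateSketch {
  public static void main(String[] args) {
    // UPDATE ks.accounts SET balance=100 WHERE id=42 IF version=7
    Statement update =
        QueryBuilder.update("ks", "accounts")
            .with(set("balance", 100))
            .where(eq("id", 42))
            .onlyIf(eq("version", 7)); // adding a condition forces isIdempotent() to false
    System.out.println(update + " -> idempotent? " + update.isIdempotent());
  }
}
```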
<p>
    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @param condition the condition to add. + * @return the conditions for the UPDATE statement those assignments are part of. + */ + public Conditions onlyIf(Clause condition) { + return statement.onlyIf(condition); + } + } + + /** The WHERE clause of an UPDATE statement. */ + public static class Where extends BuiltStatement.ForwardingStatement { - if (ifExists) { - builder.append(" IF EXISTS"); - } + private final List clauses = new ArrayList(); + + Where(Update statement) { + super(statement); + } - return builder; + /** + * Adds the provided clause to this WHERE clause. + * + * @param clause the clause to add. + * @return this WHERE clause. + */ + public Where and(Clause clause) { + clauses.add(clause); + statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); + if (!hasNonIdempotentOps() && !Utils.isIdempotent(clause)) { + statement.setNonIdempotentOps(); + } + checkForBindMarkers(clause); + return this; } /** - * Adds an assignment to this UPDATE statement. - *
<p/>
    - * This is a shorter/more readable version for {@code with().and(assignment)}. + * Adds an assignment to the UPDATE statement this WHERE clause is part of. * * @param assignment the assignment to add. - * @return the Assignments of this UPDATE statement. + * @return the assignments of the UPDATE statement this WHERE clause is part of. */ public Assignments with(Assignment assignment) { - return assignments.and(assignment); + return statement.with(assignment); } /** - * Returns the assignments of this UPDATE statement. + * Adds an option to the UPDATE statement this WHERE clause is part of. * - * @return the assignments of this UPDATE statement. + * @param using the using clause to add. + * @return the options of the UPDATE statement this WHERE clause is part of. */ - public Assignments with() { - return assignments; + public Options using(Using using) { + return statement.using(using); } /** - * Adds a WHERE clause to this statement. - *
<p/>
    - * This is a shorter/more readable version for {@code where().and(clause)}. + * Adds a condition to the UPDATE statement this WHERE clause is part of. * - * @param clause the clause to add. - * @return the where clause of this query to which more clause can be added. + *
<p>
    This will configure the statement as non-idempotent, see {@link + * com.datastax.driver.core.Statement#isIdempotent()} for more information. + * + * @param condition the condition to add. + * @return the conditions for the UPDATE statement this WHERE clause is part of. */ - public Where where(Clause clause) { - return where.and(clause); + public Conditions onlyIf(Clause condition) { + return statement.onlyIf(condition); } /** - * Returns a Where statement for this query without adding clause. + * Sets the 'IF EXISTS' option for the UPDATE statement this WHERE clause is part of. + * + *
<p>
    An update with that option will report whether the statement actually resulted in data + * being updated. The existence check and update are done transactionally in the sense that if + * multiple clients attempt to update a given row with this option, then at most one may + * succeed. Please keep in mind that using this option has a non negligible performance impact + * and should be avoided when possible. This will configure the statement as non-idempotent, see + * {@link com.datastax.driver.core.Statement#isIdempotent()} for more information. * - * @return the where clause of this query to which more clause can be added. + * @return the UPDATE statement this WHERE clause is part of. */ - public Where where() { - return where; + public IfExists ifExists() { + statement.ifExists = true; + statement.setNonIdempotentOps(); + return new IfExists(statement); + } + } + + /** The options of a UPDATE statement. */ + public static class Options extends BuiltStatement.ForwardingStatement { + + private final List usings = new ArrayList(); + + Options(Update statement) { + super(statement); } /** - * Adds a conditions clause (IF) to this statement. - *
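A sketch of the `ifExists()` option documented above, with hypothetical keyspace, table and values; like conditions, it marks the statement non-idempotent:

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class IfExistsUpdateSketch {
  public static void main(String[] args) {
    // UPDATE ks.users SET email='alice@example.com' WHERE id=42 IF EXISTS
    Statement update =
        QueryBuilder.update("ks", "users")
            .with(set("email", "alice@example.com"))
            .where(eq("id", 42))
            .ifExists();
    System.out.println(update);
  }
}
```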
<p/>
    - * This is a shorter/more readable version for {@code onlyIf().and(condition)}. - *
<p/>
    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. + * Adds the provided option. * - * @param condition the condition to add. - * @return the conditions of this query to which more conditions can be added. + * @param using an UPDATE option. + * @return this {@code Options} object. */ - public Conditions onlyIf(Clause condition) { - return conditions.and(condition); + public Options and(Using using) { + usings.add(using); + checkForBindMarkers(using); + return this; } /** - * Adds a conditions clause (IF) to this statement. + * Adds an assignment to the UPDATE statement those options are part of. * - * @return the conditions of this query to which more conditions can be added. + * @param assignment the assignment to add. + * @return the assignments of the UPDATE statement those options are part of. */ - public Conditions onlyIf() { - return conditions; + public Assignments with(Assignment assignment) { + return statement.with(assignment); } /** - * Adds a new options for this UPDATE statement. + * Adds a where clause to the UPDATE statement these options are part of. * - * @param using the option to add. - * @return the options of this UPDATE statement. + * @param clause clause to add. + * @return the WHERE clause of the UPDATE statement these options are part of. */ - public Options using(Using using) { - return usings.and(using); + public Where where(Clause clause) { + return statement.where(clause); } /** - * The assignments of an UPDATE statement. + * Adds a condition to the UPDATE statement these options are part of. + * + * @param condition the condition to add. + * @return the conditions for the UPDATE statement these options are part of. */ - public static class Assignments extends BuiltStatement.ForwardingStatement { - - private final List assignments = new ArrayList(); - - Assignments(Update statement) { - super(statement); - } - - /** - * Adds a new assignment for this UPDATE statement. - * - * @param assignment the new Assignment to add. - * @return these Assignments. - */ - public Assignments and(Assignment assignment) { - statement.setCounterOp(assignment instanceof CounterAssignment); - if (!hasNonIdempotentOps() && !Utils.isIdempotent(assignment)) - statement.setNonIdempotentOps(); - assignments.add(assignment); - checkForBindMarkers(assignment); - return this; - } - - /** - * Adds a where clause to the UPDATE statement those assignments are part of. - * - * @param clause the clause to add. - * @return the where clause of the UPDATE statement those assignments are part of. - */ - public Where where(Clause clause) { - return statement.where(clause); - } - - /** - * Adds an option to the UPDATE statement those assignments are part of. - * - * @param using the using clause to add. - * @return the options of the UPDATE statement those assignments are part of. - */ - public Options using(Using using) { - return statement.using(using); - } - - /** - * Adds a condition to the UPDATE statement those assignments are part of. - *
<p/>
    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. - * - * @param condition the condition to add. - * @return the conditions for the UPDATE statement those assignments are part of. - */ - public Conditions onlyIf(Clause condition) { - return statement.onlyIf(condition); - } - + public Conditions onlyIf(Clause condition) { + return statement.onlyIf(condition); + } + } + + /** + * Conditions for an UPDATE statement. + * + *
<p>
    When provided some conditions, an update will not apply unless the provided conditions + * applies. + * + *
<p>
    Please keep in mind that provided conditions has a non negligible performance impact and + * should be avoided when possible. + */ + public static class Conditions extends BuiltStatement.ForwardingStatement { + + private final List conditions = new ArrayList(); + + Conditions(Update statement) { + super(statement); } /** - * The WHERE clause of an UPDATE statement. + * Adds the provided condition for the update. + * + *
<p>
    Note that while the query builder accept any type of {@code Clause} as conditions, + * Cassandra currently only allow equality ones. + * + * @param condition the condition to add. + * @return this {@code Conditions} clause. */ - public static class Where extends BuiltStatement.ForwardingStatement { - - private final List clauses = new ArrayList(); - - Where(Update statement) { - super(statement); - } - - /** - * Adds the provided clause to this WHERE clause. - * - * @param clause the clause to add. - * @return this WHERE clause. - */ - public Where and(Clause clause) { - clauses.add(clause); - statement.maybeAddRoutingKey(clause.name(), clause.firstValue()); - if (!hasNonIdempotentOps() && !Utils.isIdempotent(clause)) { - statement.setNonIdempotentOps(); - } - checkForBindMarkers(clause); - return this; - } - - /** - * Adds an assignment to the UPDATE statement this WHERE clause is part of. - * - * @param assignment the assignment to add. - * @return the assignments of the UPDATE statement this WHERE clause is part of. - */ - public Assignments with(Assignment assignment) { - return statement.with(assignment); - } - - /** - * Adds an option to the UPDATE statement this WHERE clause is part of. - * - * @param using the using clause to add. - * @return the options of the UPDATE statement this WHERE clause is part of. - */ - public Options using(Using using) { - return statement.using(using); - } - - /** - * Adds a condition to the UPDATE statement this WHERE clause is part of. - *
<p/>
    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. - * - * @param condition the condition to add. - * @return the conditions for the UPDATE statement this WHERE clause is part of. - */ - public Conditions onlyIf(Clause condition) { - return statement.onlyIf(condition); - } - - /** - * Sets the 'IF EXISTS' option for the UPDATE statement this WHERE clause - * is part of. - *
<p/>
    - * An update with that option will report whether the statement actually - * resulted in data being updated. The existence check and update are - * done transactionally in the sense that if multiple clients attempt to - * update a given row with this option, then at most one may succeed. - *
<p/>
    - * Please keep in mind that using this option has a non negligible - * performance impact and should be avoided when possible. - *
<p/>
    - * This will configure the statement as non-idempotent, see {@link com.datastax.driver.core.Statement#isIdempotent()} - * for more information. - * - * @return the UPDATE statement this WHERE clause is part of. - */ - public IfExists ifExists() { - statement.ifExists = true; - statement.setNonIdempotentOps(); - return new IfExists(statement); - } + public Conditions and(Clause condition) { + statement.setNonIdempotentOps(); + conditions.add(condition); + checkForBindMarkers(condition); + return this; } /** - * The options of a UPDATE statement. + * Adds an assignment to the UPDATE statement those conditions are part of. + * + * @param assignment the assignment to add. + * @return the assignments of the UPDATE statement those conditions are part of. */ - public static class Options extends BuiltStatement.ForwardingStatement { - - private final List usings = new ArrayList(); - - Options(Update statement) { - super(statement); - } - - /** - * Adds the provided option. - * - * @param using an UPDATE option. - * @return this {@code Options} object. - */ - public Options and(Using using) { - usings.add(using); - checkForBindMarkers(using); - return this; - } - - /** - * Adds an assignment to the UPDATE statement those options are part of. - * - * @param assignment the assignment to add. - * @return the assignments of the UPDATE statement those options are part of. - */ - public Assignments with(Assignment assignment) { - return statement.with(assignment); - } - - /** - * Adds a where clause to the UPDATE statement these options are part of. - * - * @param clause clause to add. - * @return the WHERE clause of the UPDATE statement these options are part of. - */ - public Where where(Clause clause) { - return statement.where(clause); - } - - /** - * Adds a condition to the UPDATE statement these options are part of. - * - * @param condition the condition to add. - * @return the conditions for the UPDATE statement these options are part of. - */ - public Conditions onlyIf(Clause condition) { - return statement.onlyIf(condition); - } + public Assignments with(Assignment assignment) { + return statement.with(assignment); } /** - * Conditions for an UPDATE statement. - *
<p/>
    - * When provided some conditions, an update will not apply unless the - * provided conditions applies. - *
<p/>
    - * Please keep in mind that provided conditions has a non negligible - * performance impact and should be avoided when possible. + * Adds a where clause to the UPDATE statement these conditions are part of. + * + * @param clause clause to add. + * @return the WHERE clause of the UPDATE statement these conditions are part of. */ - public static class Conditions extends BuiltStatement.ForwardingStatement { - - private final List conditions = new ArrayList(); - - Conditions(Update statement) { - super(statement); - } - - /** - * Adds the provided condition for the update. - *
<p/>
    - * Note that while the query builder accept any type of {@code Clause} - * as conditions, Cassandra currently only allow equality ones. - * - * @param condition the condition to add. - * @return this {@code Conditions} clause. - */ - public Conditions and(Clause condition) { - statement.setNonIdempotentOps(); - conditions.add(condition); - checkForBindMarkers(condition); - return this; - } - - /** - * Adds an assignment to the UPDATE statement those conditions are part of. - * - * @param assignment the assignment to add. - * @return the assignments of the UPDATE statement those conditions are part of. - */ - public Assignments with(Assignment assignment) { - return statement.with(assignment); - } - - /** - * Adds a where clause to the UPDATE statement these conditions are part of. - * - * @param clause clause to add. - * @return the WHERE clause of the UPDATE statement these conditions are part of. - */ - public Where where(Clause clause) { - return statement.where(clause); - } - - /** - * Adds an option to the UPDATE statement these conditions are part of. - * - * @param using the using clause to add. - * @return the options of the UPDATE statement these conditions are part of. - */ - public Options using(Using using) { - return statement.using(using); - } + public Where where(Clause clause) { + return statement.where(clause); + } + /** + * Adds an option to the UPDATE statement these conditions are part of. + * + * @param using the using clause to add. + * @return the options of the UPDATE statement these conditions are part of. + */ + public Options using(Using using) { + return statement.using(using); } + } - public static class IfExists extends BuiltStatement.ForwardingStatement { - IfExists(Update statement) { - super(statement); - } + public static class IfExists extends BuiltStatement.ForwardingStatement { + IfExists(Update statement) { + super(statement); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Using.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Using.java index 4f46ee3aa9b..77a0d6a552f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Using.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Using.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,52 +18,51 @@ package com.datastax.driver.core.querybuilder; import com.datastax.driver.core.CodecRegistry; - import java.util.List; public abstract class Using extends Utils.Appendeable { - final String optionName; + final String optionName; - private Using(String optionName) { - this.optionName = optionName; - } + private Using(String optionName) { + this.optionName = optionName; + } - static class WithValue extends Using { - private final long value; + static class WithValue extends Using { + private final long value; - WithValue(String optionName, long value) { - super(optionName); - this.value = value; - } + WithValue(String optionName, long value) { + super(optionName); + this.value = value; + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - sb.append(optionName).append(' ').append(value); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + sb.append(optionName).append(' ').append(value); + } - @Override - boolean containsBindMarker() { - return false; - } + @Override + boolean containsBindMarker() { + return false; } + } - static class WithMarker extends Using { - private final BindMarker marker; + static class WithMarker extends Using { + private final BindMarker marker; - WithMarker(String optionName, BindMarker marker) { - super(optionName); - this.marker = marker; - } + WithMarker(String optionName, BindMarker marker) { + super(optionName); + this.marker = marker; + } - @Override - void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { - sb.append(optionName).append(' ').append(marker); - } + @Override + void appendTo(StringBuilder sb, List variables, CodecRegistry codecRegistry) { + sb.append(optionName).append(' ').append(marker); + } - @Override - boolean containsBindMarker() { - return true; - } + @Override + boolean containsBindMarker() { + return true; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Utils.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Utils.java index da8d810dfdd..dde2a669654 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Utils.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/Utils.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
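The `WithValue`/`WithMarker` split in `Using` above corresponds to passing either a literal or a bind marker to the `QueryBuilder.ttl`/`timestamp` helpers; a sketch with hypothetical names (the timestamp value is an arbitrary example in microseconds):

```java
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;
import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp;
import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;

import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class UsingOptionsSketch {
  public static void main(String[] args) {
    // UPDATE ks.events USING TTL ? AND TIMESTAMP 1234 SET payload=? WHERE id=1
    Statement update =
        QueryBuilder.update("ks", "events")
            .using(ttl(bindMarker()))   // rendered by Using.WithMarker
            .and(timestamp(1234L))      // rendered by Using.WithValue
            .with(set("payload", bindMarker()))
            .where(eq("id", 1));
    System.out.println(update);
  }
}
```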
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,15 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.InvalidTypeException; +import static com.google.common.base.Preconditions.checkNotNull; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.Token; +import com.datastax.driver.core.TypeCodec; +import com.datastax.driver.core.exceptions.InvalidTypeException; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; @@ -27,475 +35,459 @@ import java.util.Set; import java.util.regex.Pattern; -import static com.google.common.base.Preconditions.checkNotNull; - // Static utilities private to the query builder abstract class Utils { - private static final Pattern alphanumeric = Pattern.compile("\\w+"); // this includes _ - private static final Pattern cnamePattern = Pattern.compile("\\w+(?:\\[.+\\])?"); - - /** - * Deal with case sensitivity for a given element id (keyspace, table, column, etc.) - * - * This method is used to convert identifiers provided by the client (through methods such as getKeyspace(String)), - * to the format used internally by the driver. - * - * We expect client-facing APIs to behave like cqlsh, that is: - * - identifiers that are mixed-case or contain special characters should be quoted. - * - unquoted identifiers will be lowercased: getKeyspace("Foo") will look for a keyspace named "foo" - * - * Copied from {@link Metadata#handleId(String)} to avoid making it public. - */ - static String handleId(String id) { - // Shouldn't really happen for this method, but no reason to fail here - if (id == null) - return null; - - if (alphanumeric.matcher(id).matches()) - return id.toLowerCase(); - - // Check if it's enclosed in quotes. If it is, remove them and unescape internal double quotes - if (!id.isEmpty() && id.charAt(0) == '"' && id.charAt(id.length() - 1) == '"') - return id.substring(1, id.length() - 1).replaceAll("\"\"", "\""); - - // Otherwise, just return the id. - // Note that this is a bit at odds with the rules explained above, because the client can pass an - // identifier that contains special characters, without the need to quote it. - // Still it's better to be lenient here rather than throwing an exception. 
- return id; - } - - static StringBuilder joinAndAppend(StringBuilder sb, CodecRegistry codecRegistry, String separator, List values, List variables) { - for (int i = 0; i < values.size(); i++) { - if (i > 0) - sb.append(separator); - values.get(i).appendTo(sb, variables, codecRegistry); - } - return sb; - } - - static StringBuilder joinAndAppendNames(StringBuilder sb, CodecRegistry codecRegistry, List values) { - for (int i = 0; i < values.size(); i++) { - if (i > 0) - sb.append(","); - appendName(values.get(i), codecRegistry, sb); - } - return sb; - } - - static StringBuilder joinAndAppendValues(StringBuilder sb, CodecRegistry codecRegistry, List values, List variables) { - for (int i = 0; i < values.size(); i++) { - if (i > 0) - sb.append(","); - appendValue(values.get(i), codecRegistry, sb, variables); - } - return sb; - } - - static StringBuilder appendValue(Object value, CodecRegistry codecRegistry, StringBuilder sb, List variables) { - if (value == null) { - sb.append("null"); - } else if (value instanceof BindMarker) { - sb.append(value); - } else if (value instanceof FCall) { - FCall fcall = (FCall) value; - sb.append(fcall.name).append('('); - for (int i = 0; i < fcall.parameters.length; i++) { - if (i > 0) - sb.append(','); - appendValue(fcall.parameters[i], codecRegistry, sb, variables); - } - sb.append(')'); - } else if (value instanceof Cast) { - Cast cast = (Cast) value; - sb.append("CAST("); - appendName(cast.column, codecRegistry, sb); - sb.append(" AS ").append(cast.targetType).append(")"); - } else if (value instanceof CName) { - appendName(((CName) value).name, codecRegistry, sb); - } else if (value instanceof RawString) { - sb.append(value.toString()); - } else if (value instanceof List && !isSerializable(value)) { - // bind variables are not supported inside collection literals - appendList((List) value, codecRegistry, sb); - } else if (value instanceof Set && !isSerializable(value)) { - // bind variables are not supported inside collection literals - appendSet((Set) value, codecRegistry, sb); - } else if (value instanceof Map && !isSerializable(value)) { - // bind variables are not supported inside collection literals - appendMap((Map) value, codecRegistry, sb); - } else if (variables == null || !isSerializable(value)) { - // we are not collecting statement values (variables == null) - // or the value is meant to be forcefully appended to the query string: - // format it with the appropriate codec and append it now - TypeCodec codec = codecRegistry.codecFor(value); - sb.append(codec.format(value)); - } else { - // Do not format the value nor append it to the query string: - // use a bind marker instead, - // but add the value the the statement's variables list - sb.append('?'); - variables.add(value); - return sb; - } - return sb; + private static final Pattern alphanumeric = Pattern.compile("\\w+"); // this includes _ + private static final Pattern cnamePattern = Pattern.compile("\\w+(?:\\[.+\\])?"); + + /** + * Deal with case sensitivity for a given element id (keyspace, table, column, etc.) + * + *
<p>
    This method is used to convert identifiers provided by the client (through methods such as + * getKeyspace(String)), to the format used internally by the driver. + * + *
<p>
    We expect client-facing APIs to behave like cqlsh, that is: - identifiers that are + * mixed-case or contain special characters should be quoted. - unquoted identifiers will be + * lowercased: getKeyspace("Foo") will look for a keyspace named "foo" + * + *
<p>
    Copied from {@link Metadata#handleId(String)} to avoid making it public. + */ + static String handleId(String id) { + // Shouldn't really happen for this method, but no reason to fail here + if (id == null) return null; + + if (alphanumeric.matcher(id).matches()) return id.toLowerCase(); + + // Check if it's enclosed in quotes. If it is, remove them and unescape internal double quotes + if (!id.isEmpty() && id.charAt(0) == '"' && id.charAt(id.length() - 1) == '"') + return id.substring(1, id.length() - 1).replaceAll("\"\"", "\""); + + // Otherwise, just return the id. + // Note that this is a bit at odds with the rules explained above, because the client can pass + // an + // identifier that contains special characters, without the need to quote it. + // Still it's better to be lenient here rather than throwing an exception. + return id; + } + + static StringBuilder joinAndAppend( + StringBuilder sb, + CodecRegistry codecRegistry, + String separator, + List values, + List variables) { + for (int i = 0; i < values.size(); i++) { + if (i > 0) sb.append(separator); + values.get(i).appendTo(sb, variables, codecRegistry); } - - private static StringBuilder appendList(List l, CodecRegistry codecRegistry, StringBuilder sb) { - sb.append('['); - for (int i = 0; i < l.size(); i++) { - if (i > 0) - sb.append(','); - appendValue(l.get(i), codecRegistry, sb, null); - } - sb.append(']'); - return sb; + return sb; + } + + static StringBuilder joinAndAppendNames( + StringBuilder sb, CodecRegistry codecRegistry, List values) { + for (int i = 0; i < values.size(); i++) { + if (i > 0) sb.append(","); + appendName(values.get(i), codecRegistry, sb); } - - private static StringBuilder appendSet(Set s, CodecRegistry codecRegistry, StringBuilder sb) { - sb.append('{'); - boolean first = true; - for (Object elt : s) { - if (first) first = false; - else sb.append(','); - appendValue(elt, codecRegistry, sb, null); - } - sb.append('}'); - return sb; + return sb; + } + + static StringBuilder joinAndAppendValues( + StringBuilder sb, CodecRegistry codecRegistry, List values, List variables) { + for (int i = 0; i < values.size(); i++) { + if (i > 0) sb.append(","); + appendValue(values.get(i), codecRegistry, sb, variables); } - - private static StringBuilder appendMap(Map m, CodecRegistry codecRegistry, StringBuilder sb) { - sb.append('{'); - boolean first = true; - for (Map.Entry entry : m.entrySet()) { - if (first) - first = false; - else - sb.append(','); - appendValue(entry.getKey(), codecRegistry, sb, null); - sb.append(':'); - appendValue(entry.getValue(), codecRegistry, sb, null); - } - sb.append('}'); - return sb; + return sb; + } + + static StringBuilder appendValue( + Object value, CodecRegistry codecRegistry, StringBuilder sb, List variables) { + if (value == null) { + sb.append("null"); + } else if (value instanceof BindMarker) { + sb.append(value); + } else if (value instanceof FCall) { + FCall fcall = (FCall) value; + sb.append(fcall.name).append('('); + for (int i = 0; i < fcall.parameters.length; i++) { + if (i > 0) sb.append(','); + appendValue(fcall.parameters[i], codecRegistry, sb, variables); + } + sb.append(')'); + } else if (value instanceof Cast) { + Cast cast = (Cast) value; + sb.append("CAST("); + appendName(cast.column, codecRegistry, sb); + sb.append(" AS ").append(cast.targetType).append(")"); + } else if (value instanceof CName) { + appendName(((CName) value).name, codecRegistry, sb); + } else if (value instanceof RawString) { + sb.append(value.toString()); + } else if (value instanceof 
List && !isSerializable(value)) { + // bind variables are not supported inside collection literals + appendList((List) value, codecRegistry, sb); + } else if (value instanceof Set && !isSerializable(value)) { + // bind variables are not supported inside collection literals + appendSet((Set) value, codecRegistry, sb); + } else if (value instanceof Map && !isSerializable(value)) { + // bind variables are not supported inside collection literals + appendMap((Map) value, codecRegistry, sb); + } else if (variables == null || !isSerializable(value)) { + // we are not collecting statement values (variables == null) + // or the value is meant to be forcefully appended to the query string: + // format it with the appropriate codec and append it now + TypeCodec codec = codecRegistry.codecFor(value); + sb.append(codec.format(value)); + } else { + // Do not format the value nor append it to the query string: + // use a bind marker instead, + // but add the value the the statement's variables list + sb.append('?'); + variables.add(value); + return sb; } - - static boolean containsBindMarker(Object value) { - if (value instanceof BindMarker) - return true; - if (value instanceof FCall) - for (Object param : ((FCall) value).parameters) - if (containsBindMarker(param)) - return true; - if (value instanceof Collection) - for (Object elt : (Collection) value) - if (containsBindMarker(elt)) - return true; - if (value instanceof Map) - for (Map.Entry entry : ((Map) value).entrySet()) - if (containsBindMarker(entry.getKey()) || containsBindMarker(entry.getValue())) - return true; - return false; + return sb; + } + + private static StringBuilder appendList( + List l, CodecRegistry codecRegistry, StringBuilder sb) { + sb.append('['); + for (int i = 0; i < l.size(); i++) { + if (i > 0) sb.append(','); + appendValue(l.get(i), codecRegistry, sb, null); } - - static boolean containsSpecialValue(Object value) { - if (value instanceof BindMarker || value instanceof FCall || value instanceof CName || value instanceof RawString) - return true; - if (value instanceof Collection) - for (Object elt : (Collection) value) - if (containsSpecialValue(elt)) - return true; - if (value instanceof Map) - for (Map.Entry entry : ((Map) value).entrySet()) - if (containsSpecialValue(entry.getKey()) || containsSpecialValue(entry.getValue())) - return true; - return false; + sb.append(']'); + return sb; + } + + private static StringBuilder appendSet(Set s, CodecRegistry codecRegistry, StringBuilder sb) { + sb.append('{'); + boolean first = true; + for (Object elt : s) { + if (first) first = false; + else sb.append(','); + appendValue(elt, codecRegistry, sb, null); } - - /** - * Return true if the given value is likely to find a suitable codec - * to be serialized as a query parameter. - * If the value is not serializable, it must be included in the query string. - * Non serializable values include special values such as function calls, - * column names and bind markers, and collections thereof. - * We also don't serialize fixed size number types. The reason is that if we do it, we will - * force a particular size (4 bytes for ints, ...) and for the query builder, we don't want - * users to have to bother with that. - * - * @param value the value to inspect. - * @return true if the value is serializable, false otherwise. 
- */ - static boolean isSerializable(Object value) { - if (containsSpecialValue(value)) - return false; - if (value instanceof Number && !(value instanceof BigInteger || value instanceof BigDecimal)) - return false; - if (value instanceof Collection) - for (Object elt : (Collection) value) - if (!isSerializable(elt)) - return false; - if (value instanceof Map) - for (Map.Entry entry : ((Map) value).entrySet()) - if (!isSerializable(entry.getKey()) || !isSerializable(entry.getValue())) - return false; - return true; + sb.append('}'); + return sb; + } + + private static StringBuilder appendMap( + Map m, CodecRegistry codecRegistry, StringBuilder sb) { + sb.append('{'); + boolean first = true; + for (Map.Entry entry : m.entrySet()) { + if (first) first = false; + else sb.append(','); + appendValue(entry.getKey(), codecRegistry, sb, null); + sb.append(':'); + appendValue(entry.getValue(), codecRegistry, sb, null); } - - static boolean isIdempotent(Object value) { - if (value == null) { - return true; - } else if (value instanceof Assignment) { - Assignment assignment = (Assignment) value; - return assignment.isIdempotent(); - } else if (value instanceof FCall) { - return false; - } else if (value instanceof RawString) { - return false; - } else if (value instanceof Collection) { - for (Object elt : ((Collection) value)) { - if (!isIdempotent(elt)) - return false; - } - return true; - } else if (value instanceof Map) { - for (Map.Entry entry : ((Map) value).entrySet()) { - if (!isIdempotent(entry.getKey()) || !isIdempotent(entry.getValue())) - return false; - } - } else if (value instanceof Clause) { - Object clauseValue = ((Clause) value).firstValue(); - return isIdempotent(clauseValue); - } - return true; + sb.append('}'); + return sb; + } + + static boolean containsBindMarker(Object value) { + if (value instanceof BindMarker) return true; + if (value instanceof FCall) + for (Object param : ((FCall) value).parameters) if (containsBindMarker(param)) return true; + if (value instanceof Collection) + for (Object elt : (Collection) value) if (containsBindMarker(elt)) return true; + if (value instanceof Map) + for (Map.Entry entry : ((Map) value).entrySet()) + if (containsBindMarker(entry.getKey()) || containsBindMarker(entry.getValue())) return true; + return false; + } + + static boolean containsSpecialValue(Object value) { + if (value instanceof BindMarker + || value instanceof FCall + || value instanceof CName + || value instanceof RawString) return true; + if (value instanceof Collection) + for (Object elt : (Collection) value) if (containsSpecialValue(elt)) return true; + if (value instanceof Map) + for (Map.Entry entry : ((Map) value).entrySet()) + if (containsSpecialValue(entry.getKey()) || containsSpecialValue(entry.getValue())) + return true; + return false; + } + + /** + * Return true if the given value is likely to find a suitable codec to be serialized as a query + * parameter. If the value is not serializable, it must be included in the query string. Non + * serializable values include special values such as function calls, column names and bind + * markers, and collections thereof. We also don't serialize fixed size number types. The reason + * is that if we do it, we will force a particular size (4 bytes for ints, ...) and for the query + * builder, we don't want users to have to bother with that. + * + * @param value the value to inspect. + * @return true if the value is serializable, false otherwise. 
+ */ + static boolean isSerializable(Object value) { + if (containsSpecialValue(value)) return false; + if (value instanceof Number && !(value instanceof BigInteger || value instanceof BigDecimal)) + return false; + if (value instanceof Collection) + for (Object elt : (Collection) value) if (!isSerializable(elt)) return false; + if (value instanceof Map) + for (Map.Entry entry : ((Map) value).entrySet()) + if (!isSerializable(entry.getKey()) || !isSerializable(entry.getValue())) return false; + return true; + } + + static boolean isIdempotent(Object value) { + if (value == null) { + return true; + } else if (value instanceof Assignment) { + Assignment assignment = (Assignment) value; + return assignment.isIdempotent(); + } else if (value instanceof FCall) { + return false; + } else if (value instanceof RawString) { + return false; + } else if (value instanceof Collection) { + for (Object elt : ((Collection) value)) { + if (!isIdempotent(elt)) return false; + } + return true; + } else if (value instanceof Map) { + for (Map.Entry entry : ((Map) value).entrySet()) { + if (!isIdempotent(entry.getKey()) || !isIdempotent(entry.getValue())) return false; + } + } else if (value instanceof Clause) { + Object clauseValue = ((Clause) value).firstValue(); + return isIdempotent(clauseValue); } - - static StringBuilder appendName(String name, StringBuilder sb) { - name = name.trim(); - // FIXME: checking for token( specifically is uber ugly, we'll need some better solution. - if (name.startsWith("\"") || name.startsWith("token(") || cnamePattern.matcher(name).matches()) - sb.append(name); - else - sb.append('"').append(name).append('"'); - return sb; + return true; + } + + static StringBuilder appendName(String name, StringBuilder sb) { + name = name.trim(); + // FIXME: checking for token( specifically is uber ugly, we'll need some better solution. 
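The isSerializable/isIdempotent helpers above drive how the query builder renders user-provided values. A minimal usage sketch (keyspace, table and column names are hypothetical; the inline-vs-bind-marker behaviour described in the comments follows the heuristic documented above, not a guaranteed contract):

```
import static com.datastax.driver.core.querybuilder.QueryBuilder.fcall;
import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;

import com.datastax.driver.core.RegularStatement;

public class ValueHandlingSketch {
  public static void main(String[] args) {
    RegularStatement insert =
        insertInto("ks", "users")          // hypothetical keyspace/table
            .value("id", fcall("now"))     // FCall: a special value, always inlined in the CQL string
            .value("age", 42)              // fixed-size number: treated as non-serializable, inlined as a literal
            .value("login", "jdoe");       // plain String: a codec exists, so it may be kept as a bind value
    // The generated CQL shows which values were inlined and which became '?' placeholders.
    System.out.println(insert.getQueryString());
  }
}
```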
+ if (name.startsWith("\"") || name.startsWith("token(") || cnamePattern.matcher(name).matches()) + sb.append(name); + else sb.append('"').append(name).append('"'); + return sb; + } + + static StringBuilder appendName(Object name, CodecRegistry codecRegistry, StringBuilder sb) { + if (name instanceof String) { + appendName((String) name, sb); + } else if (name instanceof CName) { + appendName(((CName) name).name, sb); + } else if (name instanceof Path) { + String[] segments = ((Path) name).segments; + for (int i = 0; i < segments.length; i++) { + if (i > 0) sb.append('.'); + appendName(segments[i], sb); + } + } else if (name instanceof FCall) { + FCall fcall = (FCall) name; + sb.append(fcall.name).append('('); + for (int i = 0; i < fcall.parameters.length; i++) { + if (i > 0) sb.append(','); + appendValue(fcall.parameters[i], codecRegistry, sb, null); + } + sb.append(')'); + } else if (name instanceof Alias) { + Alias alias = (Alias) name; + appendName(alias.column, codecRegistry, sb); + sb.append(" AS ").append(alias.alias); + } else if (name instanceof Cast) { + Cast cast = (Cast) name; + sb.append("CAST("); + appendName(cast.column, codecRegistry, sb); + sb.append(" AS ").append(cast.targetType).append(")"); + } else if (name instanceof RawString) { + sb.append(((RawString) name).str); + } else { + throw new IllegalArgumentException( + String.format("Invalid column %s of type unknown of the query builder", name)); } - - static StringBuilder appendName(Object name, CodecRegistry codecRegistry, StringBuilder sb) { - if (name instanceof String) { - appendName((String) name, sb); - } else if (name instanceof CName) { - appendName(((CName) name).name, sb); - } else if (name instanceof Path) { - String[] segments = ((Path) name).segments; - for (int i = 0; i < segments.length; i++) { - if (i > 0) - sb.append('.'); - appendName(segments[i], sb); - } - } else if (name instanceof FCall) { - FCall fcall = (FCall) name; - sb.append(fcall.name).append('('); - for (int i = 0; i < fcall.parameters.length; i++) { - if (i > 0) - sb.append(','); - appendValue(fcall.parameters[i], codecRegistry, sb, null); - } - sb.append(')'); - } else if (name instanceof Alias) { - Alias alias = (Alias) name; - appendName(alias.column, codecRegistry, sb); - sb.append(" AS ").append(alias.alias); - } else if (name instanceof Cast) { - Cast cast = (Cast) name; - sb.append("CAST("); - appendName(cast.column, codecRegistry, sb); - sb.append(" AS ").append(cast.targetType).append(")"); - } else if (name instanceof RawString) { - sb.append(((RawString) name).str); + return sb; + } + + /** + * Utility method to serialize user-provided values. + * + *

<p>This method is a copy of the one declared in {@link + * com.datastax.driver.core.SimpleStatement}, it was duplicated to avoid having to make it public. + * + *

<p>It is useful in situations where there is no metadata available and the underlying CQL type + * for the values is not known. + * + *

<p>This situation happens when a {@link com.datastax.driver.core.SimpleStatement} or a {@link + * com.datastax.driver.core.querybuilder.BuiltStatement} (Query Builder) contain values; in these + * places, the driver has no way to determine the right CQL type to use. + * + *

    This method performs a best-effort heuristic to guess which codec to use. Note that this is + * not particularly efficient as the codec registry needs to iterate over the registered codecs + * until it finds a suitable one. + * + * @param values The values to convert. + * @param protocolVersion The protocol version to use. + * @param codecRegistry The {@link CodecRegistry} to use. + * @return The converted values. + */ + static ByteBuffer[] convert( + Object[] values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + ByteBuffer[] serializedValues = new ByteBuffer[values.length]; + for (int i = 0; i < values.length; i++) { + Object value = values[i]; + if (value == null) { + // impossible to locate the right codec when object is null, + // so forcing the result to null + serializedValues[i] = null; + } else { + if (value instanceof Token) { + // bypass CodecRegistry for Token instances + serializedValues[i] = ((Token) value).serialize(protocolVersion); } else { - throw new IllegalArgumentException(String.format("Invalid column %s of type unknown of the query builder", name)); - } - return sb; - } - - /** - * Utility method to serialize user-provided values. - *

    - * This method is a copy of the one declared in {@link com.datastax.driver.core.SimpleStatement}, it was duplicated - * to avoid having to make it public. - *

    - * It is useful in situations where there is no metadata available and the underlying CQL - * type for the values is not known. - *

    - * This situation happens when a {@link com.datastax.driver.core.SimpleStatement} - * or a {@link com.datastax.driver.core.querybuilder.BuiltStatement} (Query Builder) contain values; - * in these places, the driver has no way to determine the right CQL type to use. - *

    - * This method performs a best-effort heuristic to guess which codec to use. - * Note that this is not particularly efficient as the codec registry needs to iterate over - * the registered codecs until it finds a suitable one. - * - * @param values The values to convert. - * @param protocolVersion The protocol version to use. - * @param codecRegistry The {@link CodecRegistry} to use. - * @return The converted values. - */ - static ByteBuffer[] convert(Object[] values, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - ByteBuffer[] serializedValues = new ByteBuffer[values.length]; - for (int i = 0; i < values.length; i++) { - Object value = values[i]; - if (value == null) { - // impossible to locate the right codec when object is null, - // so forcing the result to null - serializedValues[i] = null; - } else { - if (value instanceof Token) { - // bypass CodecRegistry for Token instances - serializedValues[i] = ((Token) value).serialize(protocolVersion); - } else { - try { - TypeCodec codec = codecRegistry.codecFor(value); - serializedValues[i] = codec.serialize(value, protocolVersion); - } catch (Exception e) { - // Catch and rethrow to provide a more helpful error message (one that include which value is bad) - throw new InvalidTypeException(String.format("Value %d of type %s does not correspond to any CQL3 type", i, value.getClass()), e); - } - } - } - } - return serializedValues; - } - - /** - * Utility method to assemble different routing key components into a single {@link ByteBuffer}. - * Mainly intended for statements that need to generate a routing key out of their current values. - *

    - * This method is a copy of the one declared in {@link com.datastax.driver.core.SimpleStatement}, it was duplicated - * to avoid having to make it public. - * - * @param buffers the components of the routing key. - * @return A ByteBuffer containing the serialized routing key - */ - static ByteBuffer compose(ByteBuffer... buffers) { - if (buffers.length == 1) - return buffers[0]; - - int totalLength = 0; - for (ByteBuffer bb : buffers) - totalLength += 2 + bb.remaining() + 1; - - ByteBuffer out = ByteBuffer.allocate(totalLength); - for (ByteBuffer buffer : buffers) { - ByteBuffer bb = buffer.duplicate(); - putShortLength(out, bb.remaining()); - out.put(bb); - out.put((byte) 0); + try { + TypeCodec codec = codecRegistry.codecFor(value); + serializedValues[i] = codec.serialize(value, protocolVersion); + } catch (Exception e) { + // Catch and rethrow to provide a more helpful error message (one that include which + // value is bad) + throw new InvalidTypeException( + String.format( + "Value %d of type %s does not correspond to any CQL3 type", + i, value.getClass()), + e); + } } - out.flip(); - return out; + } } - - static void putShortLength(ByteBuffer bb, int length) { - bb.put((byte) ((length >> 8) & 0xFF)); - bb.put((byte) (length & 0xFF)); + return serializedValues; + } + + /** + * Utility method to assemble different routing key components into a single {@link ByteBuffer}. + * Mainly intended for statements that need to generate a routing key out of their current values. + * + *

    This method is a copy of the one declared in {@link + * com.datastax.driver.core.SimpleStatement}, it was duplicated to avoid having to make it public. + * + * @param buffers the components of the routing key. + * @return A ByteBuffer containing the serialized routing key + */ + static ByteBuffer compose(ByteBuffer... buffers) { + if (buffers.length == 1) return buffers[0]; + + int totalLength = 0; + for (ByteBuffer bb : buffers) totalLength += 2 + bb.remaining() + 1; + + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer buffer : buffers) { + ByteBuffer bb = buffer.duplicate(); + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); } + out.flip(); + return out; + } - static abstract class Appendeable { - abstract void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry); + static void putShortLength(ByteBuffer bb, int length) { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } - abstract boolean containsBindMarker(); - } + abstract static class Appendeable { + abstract void appendTo(StringBuilder sb, List values, CodecRegistry codecRegistry); - static class RawString { - private final String str; + abstract boolean containsBindMarker(); + } - RawString(String str) { - this.str = str; - } + static class RawString { + private final String str; - @Override - public String toString() { - return str; - } + RawString(String str) { + this.str = str; } - static class FCall { - - private final String name; - private final Object[] parameters; + @Override + public String toString() { + return str; + } + } - FCall(String name, Object... parameters) { - checkNotNull(name); - this.name = name; - this.parameters = parameters; - } + static class FCall { - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(name).append('('); - for (int i = 0; i < parameters.length; i++) { - if (i > 0) - sb.append(','); - sb.append(parameters[i]); - } - sb.append(')'); - return sb.toString(); - } + private final String name; + private final Object[] parameters; + FCall(String name, Object... 
parameters) { + checkNotNull(name); + this.name = name; + this.parameters = parameters; } - static class CName { - private final String name; + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(name).append('('); + for (int i = 0; i < parameters.length; i++) { + if (i > 0) sb.append(','); + sb.append(parameters[i]); + } + sb.append(')'); + return sb.toString(); + } + } - CName(String name) { - this.name = name; - } + static class CName { + private final String name; - @Override - public String toString() { - return name; - } + CName(String name) { + this.name = name; } - static class Alias { - private final Object column; - private final String alias; + @Override + public String toString() { + return name; + } + } - Alias(Object column, String alias) { - this.column = column; - this.alias = alias; - } + static class Alias { + private final Object column; + private final String alias; - @Override - public String toString() { - return String.format("%s AS %s", column, alias); - } + Alias(Object column, String alias) { + this.column = column; + this.alias = alias; } - static class Cast { - private final Object column; - private final DataType targetType; + @Override + public String toString() { + return String.format("%s AS %s", column, alias); + } + } - Cast(Object column, DataType targetType) { - this.column = column; - this.targetType = targetType; - } + static class Cast { + private final Object column; + private final DataType targetType; - @Override - public String toString() { - return String.format("CAST(%s AS %s)", column, targetType); - } + Cast(Object column, DataType targetType) { + this.column = column; + this.targetType = targetType; } - static class Path { + @Override + public String toString() { + return String.format("CAST(%s AS %s)", column, targetType); + } + } - private final String[] segments; + static class Path { - Path(String... segments) { - this.segments = segments; - } + private final String[] segments; + Path(String... segments) { + this.segments = segments; } - + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/package-info.java index 722b304a97a..4bf5951e4e4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/querybuilder/package-info.java +++ b/driver-core/src/main/java/com/datastax/driver/core/querybuilder/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,7 @@ */ /** * A CQL3 query builder. - *
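Before moving on to the schema builder files, it may help to see what the convert/compose pair in the Utils diff above amounts to: convert does a best-effort CodecRegistry lookup per value, and compose concatenates the serialized components as a 2-byte length, the component bytes, then a zero byte (a single component is used as-is). A self-contained sketch of that layout, using only public driver API and hypothetical key components:

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.ProtocolVersion;
import java.nio.ByteBuffer;

public class RoutingKeySketch {
  public static void main(String[] args) {
    CodecRegistry registry = CodecRegistry.DEFAULT_INSTANCE;
    ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED;

    // Best-effort lookup, as in Utils.convert(): the codec is picked from the runtime
    // class of each value (here two hypothetical partition key components).
    ByteBuffer first = registry.codecFor("jdoe").serialize("jdoe", version);
    ByteBuffer second = registry.codecFor(42L).serialize(42L, version);

    // Same layout as Utils.compose(): [2-byte big-endian length][bytes][0x00] per component.
    ByteBuffer[] parts = {first, second};
    int totalLength = 0;
    for (ByteBuffer part : parts) totalLength += 2 + part.remaining() + 1;
    ByteBuffer routingKey = ByteBuffer.allocate(totalLength);
    for (ByteBuffer part : parts) {
      ByteBuffer bb = part.duplicate();
      routingKey.put((byte) ((bb.remaining() >> 8) & 0xFF));
      routingKey.put((byte) (bb.remaining() & 0xFF));
      routingKey.put(bb);
      routingKey.put((byte) 0);
    }
    routingKey.flip();
    System.out.println("routing key bytes: " + routingKey.remaining());
  }
}
```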

    - * The main entry for this package is the {@code QueryBuilder} class. + * + *

    The main entry for this package is the {@code QueryBuilder} class. */ package com.datastax.driver.core.querybuilder; diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AbstractCreateStatement.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AbstractCreateStatement.java index 101de6b873e..c13d3eaca4d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AbstractCreateStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AbstractCreateStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,173 +19,211 @@ import com.datastax.driver.core.DataType; import com.google.common.base.Optional; - import java.util.LinkedHashMap; import java.util.Map; -public abstract class AbstractCreateStatement> extends SchemaStatement { +public abstract class AbstractCreateStatement> + extends SchemaStatement { - protected Optional keyspaceName = Optional.absent(); - protected boolean ifNotExists; - protected Map simpleColumns = new LinkedHashMap(); + protected Optional keyspaceName = Optional.absent(); + protected boolean ifNotExists; + protected Map simpleColumns = new LinkedHashMap(); - @SuppressWarnings("unchecked") - private T self = (T) this; + @SuppressWarnings("unchecked") + private T self = (T) this; - /** - * Add the 'IF NOT EXISTS' condition to this CREATE statement. - * - * @return this CREATE statement. - */ - public T ifNotExists() { - this.ifNotExists = true; - return self; - } + /** + * Add the 'IF NOT EXISTS' condition to this CREATE statement. + * + * @return this CREATE statement. + */ + public T ifNotExists() { + this.ifNotExists = true; + return self; + } - /** - * Add a column definition to this CREATE statement. - *

    - *

    - * To add a list column: - *

    
    -     * addColumn("myList",DataType.list(DataType.text()))
    -     * 
    - *

    - * To add a set column: - *

    
    -     * addColumn("mySet",DataType.set(DataType.text()))
    -     * 
    - *

    - * To add a map column: - *

    
    -     * addColumn("myMap",DataType.map(DataType.cint(),DataType.text()))
    -     * 
    - * - * @param columnName the name of the column to be added. - * @param dataType the data type of the column to be added. - * @return this CREATE statement. - */ - public T addColumn(String columnName, DataType dataType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(dataType, "Column type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, new NativeColumnType(dataType)); - return self; - } + /** + * Add a column definition to this CREATE statement. + * + *

    + * + *

    To add a list column: + * + *

    
    +   * addColumn("myList",DataType.list(DataType.text()))
    +   * 
    + * + *

    To add a set column: + * + *

    
    +   * addColumn("mySet",DataType.set(DataType.text()))
    +   * 
    + * + *

    To add a map column: + * + *

    
    +   * addColumn("myMap",DataType.map(DataType.cint(),DataType.text()))
    +   * 
    + * + * @param columnName the name of the column to be added. + * @param dataType the data type of the column to be added. + * @return this CREATE statement. + */ + public T addColumn(String columnName, DataType dataType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(dataType, "Column type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, new NativeColumnType(dataType)); + return self; + } - /** - * Add a column definition to this CREATE statement, when the type contains a UDT. - * - * @param columnName the name of the column to be added. - * @param udtType the UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. - * @return this CREATE statement. - */ - public T addUDTColumn(String columnName, UDTType udtType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(udtType, "Column type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, udtType); - return self; - } + /** + * Add a column definition to this CREATE statement, when the type contains a UDT. + * + * @param columnName the name of the column to be added. + * @param udtType the UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)} + * or {@link SchemaBuilder#udtLiteral(String)}. + * @return this CREATE statement. + */ + public T addUDTColumn(String columnName, UDTType udtType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(udtType, "Column type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, udtType); + return self; + } - /** - * Shorthand to add a column definition to this CREATE statement, when the type is a list of UDT. - * - * @param columnName the name of the column to be added - * @param udtType the udt type of the column to be added. Use {@link SchemaBuilder#frozen(String)}. - * @return this CREATE statement. - */ - public T addUDTListColumn(String columnName, UDTType udtType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(udtType, "Column element type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, UDTType.list(udtType)); - return self; - } + /** + * Shorthand to add a column definition to this CREATE statement, when the type is a list of UDT. + * + * @param columnName the name of the column to be added + * @param udtType the udt type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @return this CREATE statement. + */ + public T addUDTListColumn(String columnName, UDTType udtType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(udtType, "Column element type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, UDTType.list(udtType)); + return self; + } - /** - * Shorthand to add a column definition to this CREATE statement, when the type is a set of UDT. - * - * @param columnName the name of the column to be added - * @param udtType the udt type of the column to be added. 
Use {@link SchemaBuilder#frozen(String)}. - * @return this CREATE statement. - */ - public T addUDTSetColumn(String columnName, UDTType udtType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(udtType, "Column element type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, UDTType.set(udtType)); - return self; - } + /** + * Shorthand to add a column definition to this CREATE statement, when the type is a set of UDT. + * + * @param columnName the name of the column to be added + * @param udtType the udt type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @return this CREATE statement. + */ + public T addUDTSetColumn(String columnName, UDTType udtType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(udtType, "Column element type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, UDTType.set(udtType)); + return self; + } - /** - * Shorthand to add a column definition to this CREATE statement, when the type is a map with a UDT value type. - *

    - * Example: - *

    -     *     addUDTMapColumn("addresses", DataType.text(), frozen("address"));
    -     * 
    - * - * @param columnName the name of the column to be added. - * @param keyType the key type of the column to be added. - * @param valueUdtType the value UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)}. - * @return this CREATE statement. - */ - public T addUDTMapColumn(String columnName, DataType keyType, UDTType valueUdtType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(keyType, "Map key type"); - validateNotNull(valueUdtType, "Map value UDT type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, UDTType.mapWithUDTValue(keyType, valueUdtType)); - return self; - } + /** + * Shorthand to add a column definition to this CREATE statement, when the type is a map with a + * UDT value type. + * + *

    Example: + * + *

    +   *     addUDTMapColumn("addresses", DataType.text(), frozen("address"));
    +   * 
    + * + * @param columnName the name of the column to be added. + * @param keyType the key type of the column to be added. + * @param valueUdtType the value UDT type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @return this CREATE statement. + */ + public T addUDTMapColumn(String columnName, DataType keyType, UDTType valueUdtType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(keyType, "Map key type"); + validateNotNull(valueUdtType, "Map value UDT type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, UDTType.mapWithUDTValue(keyType, valueUdtType)); + return self; + } - /** - * Shorthand to add a column definition to this CREATE statement, when the type is a map with a UDT key type. - *

    - * Example: - *

    -     *     addUDTMapColumn("roles", frozen("user"), DataType.text());
    -     * 
    - * - * @param columnName the name of the column to be added. - * @param udtKeyType the key UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)}. - * @param valueType the value raw type of the column to be added. - * @return this CREATE statement. - */ - public T addUDTMapColumn(String columnName, UDTType udtKeyType, DataType valueType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(udtKeyType, "Map key UDT type"); - validateNotNull(valueType, "Map value type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, UDTType.mapWithUDTKey(udtKeyType, valueType)); - return self; - } + /** + * Shorthand to add a column definition to this CREATE statement, when the type is a map with a + * UDT key type. + * + *

    Example: + * + *

    +   *     addUDTMapColumn("roles", frozen("user"), DataType.text());
    +   * 
    + * + * @param columnName the name of the column to be added. + * @param udtKeyType the key UDT type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @param valueType the value raw type of the column to be added. + * @return this CREATE statement. + */ + public T addUDTMapColumn(String columnName, UDTType udtKeyType, DataType valueType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(udtKeyType, "Map key UDT type"); + validateNotNull(valueType, "Map value type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, UDTType.mapWithUDTKey(udtKeyType, valueType)); + return self; + } - /** - * Shorthand to add a column definition to this CREATE statement, when the type is a map with UDT key and value types. - *

    - * Example: - *

    -     *     addUDTMapColumn("users", frozen("user"), frozen("address"));
    -     * 
    - * - * @param columnName the name of the column to be added. - * @param udtKeyType the key UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)}. - * @param udtValueType the value UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)}. - * @return this CREATE statement. - */ - public T addUDTMapColumn(String columnName, UDTType udtKeyType, UDTType udtValueType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(udtKeyType, "Map key UDT type"); - validateNotNull(udtValueType, "Map value UDT type"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - simpleColumns.put(columnName, UDTType.mapWithUDTKeyAndValue(udtKeyType, udtValueType)); - return self; - } + /** + * Shorthand to add a column definition to this CREATE statement, when the type is a map with UDT + * key and value types. + * + *

    Example: + * + *

    +   *     addUDTMapColumn("users", frozen("user"), frozen("address"));
    +   * 
    + * + * @param columnName the name of the column to be added. + * @param udtKeyType the key UDT type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @param udtValueType the value UDT type of the column to be added. Use {@link + * SchemaBuilder#frozen(String)}. + * @return this CREATE statement. + */ + public T addUDTMapColumn(String columnName, UDTType udtKeyType, UDTType udtValueType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(udtKeyType, "Map key UDT type"); + validateNotNull(udtValueType, "Map value UDT type"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + simpleColumns.put(columnName, UDTType.mapWithUDTKeyAndValue(udtKeyType, udtValueType)); + return self; + } - protected String buildColumnType(Map.Entry entry) { - final ColumnType columnType = entry.getValue(); - return entry.getKey() + " " + columnType.asCQLString(); - } + protected String buildColumnType(Map.Entry entry) { + final ColumnType columnType = entry.getValue(); + return entry.getKey() + " " + columnType.asCQLString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Alter.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Alter.java index 89f416a5eca..0b77099be4c 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Alter.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Alter.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,234 +17,264 @@ */ package com.datastax.driver.core.schemabuilder; +import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotEmpty; +import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotKeyWord; + import com.datastax.driver.core.DataType; import com.google.common.base.Optional; - import java.util.List; -import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotEmpty; -import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotKeyWord; - -/** - * An in-construction ALTER TABLE statement. - */ +/** An in-construction ALTER TABLE statement. 
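The AbstractCreateStatement methods reformatted above are easiest to read through a usage sketch. The names below are hypothetical; the chained calls (ifNotExists, addColumn, addUDTColumn with SchemaBuilder.frozen) are the ones declared in this file, plus Create.addPartitionKey:

```
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.Create;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class CreateTableSketch {
  public static void main(String[] args) {
    Create create =
        SchemaBuilder.createTable("ks", "users")                      // hypothetical keyspace/table
            .ifNotExists()
            .addPartitionKey("id", DataType.uuid())
            .addColumn("name", DataType.text())
            .addColumn("emails", DataType.set(DataType.text()))       // collection column
            .addUDTColumn("address", SchemaBuilder.frozen("address")); // frozen UDT column
    System.out.println(create.getQueryString());
  }
}
```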
*/ public class Alter implements StatementStart { - private Optional keyspaceName = Optional.absent(); - private String tableName; - - Alter(String keyspaceName, String tableName) { - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(keyspaceName, String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.tableName = tableName; - this.keyspaceName = Optional.fromNullable(keyspaceName); - } - - Alter(String tableName) { - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.tableName = tableName; + private Optional keyspaceName = Optional.absent(); + private String tableName; + + Alter(String keyspaceName, String tableName) { + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.tableName = tableName; + this.keyspaceName = Optional.fromNullable(keyspaceName); + } + + Alter(String tableName) { + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.tableName = tableName; + } + + /** + * Add an ALTER column clause (to change the column type) to this ALTER TABLE statement. + * + * @param columnName the name of the column to be altered. + * @return a new {@link Alter.AlterColumn} instance. + */ + public AlterColumn alterColumn(String columnName) { + validateNotEmpty(columnName, "Column to be altered"); + validateNotKeyWord( + columnName, + String.format( + "The altered column name '%s' is not allowed because it is a reserved keyword", + columnName)); + return new AlterColumn(this, columnName); + } + + /** + * Add a new ADD column clause to this ALTER TABLE statement. + * + * @param columnName the name of the column to be added. + * @return a new {@link Alter.AddColumn} instance. + */ + public AddColumn addColumn(String columnName) { + validateNotEmpty(columnName, "Added column"); + validateNotKeyWord( + columnName, + String.format( + "The new column name '%s' is not allowed because it is a reserved keyword", + columnName)); + return new AddColumn(this, columnName, false); + } + + /** + * Add a new ADD column clause to this ALTER TABLE statement, to add a static column. + * + * @param columnName the name of the column to be added. + * @return a new {@link Alter.AddColumn} instance. + */ + public AddColumn addStaticColumn(String columnName) { + validateNotEmpty(columnName, "Added static column"); + validateNotKeyWord( + columnName, + String.format( + "The new static column name '%s' is not allowed because it is a reserved keyword", + columnName)); + return new AddColumn(this, columnName, true); + } + + /** + * Add a new DROP column clause to this ALTER TABLE statement. + * + *

<p>Note that you cannot drop a column that is part of the primary key. + * + * @param columnName the name of the column to be dropped. + * @return the final ALTER TABLE DROP COLUMN statement. + */ + public SchemaStatement dropColumn(String columnName) { + validateNotEmpty(columnName, "Column to be dropped"); + validateNotKeyWord( + columnName, + String.format( + "The dropped column name '%s' is not allowed because it is a reserved keyword", + columnName)); + return SchemaStatement.fromQueryString(buildInternal() + " DROP " + columnName); + } + + /** + * Add a new RENAME column clause to this ALTER TABLE statement. + * + *

    Note that you can only rename a column that is part of the primary key. + * + * @param columnName the name of the column to be renamed. + * @return a new {@link Alter.RenameColumn} instance. + */ + public RenameColumn renameColumn(String columnName) { + validateNotEmpty(columnName, "Column to be renamed"); + validateNotKeyWord( + columnName, + String.format( + "The renamed column name '%s' is not allowed because it is a reserved keyword", + columnName)); + return new RenameColumn(this, columnName); + } + + /** + * Add options (WITH clause) to this ALTER TABLE statement. + * + * @return a new {@link Alter.Options} instance. + */ + public Options withOptions() { + return new Options(this); + } + + /** An ALTER column clause. */ + public static class AlterColumn { + + private final Alter alter; + private final String columnName; + + AlterColumn(Alter alter, String columnName) { + this.alter = alter; + this.columnName = columnName; } /** - * Add an ALTER column clause (to change the column type) to this ALTER TABLE statement. + * Define the new type of the altered column. * - * @param columnName the name of the column to be altered. - * @return a new {@link Alter.AlterColumn} instance. + * @param type the new type of the altered column. + * @return the final statement. */ - public AlterColumn alterColumn(String columnName) { - validateNotEmpty(columnName, "Column to be altered"); - validateNotKeyWord(columnName, String.format("The altered column name '%s' is not allowed because it is a reserved keyword", columnName)); - return new AlterColumn(this, columnName); + public SchemaStatement type(DataType type) { + return SchemaStatement.fromQueryString( + alter.buildInternal() + " ALTER " + columnName + " TYPE " + type.toString()); } /** - * Add a new ADD column clause to this ALTER TABLE statement. + * Define the new type of the altered column, when that type contains a UDT. * - * @param columnName the name of the column to be added. - * @return a new {@link Alter.AddColumn} instance. + * @param udtType the UDT type. Use {@link SchemaBuilder#frozen(String)} or {@link + * SchemaBuilder#udtLiteral(String)}. + * @return the final statement. */ - public AddColumn addColumn(String columnName) { - validateNotEmpty(columnName, "Added column"); - validateNotKeyWord(columnName, String.format("The new column name '%s' is not allowed because it is a reserved keyword", columnName)); - return new AddColumn(this, columnName, false); + public SchemaStatement udtType(UDTType udtType) { + return SchemaStatement.fromQueryString( + alter.buildInternal() + " ALTER " + columnName + " TYPE " + udtType.asCQLString()); } + } - /** - * Add a new ADD column clause to this ALTER TABLE statement, to add a static column. - * - * @param columnName the name of the column to be added. - * @return a new {@link Alter.AddColumn} instance. - */ - public AddColumn addStaticColumn(String columnName) { - validateNotEmpty(columnName, "Added static column"); - validateNotKeyWord(columnName, String.format("The new static column name '%s' is not allowed because it is a reserved keyword", columnName)); - return new AddColumn(this, columnName, true); - } + /** An ADD column clause. */ + public static class AddColumn { - /** - * Add a new DROP column clause to this ALTER TABLE statement. - *

    - * Note that you cannot drop a column that is part of the primary key. - * - * @param columnName the name of the column to be dropped. - * @return the final ALTER TABLE DROP COLUMN statement. - */ - public SchemaStatement dropColumn(String columnName) { - validateNotEmpty(columnName, "Column to be dropped"); - validateNotKeyWord(columnName, String.format("The dropped column name '%s' is not allowed because it is a reserved keyword", columnName)); - return SchemaStatement.fromQueryString(buildInternal() + " DROP " + columnName); + private final Alter alter; + private final String columnName; + private final boolean staticColumn; + + AddColumn(Alter alter, String columnName, boolean staticColumn) { + this.alter = alter; + this.columnName = columnName; + this.staticColumn = staticColumn; } /** - * Add a new RENAME column clause to this ALTER TABLE statement. - *

    - * Note that you can only rename a column that is part of the primary key. + * Define the type of the added column. * - * @param columnName the name of the column to be renamed. - * @return a new {@link Alter.RenameColumn} instance. + * @param type the type of the added column. + * @return the final statement. */ - public RenameColumn renameColumn(String columnName) { - validateNotEmpty(columnName, "Column to be renamed"); - validateNotKeyWord(columnName, String.format("The renamed column name '%s' is not allowed because it is a reserved keyword", columnName)); - return new RenameColumn(this, columnName); + public SchemaStatement type(DataType type) { + return SchemaStatement.fromQueryString( + alter.buildInternal() + + " ADD " + + columnName + + " " + + type.toString() + + (staticColumn ? " static" : "")); } /** - * Add options (WITH clause) to this ALTER TABLE statement. + * Define the type of the added column, when that type contains a UDT. * - * @return a new {@link Alter.Options} instance. + * @param udtType the UDT type of the added column. + * @return the final statement. */ - public Options withOptions() { - return new Options(this); + public SchemaStatement udtType(UDTType udtType) { + return SchemaStatement.fromQueryString( + alter.buildInternal() + + " ADD " + + columnName + + " " + + udtType.asCQLString() + + (staticColumn ? " static" : "")); } + } - /** - * An ALTER column clause. - */ - public static class AlterColumn { - - private final Alter alter; - private final String columnName; - - AlterColumn(Alter alter, String columnName) { - this.alter = alter; - this.columnName = columnName; - } - - /** - * Define the new type of the altered column. - * - * @param type the new type of the altered column. - * @return the final statement. - */ - public SchemaStatement type(DataType type) { - return SchemaStatement.fromQueryString( - alter.buildInternal() + " ALTER " + columnName + " TYPE " + type.toString()); - } - - /** - * Define the new type of the altered column, when that type contains a UDT. - * - * @param udtType the UDT type. Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. - * @return the final statement. - */ - public SchemaStatement udtType(UDTType udtType) { - return SchemaStatement.fromQueryString( - alter.buildInternal() + " ALTER " + columnName + " TYPE " + udtType.asCQLString()); - } - } + /** A RENAME column clause. */ + public static class RenameColumn { - /** - * An ADD column clause. - */ - public static class AddColumn { - - private final Alter alter; - private final String columnName; - private final boolean staticColumn; - - AddColumn(Alter alter, String columnName, boolean staticColumn) { - this.alter = alter; - this.columnName = columnName; - this.staticColumn = staticColumn; - } - - /** - * Define the type of the added column. - * - * @param type the type of the added column. - * @return the final statement. - */ - public SchemaStatement type(DataType type) { - return SchemaStatement.fromQueryString( - alter.buildInternal() + " ADD " + columnName + " " + type.toString() - + (staticColumn ? " static" : "")); - } - - /** - * Define the type of the added column, when that type contains a UDT. - * - * @param udtType the UDT type of the added column. - * @return the final statement. - */ - public SchemaStatement udtType(UDTType udtType) { - return SchemaStatement.fromQueryString( - alter.buildInternal() + " ADD " + columnName + " " + udtType.asCQLString() - + (staticColumn ? 
" static" : "")); - } - } + private final Alter alter; + private final String columnName; - /** - * A RENAME column clause. - */ - public static class RenameColumn { - - private final Alter alter; - private final String columnName; - - RenameColumn(Alter alter, String columnName) { - this.alter = alter; - this.columnName = columnName; - } - - /** - * Define the new name of the column. - * - * @param newColumnName the new name of the column. - * @return the final statement. - */ - public SchemaStatement to(String newColumnName) { - validateNotEmpty(newColumnName, "New column name"); - validateNotKeyWord(newColumnName, String.format("The new column name '%s' is not allowed because it is a reserved keyword", newColumnName)); - return SchemaStatement.fromQueryString( - alter.buildInternal() + " RENAME " + columnName + " TO " + newColumnName); - } + RenameColumn(Alter alter, String columnName) { + this.alter = alter; + this.columnName = columnName; } /** - * The table options of an ALTER TABLE statement. + * Define the new name of the column. + * + * @param newColumnName the new name of the column. + * @return the final statement. */ - public static class Options extends TableOptions { + public SchemaStatement to(String newColumnName) { + validateNotEmpty(newColumnName, "New column name"); + validateNotKeyWord( + newColumnName, + String.format( + "The new column name '%s' is not allowed because it is a reserved keyword", + newColumnName)); + return SchemaStatement.fromQueryString( + alter.buildInternal() + " RENAME " + columnName + " TO " + newColumnName); + } + } - Options(Alter alter) { - super(alter); - } + /** The table options of an ALTER TABLE statement. */ + public static class Options extends TableOptions { - @Override - protected void addSpecificOptions(List options) { - // nothing to do (no specific options) - } + Options(Alter alter) { + super(alter); } @Override - public String buildInternal() { - String tableSpec = keyspaceName.isPresent() - ? keyspaceName.get() + "." + tableName - : tableName; - - return SchemaStatement.STATEMENT_START + "ALTER TABLE " + tableSpec; + protected void addSpecificOptions(List options) { + // nothing to do (no specific options) } + } + + @Override + public String buildInternal() { + String tableSpec = keyspaceName.isPresent() ? keyspaceName.get() + "." + tableName : tableName; + + return SchemaStatement.STATEMENT_START + "ALTER TABLE " + tableSpec; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AlterKeyspace.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AlterKeyspace.java index a965ca84b9f..0d062a90e98 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AlterKeyspace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/AlterKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,23 @@ */ package com.datastax.driver.core.schemabuilder; -/** - * A built ALTER KEYSPACE statement. - */ +/** A built ALTER KEYSPACE statement. */ public class AlterKeyspace { - static final String COMMAND = "ALTER KEYSPACE"; + static final String COMMAND = "ALTER KEYSPACE"; - private final String keyspaceName; + private final String keyspaceName; - public AlterKeyspace(String keyspaceName) { - this.keyspaceName = keyspaceName; - } - - /** - * Add options for this ALTER KEYSPACE statement. - * - * @return the options of this ALTER KEYSPACE statement. - */ - public KeyspaceOptions with() { - return new KeyspaceOptions(COMMAND, keyspaceName); - } + public AlterKeyspace(String keyspaceName) { + this.keyspaceName = keyspaceName; + } + /** + * Add options for this ALTER KEYSPACE statement. + * + * @return the options of this ALTER KEYSPACE statement. + */ + public KeyspaceOptions with() { + return new KeyspaceOptions(COMMAND, keyspaceName); + } } - diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/ColumnType.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/ColumnType.java index a60225a154e..4531b01bd9b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/ColumnType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/ColumnType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +19,12 @@ /** * Wrapper around UDT and non-UDT types. - *
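Similarly, the Alter and AlterKeyspace statements reformatted above are typically driven through SchemaBuilder. A short sketch with hypothetical names, where each chained call maps to a clause declared in Alter:

```
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.SchemaStatement;

public class AlterTableSketch {
  public static void main(String[] args) {
    // ADD a regular column.
    SchemaStatement addColumn =
        SchemaBuilder.alterTable("ks", "users").addColumn("nickname").type(DataType.text());
    // RENAME a primary key column.
    SchemaStatement renameColumn =
        SchemaBuilder.alterTable("ks", "users").renameColumn("id").to("user_id");
    System.out.println(addColumn.getQueryString());
    System.out.println(renameColumn.getQueryString());
  }
}
```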

    - * The reason for this interface is that the core API doesn't let us build {@link com.datastax.driver.core.DataType}s representing UDTs, we have to obtain - * them from the cluster metadata. Since we want to use SchemaBuilder without a Cluster instance, UDT types will be provided via - * {@link UDTType} instances. + * + *
<p>
    The reason for this interface is that the core API doesn't let us build {@link + * com.datastax.driver.core.DataType}s representing UDTs, we have to obtain them from the cluster + * metadata. Since we want to use SchemaBuilder without a Cluster instance, UDT types will be + * provided via {@link UDTType} instances. */ interface ColumnType { - String asCQLString(); + String asCQLString(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Create.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Create.java index 0224552f94a..9fdbbe1a6b6 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Create.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Create.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,358 +17,436 @@ */ package com.datastax.driver.core.schemabuilder; +import static java.util.Map.Entry; + import com.datastax.driver.core.DataType; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.collect.Lists; - -import java.util.*; - -import static java.util.Map.Entry; - -/** - * A built CREATE TABLE statement. - */ +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** A built CREATE TABLE statement. */ public class Create extends AbstractCreateStatement { - private String tableName; - private Map partitionColumns = new LinkedHashMap(); - private Map clusteringColumns = new LinkedHashMap(); - private Map staticColumns = new LinkedHashMap(); - - Create(String keyspaceName, String tableName) { - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(keyspaceName, String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.tableName = tableName; - this.keyspaceName = Optional.fromNullable(keyspaceName); - } - - Create(String tableName) { - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.tableName = tableName; - } - - /** - * Add a partition key column definition to this CREATE TABLE statement. - *

    - * This includes the column declaration (you don't need an additional {@code addColumn} call). - *

    - * Partition key columns are added in the order of their declaration. - * - * @param columnName the name of the partition key column to be added. - * @param dataType the data type of the partition key column to be added. - * @return this CREATE statement. - */ - public Create addPartitionKey(String columnName, DataType dataType) { - validateNotEmpty(columnName, "Partition key name"); - validateNotNull(dataType, "Partition key type"); - validateNotKeyWord(columnName, String.format("The partition key name '%s' is not allowed because it is a reserved keyword", columnName)); - partitionColumns.put(columnName, new NativeColumnType(dataType)); - return this; - } - - /** - * Add a partition key column definition to this CREATE TABLE statement, when its type contains a UDT. - *

    - * This includes the column declaration (you don't need an additional {@code addColumn} call). - *

    - * Partition key columns are added in the order of their declaration. - * - * @param columnName the name of the partition key column to be added. - * @param udtType the UDT type of the partition key column to be added. Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. - * @return this CREATE statement. - */ - public Create addUDTPartitionKey(String columnName, UDTType udtType) { - validateNotEmpty(columnName, "Clustering key name"); - validateNotNull(udtType, "UDT partition key type"); - validateNotKeyWord(columnName, String.format("The partition key name '%s' is not allowed because it is a reserved keyword", columnName)); - partitionColumns.put(columnName, udtType); - return this; + private String tableName; + private Map partitionColumns = new LinkedHashMap(); + private Map clusteringColumns = new LinkedHashMap(); + private Map staticColumns = new LinkedHashMap(); + + Create(String keyspaceName, String tableName) { + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.tableName = tableName; + this.keyspaceName = Optional.fromNullable(keyspaceName); + } + + Create(String tableName) { + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.tableName = tableName; + } + + /** + * Add a partition key column definition to this CREATE TABLE statement. + * + *
<p>
    This includes the column declaration (you don't need an additional {@code addColumn} call). + * + *
<p>
    Partition key columns are added in the order of their declaration. + * + * @param columnName the name of the partition key column to be added. + * @param dataType the data type of the partition key column to be added. + * @return this CREATE statement. + */ + public Create addPartitionKey(String columnName, DataType dataType) { + validateNotEmpty(columnName, "Partition key name"); + validateNotNull(dataType, "Partition key type"); + validateNotKeyWord( + columnName, + String.format( + "The partition key name '%s' is not allowed because it is a reserved keyword", + columnName)); + partitionColumns.put(columnName, new NativeColumnType(dataType)); + return this; + } + + /** + * Add a partition key column definition to this CREATE TABLE statement, when its type contains a + * UDT. + * + *
<p>
    This includes the column declaration (you don't need an additional {@code addColumn} call). + * + *
<p>
    Partition key columns are added in the order of their declaration. + * + * @param columnName the name of the partition key column to be added. + * @param udtType the UDT type of the partition key column to be added. Use {@link + * SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. + * @return this CREATE statement. + */ + public Create addUDTPartitionKey(String columnName, UDTType udtType) { + validateNotEmpty(columnName, "Clustering key name"); + validateNotNull(udtType, "UDT partition key type"); + validateNotKeyWord( + columnName, + String.format( + "The partition key name '%s' is not allowed because it is a reserved keyword", + columnName)); + partitionColumns.put(columnName, udtType); + return this; + } + + /** + * Add a clustering column definition to this CREATE TABLE statement. + * + *
<p>
    This includes the column declaration (you don't need an additional {@code addColumn} call). + * + *
<p>
    Clustering columns are added in the order of their declaration. + * + * @param columnName the name of the clustering column to be added. + * @param dataType the data type of the clustering column to be added. + * @return this CREATE statement. + */ + public Create addClusteringColumn(String columnName, DataType dataType) { + validateNotEmpty(columnName, "Clustering column name"); + validateNotNull(dataType, "Clustering column type"); + validateNotKeyWord( + columnName, + String.format( + "The clustering column name '%s' is not allowed because it is a reserved keyword", + columnName)); + clusteringColumns.put(columnName, new NativeColumnType(dataType)); + return this; + } + + /** + * Add a clustering column definition to this CREATE TABLE statement, when its type contains a + * UDT. + * + *
<p>
    This includes the column declaration (you don't need an additional {@code addColumn} call). + * + *
<p>
    Clustering columns are added in the order of their declaration. + * + * @param columnName the name of the clustering column to be added. + * @param udtType the UDT type of the clustering column to be added. Use {@link + * SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. + * @return this CREATE statement. + */ + public Create addUDTClusteringColumn(String columnName, UDTType udtType) { + validateNotEmpty(columnName, "Clustering column name"); + validateNotNull(udtType, "UDT clustering column type"); + validateNotKeyWord( + columnName, + String.format( + "The clustering column name '%s' is not allowed because it is a reserved keyword", + columnName)); + clusteringColumns.put(columnName, udtType); + return this; + } + + /** + * Add a static column definition to this CREATE TABLE statement. + * + * @param columnName the name of the column to be added. + * @param dataType the data type of the column to be added. + * @return this CREATE statement. + */ + public Create addStaticColumn(String columnName, DataType dataType) { + validateNotEmpty(columnName, "Column name"); + validateNotNull(dataType, "Column type"); + validateNotKeyWord( + columnName, + String.format( + "The static column name '%s' is not allowed because it is a reserved keyword", + columnName)); + staticColumns.put(columnName, new NativeColumnType(dataType)); + return this; + } + + /** + * Add a static column definition to this CREATE TABLE statement, when its type contains a UDT. + * + * @param columnName the name of the column to be added. + * @param udtType the UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)} + * or {@link SchemaBuilder#udtLiteral(String)}. + * @return this CREATE statement. + */ + public Create addUDTStaticColumn(String columnName, UDTType udtType) { + validateNotEmpty(tableName, "Column name"); + validateNotNull(udtType, "Column UDT type"); + validateNotKeyWord( + columnName, + String.format( + "The static column name '%s' is not allowed because it is a reserved keyword", + columnName)); + staticColumns.put(columnName, udtType); + return this; + } + + /** + * Add options for this CREATE TABLE statement. + * + * @return the options of this CREATE TABLE statement. + */ + public Options withOptions() { + return new Options(this); + } + + /** The table options of a CREATE TABLE statement. */ + public static class Options extends TableOptions { + + private final Create create; + + private Options(Create create) { + super(create.asStatementStart()); + this.create = create; } - /** - * Add a clustering column definition to this CREATE TABLE statement. - *
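// Illustrative sketch (not part of this change set): how the fluent Create API reformatted above
// is typically used. The keyspace, table and column names are invented for the example, and the
// SchemaBuilder.createTable(...) entry point and the addColumn(...) shortcut are assumed from the
// rest of the schemabuilder package rather than shown in this hunk.
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.Create;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.SchemaStatement;

public class CreateTableSketch {
  public static void main(String[] args) {
    Create create =
        SchemaBuilder.createTable("ks", "events")
            // Partition key and clustering columns are rendered in declaration order.
            .addPartitionKey("id", DataType.uuid())
            .addClusteringColumn("ts", DataType.timeuuid())
            // A static column is only legal because a clustering column is declared.
            .addStaticColumn("owner", DataType.text())
            .addColumn("payload", DataType.text());
    // withOptions() returns the Options sub-builder defined above.
    SchemaStatement statement =
        create.withOptions().clusteringOrder("ts", SchemaBuilder.Direction.DESC);
    System.out.println(statement.getQueryString());
  }
}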

    - * This includes the column declaration (you don't need an additional {@code addColumn} call). - *

    - * Clustering columns are added in the order of their declaration. - * - * @param columnName the name of the clustering column to be added. - * @param dataType the data type of the clustering column to be added. - * @return this CREATE statement. - */ - public Create addClusteringColumn(String columnName, DataType dataType) { - validateNotEmpty(columnName, "Clustering column name"); - validateNotNull(dataType, "Clustering column type"); - validateNotKeyWord(columnName, String.format("The clustering column name '%s' is not allowed because it is a reserved keyword", columnName)); - clusteringColumns.put(columnName, new NativeColumnType(dataType)); - return this; - } + private List clusteringOrderKeys = Lists.newArrayList(); - /** - * Add a clustering column definition to this CREATE TABLE statement, when its type contains a UDT. - *

    - * This includes the column declaration (you don't need an additional {@code addColumn} call). - *

    - * Clustering columns are added in the order of their declaration. - * - * @param columnName the name of the clustering column to be added. - * @param udtType the UDT type of the clustering column to be added. Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. - * @return this CREATE statement. - */ - public Create addUDTClusteringColumn(String columnName, UDTType udtType) { - validateNotEmpty(columnName, "Clustering column name"); - validateNotNull(udtType, "UDT clustering column type"); - validateNotKeyWord(columnName, String.format("The clustering column name '%s' is not allowed because it is a reserved keyword", columnName)); - clusteringColumns.put(columnName, udtType); - return this; - } + private boolean compactStorage; /** - * Add a static column definition to this CREATE TABLE statement. + * Add a clustering order for this table. * - * @param columnName the name of the column to be added. - * @param dataType the data type of the column to be added. - * @return this CREATE statement. - */ - public Create addStaticColumn(String columnName, DataType dataType) { - validateNotEmpty(columnName, "Column name"); - validateNotNull(dataType, "Column type"); - validateNotKeyWord(columnName, String.format("The static column name '%s' is not allowed because it is a reserved keyword", columnName)); - staticColumns.put(columnName, new NativeColumnType(dataType)); - return this; - } - - /** - * Add a static column definition to this CREATE TABLE statement, when its type contains a UDT. + *
<p>
    To define the order on multiple columns, call this method repeatedly. The columns will be + * declared in the call order. * - * @param columnName the name of the column to be added. - * @param udtType the UDT type of the column to be added. Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)}. - * @return this CREATE statement. + * @param columnName the clustering column name. + * @param direction the clustering direction (DESC/ASC). + * @return this {@code Options} object. */ - public Create addUDTStaticColumn(String columnName, UDTType udtType) { - validateNotEmpty(tableName, "Column name"); - validateNotNull(udtType, "Column UDT type"); - validateNotKeyWord(columnName, String.format("The static column name '%s' is not allowed because it is a reserved keyword", columnName)); - staticColumns.put(columnName, udtType); - return this; + public Options clusteringOrder(String columnName, SchemaBuilder.Direction direction) { + if (!create.clusteringColumns.containsKey(columnName)) { + throw new IllegalArgumentException( + String.format( + "Clustering key '%s' is unknown. Did you forget to declare it first?", columnName)); + } + clusteringOrderKeys.add(new ClusteringOrder(columnName, direction)); + return this; } /** - * Add options for this CREATE TABLE statement. + * Enable the compact storage option for the table. * - * @return the options of this CREATE TABLE statement. + * @return this {@code Options} object. */ - public Options withOptions() { - return new Options(this); + public Options compactStorage() { + this.compactStorage = true; + return this; } - /** - * The table options of a CREATE TABLE statement. - */ - public static class Options extends TableOptions { - - private final Create create; - - private Options(Create create) { - super(create.asStatementStart()); - this.create = create; + private static class ClusteringOrder { + private final String clusteringColumnName; + private final SchemaBuilder.Direction direction; + + ClusteringOrder(String clusteringColumnName, SchemaBuilder.Direction direction) { + validateNotEmpty(clusteringColumnName, "Column name for clustering order"); + this.clusteringColumnName = clusteringColumnName; + this.direction = direction; + } + + public String getClusteringColumnName() { + return clusteringColumnName; + } + + public String toStatement() { + return clusteringColumnName + " " + direction.name(); + } + + @Override + public String toString() { + return toStatement(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; } - - private List clusteringOrderKeys = Lists.newArrayList(); - - private boolean compactStorage; - - /** - * Add a clustering order for this table. - *

    - * To define the order on multiple columns, call this method repeatedly. The columns will be declared in the call order. - * - * @param columnName the clustering column name. - * @param direction the clustering direction (DESC/ASC). - * @return this {@code Options} object. - */ - public Options clusteringOrder(String columnName, SchemaBuilder.Direction direction) { - if (!create.clusteringColumns.containsKey(columnName)) { - throw new IllegalArgumentException(String.format("Clustering key '%s' is unknown. Did you forget to declare it first?", columnName)); - } - clusteringOrderKeys.add(new ClusteringOrder(columnName, direction)); - return this; + if (o instanceof ClusteringOrder) { + ClusteringOrder that = (ClusteringOrder) o; + return MoreObjects.equal(this.clusteringColumnName, that.clusteringColumnName) + && MoreObjects.equal(this.direction, that.direction); } + return false; + } - /** - * Enable the compact storage option for the table. - * - * @return this {@code Options} object. - */ - public Options compactStorage() { - this.compactStorage = true; - return this; - } - - private static class ClusteringOrder { - private final String clusteringColumnName; - private final SchemaBuilder.Direction direction; - - ClusteringOrder(String clusteringColumnName, SchemaBuilder.Direction direction) { - validateNotEmpty(clusteringColumnName, "Column name for clustering order"); - this.clusteringColumnName = clusteringColumnName; - this.direction = direction; - } - - public String getClusteringColumnName() { - return clusteringColumnName; - } - - public String toStatement() { - return clusteringColumnName + " " + direction.name(); - } - - @Override - public String toString() { - return toStatement(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o instanceof ClusteringOrder) { - ClusteringOrder that = (ClusteringOrder) o; - return MoreObjects.equal(this.clusteringColumnName, that.clusteringColumnName) && MoreObjects.equal(this.direction, that.direction); - } - return false; - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(clusteringColumnName, direction); - } - } - - @Override - protected void addSpecificOptions(List options) { - if (!clusteringOrderKeys.isEmpty()) { - options.add("CLUSTERING ORDER BY(" + Joiner.on(", ").join(clusteringOrderKeys) + ")"); - } - - if (compactStorage) { - if (!create.staticColumns.isEmpty()) { - throw new IllegalStateException(String.format("Cannot create table '%s' with compact storage and static columns '%s'", create.tableName, create.staticColumns.keySet())); - } - options.add("COMPACT STORAGE"); - } - } + @Override + public int hashCode() { + return MoreObjects.hashCode(clusteringColumnName, direction); + } } @Override - public String buildInternal() { - if (partitionColumns.size() < 1) { - throw new IllegalStateException(String.format("There should be at least one partition key defined for the table '%s'", tableName)); - } - - validateColumnsDeclaration(); - - StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE TABLE"); - if (ifNotExists) { - createStatement.append(" IF NOT EXISTS"); - } - createStatement.append(" "); - if (keyspaceName.isPresent()) { - createStatement.append(keyspaceName.get()).append("."); - } - createStatement.append(tableName); - - List allColumns = new ArrayList(); - List partitionKeyColumns = new ArrayList(); - List clusteringKeyColumns = new ArrayList(); - - for (Entry entry : partitionColumns.entrySet()) { - 
allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString()); - partitionKeyColumns.add(entry.getKey()); - } - - for (Entry entry : clusteringColumns.entrySet()) { - allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString()); - clusteringKeyColumns.add(entry.getKey()); - } - - for (Entry entry : staticColumns.entrySet()) { - allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString() + " static"); + protected void addSpecificOptions(List options) { + if (!clusteringOrderKeys.isEmpty()) { + options.add("CLUSTERING ORDER BY(" + Joiner.on(", ").join(clusteringOrderKeys) + ")"); + } + + if (compactStorage) { + if (!create.staticColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "Cannot create table '%s' with compact storage and static columns '%s'", + create.tableName, create.staticColumns.keySet())); } + options.add("COMPACT STORAGE"); + } + } + } + + @Override + public String buildInternal() { + if (partitionColumns.size() < 1) { + throw new IllegalStateException( + String.format( + "There should be at least one partition key defined for the table '%s'", tableName)); + } - for (Entry entry : simpleColumns.entrySet()) { - allColumns.add(buildColumnType(entry)); - } + validateColumnsDeclaration(); - String partitionKeyPart = partitionKeyColumns.size() == 1 ? - partitionKeyColumns.get(0) - : "(" + Joiner.on(", ").join(partitionKeyColumns) + ")"; + StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE TABLE"); + if (ifNotExists) { + createStatement.append(" IF NOT EXISTS"); + } + createStatement.append(" "); + if (keyspaceName.isPresent()) { + createStatement.append(keyspaceName.get()).append("."); + } + createStatement.append(tableName); - String primaryKeyPart = clusteringKeyColumns.size() == 0 ? 
- partitionKeyPart - : partitionKeyPart + ", " + Joiner.on(", ").join(clusteringKeyColumns); + List allColumns = new ArrayList(); + List partitionKeyColumns = new ArrayList(); + List clusteringKeyColumns = new ArrayList(); - createStatement.append("(").append(COLUMN_FORMATTING); - createStatement.append(Joiner.on("," + COLUMN_FORMATTING).join(allColumns)); - createStatement.append("," + COLUMN_FORMATTING).append("PRIMARY KEY"); - createStatement.append("(").append(primaryKeyPart).append(")"); - createStatement.append(")"); + for (Entry entry : partitionColumns.entrySet()) { + allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString()); + partitionKeyColumns.add(entry.getKey()); + } - return createStatement.toString(); + for (Entry entry : clusteringColumns.entrySet()) { + allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString()); + clusteringKeyColumns.add(entry.getKey()); } - private void validateColumnsDeclaration() { + for (Entry entry : staticColumns.entrySet()) { + allColumns.add(entry.getKey() + " " + entry.getValue().asCQLString() + " static"); + } - final Collection partitionAndClusteringColumns = this.intersection(partitionColumns.keySet(), clusteringColumns.keySet()); - final Collection partitionAndSimpleColumns = this.intersection(partitionColumns.keySet(), simpleColumns.keySet()); - final Collection clusteringAndSimpleColumns = this.intersection(clusteringColumns.keySet(), simpleColumns.keySet()); - final Collection partitionAndStaticColumns = this.intersection(partitionColumns.keySet(), staticColumns.keySet()); - final Collection clusteringAndStaticColumns = this.intersection(clusteringColumns.keySet(), staticColumns.keySet()); - final Collection simpleAndStaticColumns = this.intersection(simpleColumns.keySet(), staticColumns.keySet()); + for (Entry entry : simpleColumns.entrySet()) { + allColumns.add(buildColumnType(entry)); + } - if (!partitionAndClusteringColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as partition keys and clustering keys at the same time", partitionAndClusteringColumns)); - } + String partitionKeyPart = + partitionKeyColumns.size() == 1 + ? partitionKeyColumns.get(0) + : "(" + Joiner.on(", ").join(partitionKeyColumns) + ")"; + + String primaryKeyPart = + clusteringKeyColumns.size() == 0 + ? 
partitionKeyPart + : partitionKeyPart + ", " + Joiner.on(", ").join(clusteringKeyColumns); + + createStatement.append("(").append(COLUMN_FORMATTING); + createStatement.append(Joiner.on("," + COLUMN_FORMATTING).join(allColumns)); + createStatement.append("," + COLUMN_FORMATTING).append("PRIMARY KEY"); + createStatement.append("(").append(primaryKeyPart).append(")"); + createStatement.append(")"); + + return createStatement.toString(); + } + + private void validateColumnsDeclaration() { + + final Collection partitionAndClusteringColumns = + this.intersection(partitionColumns.keySet(), clusteringColumns.keySet()); + final Collection partitionAndSimpleColumns = + this.intersection(partitionColumns.keySet(), simpleColumns.keySet()); + final Collection clusteringAndSimpleColumns = + this.intersection(clusteringColumns.keySet(), simpleColumns.keySet()); + final Collection partitionAndStaticColumns = + this.intersection(partitionColumns.keySet(), staticColumns.keySet()); + final Collection clusteringAndStaticColumns = + this.intersection(clusteringColumns.keySet(), staticColumns.keySet()); + final Collection simpleAndStaticColumns = + this.intersection(simpleColumns.keySet(), staticColumns.keySet()); + + if (!partitionAndClusteringColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The '%s' columns can not be declared as partition keys and clustering keys at the same time", + partitionAndClusteringColumns)); + } - if (!partitionAndSimpleColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as partition keys and simple columns at the same time", partitionAndSimpleColumns)); - } + if (!partitionAndSimpleColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The '%s' columns can not be declared as partition keys and simple columns at the same time", + partitionAndSimpleColumns)); + } - if (!clusteringAndSimpleColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as clustering keys and simple columns at the same time", clusteringAndSimpleColumns)); - } + if (!clusteringAndSimpleColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The '%s' columns can not be declared as clustering keys and simple columns at the same time", + clusteringAndSimpleColumns)); + } - if (!partitionAndStaticColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as partition keys and static columns at the same time", partitionAndStaticColumns)); - } + if (!partitionAndStaticColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The '%s' columns can not be declared as partition keys and static columns at the same time", + partitionAndStaticColumns)); + } - if (!clusteringAndStaticColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as clustering keys and static columns at the same time", clusteringAndStaticColumns)); - } + if (!clusteringAndStaticColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The '%s' columns can not be declared as clustering keys and static columns at the same time", + clusteringAndStaticColumns)); + } - if (!simpleAndStaticColumns.isEmpty()) { - throw new IllegalStateException(String.format("The '%s' columns can not be declared as simple columns and static columns at the same time", simpleAndStaticColumns)); - } + if (!simpleAndStaticColumns.isEmpty()) { + throw new 
IllegalStateException( + String.format( + "The '%s' columns can not be declared as simple columns and static columns at the same time", + simpleAndStaticColumns)); + } - if (!staticColumns.isEmpty() && clusteringColumns.isEmpty()) { - throw new IllegalStateException(String.format("The table '%s' cannot declare static columns '%s' without clustering columns", tableName, staticColumns.keySet())); - } + if (!staticColumns.isEmpty() && clusteringColumns.isEmpty()) { + throw new IllegalStateException( + String.format( + "The table '%s' cannot declare static columns '%s' without clustering columns", + tableName, staticColumns.keySet())); } + } - private Collection intersection(Collection col1, Collection col2) { - Set set = new HashSet(); + private Collection intersection(Collection col1, Collection col2) { + Set set = new HashSet(); - for (T t : col1) { - if (col2.contains(t)) { - set.add(t); - } - } - return set; + for (T t : col1) { + if (col2.contains(t)) { + set.add(t); + } } + return set; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateIndex.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateIndex.java index 02433a3d1fe..e0fb1d15d44 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateIndex.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateIndex.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,124 +17,143 @@ */ package com.datastax.driver.core.schemabuilder; -import com.google.common.base.Optional; +import static com.datastax.driver.core.schemabuilder.SchemaStatement.STATEMENT_START; +import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotEmpty; +import static com.datastax.driver.core.schemabuilder.SchemaStatement.validateNotKeyWord; -import static com.datastax.driver.core.schemabuilder.SchemaStatement.*; +import com.google.common.base.Optional; -/** - * A built CREATE INDEX statement. - */ +/** A built CREATE INDEX statement. 
*/ public class CreateIndex implements StatementStart { - private String indexName; - private boolean ifNotExists = false; - private Optional keyspaceName = Optional.absent(); - private String tableName; - private String columnName; - private boolean keys; - - CreateIndex(String indexName) { - validateNotEmpty(indexName, "Index name"); - validateNotKeyWord(indexName, String.format("The index name '%s' is not allowed because it is a reserved keyword", indexName)); - this.indexName = indexName; - } - + private String indexName; + private boolean ifNotExists = false; + private Optional keyspaceName = Optional.absent(); + private String tableName; + private String columnName; + private boolean keys; + + CreateIndex(String indexName) { + validateNotEmpty(indexName, "Index name"); + validateNotKeyWord( + indexName, + String.format( + "The index name '%s' is not allowed because it is a reserved keyword", indexName)); + this.indexName = indexName; + } + + /** + * Add the 'IF NOT EXISTS' condition to this CREATE INDEX statement. + * + * @return this CREATE INDEX statement. + */ + public CreateIndex ifNotExists() { + this.ifNotExists = true; + return this; + } + + /** + * Specify the keyspace and table to create the index on. + * + * @param keyspaceName the keyspace name. + * @param tableName the table name. + * @return a {@link CreateIndexOn} that will allow the specification of the column. + */ + public CreateIndexOn onTable(String keyspaceName, String tableName) { + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.keyspaceName = Optional.fromNullable(keyspaceName); + this.tableName = tableName; + return new CreateIndexOn(); + } + + /** + * Specify the table to create the index on. + * + * @param tableName the table name. + * @return a {@link CreateIndexOn} that will allow the specification of the column. + */ + public CreateIndexOn onTable(String tableName) { + validateNotEmpty(tableName, "Table name"); + validateNotKeyWord( + tableName, + String.format( + "The table name '%s' is not allowed because it is a reserved keyword", tableName)); + this.tableName = tableName; + return new CreateIndexOn(); + } + + public class CreateIndexOn { /** - * Add the 'IF NOT EXISTS' condition to this CREATE INDEX statement. + * Specify the column to create the index on. * - * @return this CREATE INDEX statement. + * @param columnName the column name. + * @return the final CREATE INDEX statement. */ - public CreateIndex ifNotExists() { - this.ifNotExists = true; - return this; + public SchemaStatement andColumn(String columnName) { + validateNotEmpty(columnName, "Column name"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + CreateIndex.this.columnName = columnName; + return SchemaStatement.fromQueryString(buildInternal()); } /** - * Specify the keyspace and table to create the index on. + * Create an index on the keys of the given map column. * - * @param keyspaceName the keyspace name. - * @param tableName the table name. - * @return a {@link CreateIndexOn} that will allow the specification of the column. + * @param columnName the column name. 
+ * @return the final CREATE INDEX statement. */ - public CreateIndexOn onTable(String keyspaceName, String tableName) { - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(keyspaceName, String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.keyspaceName = Optional.fromNullable(keyspaceName); - this.tableName = tableName; - return new CreateIndexOn(); + public SchemaStatement andKeysOfColumn(String columnName) { + validateNotEmpty(columnName, "Column name"); + validateNotKeyWord( + columnName, + String.format( + "The column name '%s' is not allowed because it is a reserved keyword", columnName)); + CreateIndex.this.columnName = columnName; + CreateIndex.this.keys = true; + return SchemaStatement.fromQueryString(buildInternal()); } + } - /** - * Specify the table to create the index on. - * - * @param tableName the table name. - * @return a {@link CreateIndexOn} that will allow the specification of the column. - */ - public CreateIndexOn onTable(String tableName) { - validateNotEmpty(tableName, "Table name"); - validateNotKeyWord(tableName, String.format("The table name '%s' is not allowed because it is a reserved keyword", tableName)); - this.tableName = tableName; - return new CreateIndexOn(); - } + @Override + public String buildInternal() { + StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE INDEX "); - public class CreateIndexOn { - /** - * Specify the column to create the index on. - * - * @param columnName the column name. - * @return the final CREATE INDEX statement. - */ - public SchemaStatement andColumn(String columnName) { - validateNotEmpty(columnName, "Column name"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - CreateIndex.this.columnName = columnName; - return SchemaStatement.fromQueryString(buildInternal()); - } - - /** - * Create an index on the keys of the given map column. - * - * @param columnName the column name. - * @return the final CREATE INDEX statement. 
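// Illustrative sketch (not part of this change set): the CreateIndex builder above in use.
// The index, table and column names are invented, and SchemaBuilder.createIndex(...) is assumed
// to be the public entry point that constructs CreateIndex.
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.SchemaStatement;

public class CreateIndexSketch {
  public static void main(String[] args) {
    // Plain column index, roughly: CREATE INDEX IF NOT EXISTS events_owner_idx ON ks.events(owner)
    SchemaStatement byColumn =
        SchemaBuilder.createIndex("events_owner_idx")
            .ifNotExists()
            .onTable("ks", "events")
            .andColumn("owner");

    // Index on the keys of a map column, roughly: ... ON ks.events(KEYS(attributes))
    SchemaStatement byMapKeys =
        SchemaBuilder.createIndex("events_attr_keys_idx")
            .onTable("ks", "events")
            .andKeysOfColumn("attributes");

    System.out.println(byColumn.getQueryString());
    System.out.println(byMapKeys.getQueryString());
  }
}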
- */ - public SchemaStatement andKeysOfColumn(String columnName) { - validateNotEmpty(columnName, "Column name"); - validateNotKeyWord(columnName, String.format("The column name '%s' is not allowed because it is a reserved keyword", columnName)); - CreateIndex.this.columnName = columnName; - CreateIndex.this.keys = true; - return SchemaStatement.fromQueryString(buildInternal()); - } + if (ifNotExists) { + createStatement.append("IF NOT EXISTS "); } - @Override - public String buildInternal() { - StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE INDEX "); - - if (ifNotExists) { - createStatement.append("IF NOT EXISTS "); - } - - createStatement.append(indexName).append(" ON "); + createStatement.append(indexName).append(" ON "); - if (keyspaceName.isPresent()) { - createStatement.append(keyspaceName.get()).append("."); - } - createStatement.append(tableName); - - createStatement.append("("); - if (keys) { - createStatement.append("KEYS("); - } + if (keyspaceName.isPresent()) { + createStatement.append(keyspaceName.get()).append("."); + } + createStatement.append(tableName); - createStatement.append(columnName); + createStatement.append("("); + if (keys) { + createStatement.append("KEYS("); + } - if (keys) { - createStatement.append(")"); - } - createStatement.append(")"); + createStatement.append(columnName); - return createStatement.toString(); + if (keys) { + createStatement.append(")"); } + createStatement.append(")"); + + return createStatement.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateKeyspace.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateKeyspace.java index 5108b16ae03..443635815a5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateKeyspace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,42 +17,39 @@ */ package com.datastax.driver.core.schemabuilder; -/** - * A built CREATE KEYSPACE statement. - */ +/** A built CREATE KEYSPACE statement. 
*/ public class CreateKeyspace { - static final String command = "CREATE KEYSPACE"; - - private final String keyspaceName; - private boolean ifNotExists; - - public CreateKeyspace(String keyspaceName) { - this.keyspaceName = keyspaceName; - this.ifNotExists = false; - } - - public CreateKeyspace ifNotExists() { - this.ifNotExists = true; - return this; + static final String command = "CREATE KEYSPACE"; + + private final String keyspaceName; + private boolean ifNotExists; + + public CreateKeyspace(String keyspaceName) { + this.keyspaceName = keyspaceName; + this.ifNotExists = false; + } + + public CreateKeyspace ifNotExists() { + this.ifNotExists = true; + return this; + } + + /** + * Add options for this CREATE KEYSPACE statement. + * + * @return the options of this CREATE KEYSPACE statement. + */ + public KeyspaceOptions with() { + return new KeyspaceOptions(buildCommand(), keyspaceName); + } + + String buildCommand() { + StringBuilder createStatement = new StringBuilder(); + createStatement.append(command); + if (ifNotExists) { + createStatement.append(" IF NOT EXISTS"); } - - /** - * Add options for this CREATE KEYSPACE statement. - * - * @return the options of this CREATE KEYSPACE statement. - */ - public KeyspaceOptions with() { - return new KeyspaceOptions(buildCommand(), keyspaceName); - } - - String buildCommand() { - StringBuilder createStatement = new StringBuilder(); - createStatement.append(command); - if (ifNotExists) { - createStatement.append(" IF NOT EXISTS"); - } - return createStatement.toString(); - } - + return createStatement.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateType.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateType.java index 3bd09127d89..9425ccf1109 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/CreateType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,63 +19,70 @@ import com.google.common.base.Joiner; import com.google.common.base.Optional; - import java.util.ArrayList; import java.util.List; import java.util.Map; -/** - * A built CREATE TYPE statement. - */ +/** A built CREATE TYPE statement. 
*/ public class CreateType extends AbstractCreateStatement { - private String typeName; + private String typeName; - CreateType(String keyspaceName, String typeName) { - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotEmpty(typeName, "Custom type name"); - validateNotKeyWord(keyspaceName, String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - validateNotKeyWord(typeName, String.format("The custom type name '%s' is not allowed because it is a reserved keyword", typeName)); - this.typeName = typeName; - this.keyspaceName = Optional.fromNullable(keyspaceName); - } + CreateType(String keyspaceName, String typeName) { + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotEmpty(typeName, "Custom type name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + validateNotKeyWord( + typeName, + String.format( + "The custom type name '%s' is not allowed because it is a reserved keyword", typeName)); + this.typeName = typeName; + this.keyspaceName = Optional.fromNullable(keyspaceName); + } - CreateType(String typeName) { - validateNotEmpty(typeName, "Custom type name"); - validateNotKeyWord(typeName, String.format("The custom type name '%s' is not allowed because it is a reserved keyword", typeName)); - this.typeName = typeName; - } + CreateType(String typeName) { + validateNotEmpty(typeName, "Custom type name"); + validateNotKeyWord( + typeName, + String.format( + "The custom type name '%s' is not allowed because it is a reserved keyword", typeName)); + this.typeName = typeName; + } - /** - * Generate the script for custom type creation - * - * @return a CREATE TYPE statement - */ - public String build() { - return buildInternal(); - } + /** + * Generate the script for custom type creation + * + * @return a CREATE TYPE statement + */ + public String build() { + return buildInternal(); + } - @Override - public String buildInternal() { + @Override + public String buildInternal() { - StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE TYPE "); - if (ifNotExists) { - createStatement.append("IF NOT EXISTS "); - } - if (keyspaceName.isPresent()) { - createStatement.append(keyspaceName.get()).append("."); - } - createStatement.append(typeName); + StringBuilder createStatement = new StringBuilder(STATEMENT_START).append("CREATE TYPE "); + if (ifNotExists) { + createStatement.append("IF NOT EXISTS "); + } + if (keyspaceName.isPresent()) { + createStatement.append(keyspaceName.get()).append("."); + } + createStatement.append(typeName); - List allColumns = new ArrayList(); - for (Map.Entry entry : simpleColumns.entrySet()) { - allColumns.add(buildColumnType(entry)); - } + List allColumns = new ArrayList(); + for (Map.Entry entry : simpleColumns.entrySet()) { + allColumns.add(buildColumnType(entry)); + } - createStatement.append("(").append(COLUMN_FORMATTING); - createStatement.append(Joiner.on("," + COLUMN_FORMATTING).join(allColumns)); - createStatement.append(")"); + createStatement.append("(").append(COLUMN_FORMATTING); + createStatement.append(Joiner.on("," + COLUMN_FORMATTING).join(allColumns)); + createStatement.append(")"); - return createStatement.toString(); - } + return createStatement.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Drop.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Drop.java index 144ab017b2a..660e427ad48 
100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Drop.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/Drop.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,65 +19,83 @@ import com.google.common.base.Optional; -/** - * A built DROP statement. - */ +/** A built DROP statement. */ public class Drop extends SchemaStatement { - enum DroppedItem {TABLE, TYPE, INDEX} + enum DroppedItem { + TABLE, + TYPE, + INDEX + } - private Optional keyspaceName = Optional.absent(); - private String itemName; - private boolean ifExists; - private final String itemType; + private Optional keyspaceName = Optional.absent(); + private String itemName; + private boolean ifExists; + private final String itemType; - Drop(String keyspaceName, String itemName, DroppedItem itemType) { - this.itemType = itemType.name(); - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotEmpty(itemName, this.itemType.toLowerCase() + " name"); - validateNotKeyWord(keyspaceName, String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - validateNotKeyWord(itemName, String.format("The " + this.itemType.toLowerCase() + " name '%s' is not allowed because it is a reserved keyword", itemName)); - this.itemName = itemName; - this.keyspaceName = Optional.fromNullable(keyspaceName); - } + Drop(String keyspaceName, String itemName, DroppedItem itemType) { + this.itemType = itemType.name(); + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotEmpty(itemName, this.itemType.toLowerCase() + " name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + validateNotKeyWord( + itemName, + String.format( + "The " + + this.itemType.toLowerCase() + + " name '%s' is not allowed because it is a reserved keyword", + itemName)); + this.itemName = itemName; + this.keyspaceName = Optional.fromNullable(keyspaceName); + } - Drop(String itemName, DroppedItem itemType) { - this.itemType = itemType.name(); - validateNotEmpty(itemName, this.itemType.toLowerCase() + " name"); - validateNotKeyWord(itemName, String.format("The " + this.itemType.toLowerCase() + " name '%s' is not allowed because it is a reserved keyword", itemName)); - this.itemName = itemName; - } + Drop(String itemName, DroppedItem itemType) { + this.itemType = itemType.name(); + validateNotEmpty(itemName, this.itemType.toLowerCase() + " name"); + validateNotKeyWord( + itemName, + String.format( + "The " + + this.itemType.toLowerCase() + + " name '%s' is not allowed because it is a reserved keyword", + itemName)); + this.itemName 
= itemName; + } - /** - * Add the 'IF EXISTS' condition to this DROP statement. - * - * @return this statement. - */ - public Drop ifExists() { - this.ifExists = true; - return this; - } + /** + * Add the 'IF EXISTS' condition to this DROP statement. + * + * @return this statement. + */ + public Drop ifExists() { + this.ifExists = true; + return this; + } - @Override - public String buildInternal() { - StringBuilder dropStatement = new StringBuilder("DROP " + itemType + " "); - if (ifExists) { - dropStatement.append("IF EXISTS "); - } - if (keyspaceName.isPresent()) { - dropStatement.append(keyspaceName.get()).append("."); - } - - dropStatement.append(itemName); - return dropStatement.toString(); + @Override + public String buildInternal() { + StringBuilder dropStatement = new StringBuilder("DROP " + itemType + " "); + if (ifExists) { + dropStatement.append("IF EXISTS "); } - - /** - * Generate a DROP TABLE statement - * - * @return the final DROP TABLE statement - */ - public String build() { - return this.buildInternal(); + if (keyspaceName.isPresent()) { + dropStatement.append(keyspaceName.get()).append("."); } + + dropStatement.append(itemName); + return dropStatement.toString(); + } + + /** + * Generate a DROP TABLE statement + * + * @return the final DROP TABLE statement + */ + public String build() { + return this.buildInternal(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/DropKeyspace.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/DropKeyspace.java index 2bbbbd8b886..93a6e9f7a64 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/DropKeyspace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/DropKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,40 +17,40 @@ */ package com.datastax.driver.core.schemabuilder; -/** - * A built DROP KEYSPACE statement. - */ +/** A built DROP KEYSPACE statement. 
*/ public class DropKeyspace extends SchemaStatement { - private final String keyspaceName; - private boolean ifExists; + private final String keyspaceName; + private boolean ifExists; - public DropKeyspace(String keyspaceName) { - this.keyspaceName = keyspaceName; - this.ifExists = false; - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotKeyWord(keyspaceName, - String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); - } + public DropKeyspace(String keyspaceName) { + this.keyspaceName = keyspaceName; + this.ifExists = false; + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + } - /** - * Add the 'IF EXISTS' condition to this DROP statement. - * - * @return this statement. - */ - public DropKeyspace ifExists() { - this.ifExists = true; - return this; - } + /** + * Add the 'IF EXISTS' condition to this DROP statement. + * + * @return this statement. + */ + public DropKeyspace ifExists() { + this.ifExists = true; + return this; + } - @Override - public String buildInternal() { - StringBuilder dropStatement = new StringBuilder("DROP KEYSPACE "); - if (ifExists) { - dropStatement.append("IF EXISTS "); - } - dropStatement.append(keyspaceName); - return dropStatement.toString(); + @Override + public String buildInternal() { + StringBuilder dropStatement = new StringBuilder("DROP KEYSPACE "); + if (ifExists) { + dropStatement.append("IF EXISTS "); } - + dropStatement.append(keyspaceName); + return dropStatement.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/KeyspaceOptions.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/KeyspaceOptions.java index ae11e2280bb..f0d2d1721b9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/KeyspaceOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/KeyspaceOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,111 +18,104 @@ package com.datastax.driver.core.schemabuilder; import com.google.common.base.Optional; - import java.util.Map; -/** - * The keyspace options used in CREATE KEYSPACE or ALTER KEYSPACE statements. - */ +/** The keyspace options used in CREATE KEYSPACE or ALTER KEYSPACE statements. 
*/ public class KeyspaceOptions extends SchemaStatement { - private Optional> replication = Optional.absent(); - private Optional durableWrites = Optional.absent(); - - private final String command; - private final String keyspaceName; + private Optional> replication = Optional.absent(); + private Optional durableWrites = Optional.absent(); + + private final String command; + private final String keyspaceName; + + public KeyspaceOptions(String command, String keyspaceName) { + validateNotEmpty(keyspaceName, "Keyspace name"); + validateNotKeyWord( + keyspaceName, + String.format( + "The keyspace name '%s' is not allowed because it is a reserved keyword", + keyspaceName)); + + this.command = command; + this.keyspaceName = keyspaceName; + } + + /** + * Define the replication options for the statement. + * + * @param replication replication properties map + * @return this {@code KeyspaceOptions} object + */ + public KeyspaceOptions replication(Map replication) { + validateReplicationOptions(replication); + this.replication = Optional.fromNullable(replication); + return this; + } + + /** + * Define the durable writes option for the statement. If set to false, data written to the + * keyspace will bypass the commit log. + * + * @param durableWrites durable write option + * @return this {@code KeyspaceOptions} object + */ + public KeyspaceOptions durableWrites(Boolean durableWrites) { + this.durableWrites = Optional.fromNullable(durableWrites); + return this; + } + + @Override + String buildInternal() { + StringBuilder builtStatement = new StringBuilder(STATEMENT_START); + builtStatement.append(command); + builtStatement.append(" "); + builtStatement.append(keyspaceName); + builtStatement.append("\n\tWITH\n\t\t"); + + boolean putSeparator = false; + if (replication.isPresent()) { + + builtStatement.append("REPLICATION = {"); + + int l = replication.get().entrySet().size(); + for (Map.Entry e : replication.get().entrySet()) { + builtStatement.append("'").append(e.getKey()).append("'").append(": "); + + if (e.getValue() instanceof String) { + builtStatement.append("'").append(e.getValue()).append("'"); + } else { + builtStatement.append(e.getValue()); + } - public KeyspaceOptions(String command, String keyspaceName) { - validateNotEmpty(keyspaceName, "Keyspace name"); - validateNotKeyWord(keyspaceName, - String.format("The keyspace name '%s' is not allowed because it is a reserved keyword", keyspaceName)); + if (--l > 0) { + builtStatement.append(", "); + } + } - this.command = command; - this.keyspaceName = keyspaceName; + builtStatement.append('}'); + builtStatement.append("\n\t\t"); + putSeparator = true; } - /** - * Define the replication options for the statement. - * - * @param replication replication properties map - * @return this {@code KeyspaceOptions} object - */ - public KeyspaceOptions replication(Map replication) { - validateReplicationOptions(replication); - this.replication = Optional.fromNullable(replication); - return this; - } + if (durableWrites.isPresent()) { + if (putSeparator) { + builtStatement.append("AND "); + } - /** - * Define the durable writes option for the statement. If set to false, - * data written to the keyspace will bypass the commit log. 
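// Illustrative sketch (not part of this change set): KeyspaceOptions as produced by the
// CreateKeyspace builder shown earlier in this diff. The keyspace name and replication settings
// are invented; ImmutableMap comes from Guava, which the schemabuilder package already uses.
import com.datastax.driver.core.schemabuilder.CreateKeyspace;
import com.datastax.driver.core.schemabuilder.SchemaStatement;
import com.google.common.collect.ImmutableMap;

public class CreateKeyspaceSketch {
  public static void main(String[] args) {
    // The replication map must contain a 'class' entry of type String
    // (see KeyspaceOptions.validateReplicationOptions below).
    SchemaStatement statement =
        new CreateKeyspace("ks")
            .ifNotExists()
            .with()
            .replication(
                ImmutableMap.<String, Object>of(
                    "class", "SimpleStrategy", "replication_factor", 1))
            .durableWrites(true);
    System.out.println(statement.getQueryString());
  }
}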
- * - * @param durableWrites durable write option - * @return this {@code KeyspaceOptions} object - */ - public KeyspaceOptions durableWrites(Boolean durableWrites) { - this.durableWrites = Optional.fromNullable(durableWrites); - return this; + builtStatement.append("DURABLE_WRITES = " + durableWrites.get().toString()); } - @Override - String buildInternal() { - StringBuilder builtStatement = new StringBuilder(STATEMENT_START); - builtStatement.append(command); - builtStatement.append(" "); - builtStatement.append(keyspaceName); - builtStatement.append("\n\tWITH\n\t\t"); - - boolean putSeparator = false; - if (replication.isPresent()) { - - builtStatement.append("REPLICATION = {"); - - int l = replication.get().entrySet().size(); - for (Map.Entry e : replication.get().entrySet()) { - builtStatement.append("'") - .append(e.getKey()) - .append("'") - .append(": "); - - if (e.getValue() instanceof String) { - builtStatement.append("'") - .append(e.getValue()) - .append("'"); - } else { - builtStatement.append(e.getValue()); - } - - if (--l > 0) { - builtStatement.append(", "); - } - } - - builtStatement.append('}'); - builtStatement.append("\n\t\t"); - putSeparator = true; - } - - - if (durableWrites.isPresent()) { - if (putSeparator) { - builtStatement.append("AND "); - } - - builtStatement.append("DURABLE_WRITES = " + durableWrites.get().toString()); - } - + return builtStatement.toString(); + } - return builtStatement.toString(); + static void validateReplicationOptions(Map replicationOptions) { + if (replicationOptions != null && !replicationOptions.containsKey("class")) { + throw new IllegalArgumentException("Replication Strategy 'class' should be provided"); } - static void validateReplicationOptions(Map replicationOptions) { - if (replicationOptions != null && !replicationOptions.containsKey("class")) { - throw new IllegalArgumentException("Replication Strategy 'class' should be provided"); - } - - if (replicationOptions != null && !(replicationOptions.get("class") instanceof String)) { - throw new IllegalArgumentException("Replication Strategy should be of type String"); - } + if (replicationOptions != null && !(replicationOptions.get("class") instanceof String)) { + throw new IllegalArgumentException("Replication Strategy should be of type String"); } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/NativeColumnType.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/NativeColumnType.java index f49ff9b1585..defceef3edc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/NativeColumnType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/NativeColumnType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,18 +19,16 @@ import com.datastax.driver.core.DataType; -/** - * Represents a native CQL type in a SchemaBuilder statement. - */ +/** Represents a native CQL type in a SchemaBuilder statement. */ class NativeColumnType implements ColumnType { - private final String asCQLString; + private final String asCQLString; - NativeColumnType(DataType nativeType) { - asCQLString = nativeType.toString(); - } + NativeColumnType(DataType nativeType) { + asCQLString = nativeType.toString(); + } - @Override - public String asCQLString() { - return asCQLString; - } + @Override + public String asCQLString() { + return asCQLString; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaBuilder.java index bad84a7acca..d71bcb7e6ec 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,394 +21,451 @@ /** * Static methods to build a CQL3 DDL statement. - *

    - * The provided builders perform very little validation of the built query. - * There is thus no guarantee that a built query is valid, and it is - * definitively possible to create invalid queries. - *

    - * Note that it could be convenient to use an 'import static' to use the methods of this class. + * + *

    The provided builders perform very little validation of the built query. There is thus no + * guarantee that a built query is valid, and it is definitely possible to create invalid queries. + * + *

    Note that it could be convenient to use an 'import static' to use the methods of this class. */ public final class SchemaBuilder { - private SchemaBuilder() { - } - - /** - * Start building a new CREATE KEYSPACE statement. - * - * @param keyspaceName the name of the keyspace to create. - * @return an in-construction CREATE KEYSPACE statement. - */ - public static CreateKeyspace createKeyspace(String keyspaceName) { - return new CreateKeyspace(keyspaceName); - } - - /** - * Start building a new CREATE TABLE statement. - * - * @param tableName the name of the table to create. - * @return an in-construction CREATE TABLE statement. - */ - public static Create createTable(String tableName) { - return new Create(tableName); - } - - /** - * Start building a new CREATE TABLE statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param tableName the name of the table to create. - * @return an in-construction CREATE TABLE statement. - */ - public static Create createTable(String keyspaceName, String tableName) { - return new Create(keyspaceName, tableName); - } - - /** - * Start building a new ALTER KEYSPACE statement. - * - * @param keyspaceName the name of the keyspace to be altered. - * @return an in-construction ALTER KEYSPACE statement. - */ - public static AlterKeyspace alterKeyspace(String keyspaceName) { - return new AlterKeyspace(keyspaceName); - } - - /** - * Start building a new ALTER TABLE statement. - * - * @param tableName the name of the table to be altered. - * @return an in-construction ALTER TABLE statement. - */ - public static Alter alterTable(String tableName) { - return new Alter(tableName); - } - - /** - * Start building a new ALTER TABLE statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param tableName the name of the table to be altered. - * @return an in-construction ALTER TABLE statement. - */ - public static Alter alterTable(String keyspaceName, String tableName) { - return new Alter(keyspaceName, tableName); - } - - /** - * Start building a new DROP TABLE statement. - * - * @param tableName the name of the table to be dropped. - * @return an in-construction DROP TABLE statement. - */ - public static Drop dropTable(String tableName) { - return new Drop(tableName, DroppedItem.TABLE); - } - - /** - * Start building a new DROP KEYSPACE statement. - * - * @param keyspaceName the name of the keyspace to be dropped. - * @return an in-construction DROP KEYSPACE statement. - */ - public static DropKeyspace dropKeyspace(String keyspaceName) { - return new DropKeyspace(keyspaceName); - } - - /** - * Start building a new DROP TABLE statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param tableName the name of the table to be dropped. - * @return an in-construction DROP TABLE statement. - */ - public static Drop dropTable(String keyspaceName, String tableName) { - return new Drop(keyspaceName, tableName, DroppedItem.TABLE); - } - - /** - * Start building a new CREATE INDEX statement. - * - * @param indexName the name of the table to create. - * @return an in-construction CREATE INDEX statement. - */ - public static CreateIndex createIndex(String indexName) { - return new CreateIndex(indexName); - } - - /** - * Start building a new DROP INDEX statement. - * - * @param indexName the name of the index to be dropped. - * @return an in-construction DROP INDEX statement. 
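[Editor's note] The class comment above suggests static imports; a small sketch of that style, assuming an existing `Session` (keyspace, table, index and type names are illustrative only).

```
import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropIndex;
import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropTable;
import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropType;

import com.datastax.driver.core.Session;

public class DropExample {
  // The builders return RegularStatements, so they can be passed straight to Session.execute().
  static void dropAll(Session session) {
    session.execute(dropIndex("ks", "users_by_email_idx"));
    session.execute(dropTable("ks", "users"));
    session.execute(dropType("ks", "address"));
  }
}
```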
- */ - public static Drop dropIndex(String indexName) { - return new Drop(indexName, DroppedItem.INDEX); - } - - /** - * Start building a new DROP INDEX statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param indexName the name of the index to be dropped. - * @return an in-construction DROP INDEX statement. - */ - public static Drop dropIndex(String keyspaceName, String indexName) { - return new Drop(keyspaceName, indexName, DroppedItem.INDEX); - } - - /** - * Start building a new CREATE TYPE statement. - * - * @param typeName the name of the custom type to create. - * @return an in-construction CREATE TYPE statement. - */ - public static CreateType createType(String typeName) { - return new CreateType(typeName); - } - - /** - * Start building a new CREATE TYPE statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param typeName the name of the custom type to create. - * @return an in-construction CREATE TYPE statement. - */ - public static CreateType createType(String keyspaceName, String typeName) { - return new CreateType(keyspaceName, typeName); - } - - /** - * Start building a new DROP TYPE statement. - * - * @param typeName the name of the type to be dropped. - * @return an in-construction DROP TYPE statement. - */ - public static Drop dropType(String typeName) { - return new Drop(typeName, DroppedItem.TYPE); - } - - /** - * Start building a new DROP TYPE statement. - * - * @param keyspaceName the name of the keyspace to be used. - * @param typeName the name of the type to be dropped. - * @return an in-construction DROP TYPE statement. - */ - public static Drop dropType(String keyspaceName, String typeName) { - return new Drop(keyspaceName, typeName, DroppedItem.TYPE); - } - - - // Utility methods and types: these are not method starters, but they are exposed here in order to - // have a single entry point to all schema builder features. - - /** - * Build the datatype representation of a frozen UDT, to include in a schema builder statement. - *

    - * frozen("foo") will produce frozen<foo>. - * - * @param udtName the name of the UDT. - * @return the type. - */ - public static UDTType frozen(String udtName) { - return UDTType.frozen(udtName); - } - - /** - * Build the datatype representation of a complex UDT type, to include in a schema builder statement. - *

    - * As of Cassandra 2.1, this method is not strictly necessary because {@link Create} and {@link Alter} - * provide specialized methods to express simple collections of UDTs, but future versions will make it - * possible to use types such as map<text, map<text, frozen<user>>>. - * - * @param literal the type literal as it will appear in the final CQL statement. - * @return the type. - */ - public static UDTType udtLiteral(String literal) { - return UDTType.literal(literal); - } - - /** - * Create options for the size-tiered compaction strategy, for use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompactionOptions.SizeTieredCompactionStrategyOptions sizedTieredStategy() { - return new TableOptions.CompactionOptions.SizeTieredCompactionStrategyOptions(); - } - - /** - * Create options for the leveled compaction strategy, to use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompactionOptions.LeveledCompactionStrategyOptions leveledStrategy() { - return new TableOptions.CompactionOptions.LeveledCompactionStrategyOptions(); - } - - /** - * Create options for the date-tiered compaction strategy, to use in a CREATE or ALTER TABLE statement. - *

    - * This strategy was introduced in Cassandra 2.1.1. - * - * @return the options. - */ - public static TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions dateTieredStrategy() { - return new TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions(); - } - - /** - * Create options for the {@code NONE} compression strategy, to use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompressionOptions noCompression() { - return new TableOptions.CompressionOptions.NoCompression(); - } - - /** - * Create options for the LZ4 compression strategy, to use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompressionOptions lz4() { - return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.LZ4); - } - - /** - * Create options for the Snappy compression strategy, to use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompressionOptions snappy() { - return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.SNAPPY); - } - - /** - * Create options for the Deflate compression strategy, to use in a CREATE or ALTER TABLE statement. - * - * @return the options. - */ - public static TableOptions.CompressionOptions deflate() { - return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.DEFLATE); - } - - /** - * Create the speculative retry strategy that never retries reads, to use in a CREATE or ALTER TABLE statement. - * - * @return the strategy. - */ - public static TableOptions.SpeculativeRetryValue noSpeculativeRetry() { - return new TableOptions.SpeculativeRetryValue("'NONE'"); - } - - /** - * Create the speculative retry strategy that retries reads of all replicas, to use in a CREATE or ALTER TABLE statement. - * - * @return the strategy. - */ - public static TableOptions.SpeculativeRetryValue always() { - return new TableOptions.SpeculativeRetryValue("'ALWAYS'"); - } - - /** - * Create the speculative retry strategy that retries based on the effect on throughput and latency, - * to use in a CREATE or ALTER TABLE statement. - * - * @return the strategy. - */ - public static TableOptions.SpeculativeRetryValue percentile(int percentile) { - if (percentile < 0 || percentile > 100) { - throw new IllegalArgumentException("Percentile value for speculative retry should be between 0 and 100"); - } - return new TableOptions.SpeculativeRetryValue("'" + percentile + "percentile'"); - } - - /** - * Create the speculative retry strategy that retries after a given delay, to use in a CREATE or ALTER TABLE statement. - * - * @return the strategy. - */ - public static TableOptions.SpeculativeRetryValue millisecs(int millisecs) { - if (millisecs < 0) { - throw new IllegalArgumentException("Millisecond value for speculative retry should be positive"); - } - return new TableOptions.SpeculativeRetryValue("'" + millisecs + "ms'"); - } - - /** - * The direction used in clustering order declarations. - * - * @see Create.Options#clusteringOrder(String, com.datastax.driver.core.schemabuilder.SchemaBuilder.Direction) - */ - public enum Direction { - ASC, DESC - } - - /** - * Caching strategies, for use in a CREATE or ALTER TABLE statement. 
- */ - public enum Caching { - ALL("'all'"), KEYS_ONLY("'keys_only'"), ROWS_ONLY("'rows_only'"), NONE("'none'"); - - private String value; - - Caching(String value) { - this.value = value; - } - - String value() { - return value; - } - } - - /** - * Key caching strategies for Cassandra 2.1, for use in a CREATE or ALTER TABLE statement. - */ - public enum KeyCaching { - ALL("'all'"), NONE("'none'"); - private String value; - - KeyCaching(String value) { - this.value = value; - } - - String value() { - return value; - } - } - - /** - * Return the row caching strategy that never caches rows ({@code none}, to use in a CREATE or ALTER TABLE statement. - * - * @return the strategy. - */ - public static TableOptions.CachingRowsPerPartition noRows() { - return new TableOptions.CachingRowsPerPartition("'none'"); - } - - /** - * Return the row caching strategy that caches all rows ({@code all}), to use in a CREATE or ALTER TABLE statement. - *

    - * Be careful when choosing this option, you can starve Cassandra memory quickly if your partition is very large. - * - * @return the strategy. - */ - public static TableOptions.CachingRowsPerPartition allRows() { - return new TableOptions.CachingRowsPerPartition("'all'"); - } - - /** - * Return the row caching strategy that caches a given number of rows, to use in a CREATE or ALTER TABLE statement. - * - * @param rowNumber the number of rows to cache. - * @return the strategy. - */ - public static TableOptions.CachingRowsPerPartition rows(int rowNumber) { - if (rowNumber <= 0) { - throw new IllegalArgumentException("rows number for caching should be strictly positive"); - } - return new TableOptions.CachingRowsPerPartition(Integer.toString(rowNumber)); - } + private SchemaBuilder() {} + + /** + * Start building a new CREATE KEYSPACE statement. + * + * @param keyspaceName the name of the keyspace to create. + * @return an in-construction CREATE KEYSPACE statement. + */ + public static CreateKeyspace createKeyspace(String keyspaceName) { + return new CreateKeyspace(keyspaceName); + } + + /** + * Start building a new CREATE TABLE statement. + * + * @param tableName the name of the table to create. + * @return an in-construction CREATE TABLE statement. + */ + public static Create createTable(String tableName) { + return new Create(tableName); + } + + /** + * Start building a new CREATE TABLE statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param tableName the name of the table to create. + * @return an in-construction CREATE TABLE statement. + */ + public static Create createTable(String keyspaceName, String tableName) { + return new Create(keyspaceName, tableName); + } + + /** + * Start building a new ALTER KEYSPACE statement. + * + * @param keyspaceName the name of the keyspace to be altered. + * @return an in-construction ALTER KEYSPACE statement. + */ + public static AlterKeyspace alterKeyspace(String keyspaceName) { + return new AlterKeyspace(keyspaceName); + } + + /** + * Start building a new ALTER TABLE statement. + * + * @param tableName the name of the table to be altered. + * @return an in-construction ALTER TABLE statement. + */ + public static Alter alterTable(String tableName) { + return new Alter(tableName); + } + + /** + * Start building a new ALTER TABLE statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param tableName the name of the table to be altered. + * @return an in-construction ALTER TABLE statement. + */ + public static Alter alterTable(String keyspaceName, String tableName) { + return new Alter(keyspaceName, tableName); + } + + /** + * Start building a new DROP TABLE statement. + * + * @param tableName the name of the table to be dropped. + * @return an in-construction DROP TABLE statement. + */ + public static Drop dropTable(String tableName) { + return new Drop(tableName, DroppedItem.TABLE); + } + + /** + * Start building a new DROP KEYSPACE statement. + * + * @param keyspaceName the name of the keyspace to be dropped. + * @return an in-construction DROP KEYSPACE statement. + */ + public static DropKeyspace dropKeyspace(String keyspaceName) { + return new DropKeyspace(keyspaceName); + } + + /** + * Start building a new DROP TABLE statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param tableName the name of the table to be dropped. + * @return an in-construction DROP TABLE statement. 
+ */ + public static Drop dropTable(String keyspaceName, String tableName) { + return new Drop(keyspaceName, tableName, DroppedItem.TABLE); + } + + /** + * Start building a new CREATE INDEX statement. + * + * @param indexName the name of the table to create. + * @return an in-construction CREATE INDEX statement. + */ + public static CreateIndex createIndex(String indexName) { + return new CreateIndex(indexName); + } + + /** + * Start building a new DROP INDEX statement. + * + * @param indexName the name of the index to be dropped. + * @return an in-construction DROP INDEX statement. + */ + public static Drop dropIndex(String indexName) { + return new Drop(indexName, DroppedItem.INDEX); + } + + /** + * Start building a new DROP INDEX statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param indexName the name of the index to be dropped. + * @return an in-construction DROP INDEX statement. + */ + public static Drop dropIndex(String keyspaceName, String indexName) { + return new Drop(keyspaceName, indexName, DroppedItem.INDEX); + } + + /** + * Start building a new CREATE TYPE statement. + * + * @param typeName the name of the custom type to create. + * @return an in-construction CREATE TYPE statement. + */ + public static CreateType createType(String typeName) { + return new CreateType(typeName); + } + + /** + * Start building a new CREATE TYPE statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param typeName the name of the custom type to create. + * @return an in-construction CREATE TYPE statement. + */ + public static CreateType createType(String keyspaceName, String typeName) { + return new CreateType(keyspaceName, typeName); + } + + /** + * Start building a new DROP TYPE statement. + * + * @param typeName the name of the type to be dropped. + * @return an in-construction DROP TYPE statement. + */ + public static Drop dropType(String typeName) { + return new Drop(typeName, DroppedItem.TYPE); + } + + /** + * Start building a new DROP TYPE statement. + * + * @param keyspaceName the name of the keyspace to be used. + * @param typeName the name of the type to be dropped. + * @return an in-construction DROP TYPE statement. + */ + public static Drop dropType(String keyspaceName, String typeName) { + return new Drop(keyspaceName, typeName, DroppedItem.TYPE); + } + + // Utility methods and types: these are not method starters, but they are exposed here in order to + // have a single entry point to all schema builder features. + + /** + * Build the datatype representation of a frozen UDT, to include in a schema builder statement. + * + *

    frozen("foo") will produce frozen<foo>. + * + * @param udtName the name of the UDT. + * @return the type. + */ + public static UDTType frozen(String udtName) { + return UDTType.frozen(udtName); + } + + /** + * Build the datatype representation of a complex UDT type, to include in a schema builder + * statement. + * + *

    As of Cassandra 2.1, this method is not strictly necessary because {@link Create} and {@link + * Alter} provide specialized methods to express simple collections of UDTs, but future versions + * will make it possible to use types such as + * map<text, map<text, frozen<user>>>. + * + * @param literal the type literal as it will appear in the final CQL statement. + * @return the type. + */ + public static UDTType udtLiteral(String literal) { + return UDTType.literal(literal); + } + + /** + * Create options for the size-tiered compaction strategy, for use in a CREATE or ALTER TABLE + * statement. + * + * @return the options. + */ + public static TableOptions.CompactionOptions.SizeTieredCompactionStrategyOptions + sizedTieredStategy() { + return new TableOptions.CompactionOptions.SizeTieredCompactionStrategyOptions(); + } + + /** + * Create options for the leveled compaction strategy, to use in a CREATE or ALTER TABLE + * statement. + * + * @return the options. + */ + public static TableOptions.CompactionOptions.LeveledCompactionStrategyOptions leveledStrategy() { + return new TableOptions.CompactionOptions.LeveledCompactionStrategyOptions(); + } + + /** + * Create options for the date-tiered compaction strategy, to use in a CREATE or ALTER TABLE + * statement. + * + *
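[Editor's note] A sketch of how `frozen()` and `udtLiteral()` are typically fed into a CREATE TABLE builder. The `Create.addPartitionKey()` and `addUDTColumn()` methods are assumed from the 3.x `Create` class, which is not part of this hunk, and all names are illustrative.

```
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.Create;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class UdtColumnExample {
  static Create usersTable() {
    return SchemaBuilder.createTable("ks", "users")
        .addPartitionKey("id", DataType.uuid())
        // frozen("address") renders as frozen<address>
        .addUDTColumn("home_address", SchemaBuilder.frozen("address"))
        // udtLiteral() passes the literal through verbatim, for types the builder cannot express
        .addUDTColumn("addresses_by_label", SchemaBuilder.udtLiteral("map<text, frozen<address>>"));
  }
}
```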

    This strategy was introduced in Cassandra 2.1.1. + * + * @return the options. + */ + public static TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions + dateTieredStrategy() { + return new TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions(); + } + + /** + * Create options for the time window compaction strategy, to use in a CREATE or ALTER TABLE + * statement. + * + *

    This strategy was introduced in Cassandra 3.0.8 and 3.9. + * + * @return the options. + */ + public static TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions + timeWindowCompactionStrategy() { + return new TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions(); + } + + /** + * Create options for the {@code NONE} compression strategy, to use in a CREATE or ALTER TABLE + * statement. + * + * @return the options. + */ + public static TableOptions.CompressionOptions noCompression() { + return new TableOptions.CompressionOptions.NoCompression(); + } + + /** + * Create options for the LZ4 compression strategy, to use in a CREATE or ALTER TABLE statement. + * + * @return the options. + */ + public static TableOptions.CompressionOptions lz4() { + return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.LZ4); + } + + /** + * Create options for the Snappy compression strategy, to use in a CREATE or ALTER TABLE + * statement. + * + * @return the options. + */ + public static TableOptions.CompressionOptions snappy() { + return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.SNAPPY); + } + + /** + * Create options for the Deflate compression strategy, to use in a CREATE or ALTER TABLE + * statement. + * + * @return the options. + */ + public static TableOptions.CompressionOptions deflate() { + return new TableOptions.CompressionOptions(TableOptions.CompressionOptions.Algorithm.DEFLATE); + } + + /** + * Create the speculative retry strategy that never retries reads, to use in a CREATE or ALTER + * TABLE statement. + * + * @return the strategy. + */ + public static TableOptions.SpeculativeRetryValue noSpeculativeRetry() { + return new TableOptions.SpeculativeRetryValue("'NONE'"); + } + + /** + * Create the speculative retry strategy that retries reads of all replicas, to use in a CREATE or + * ALTER TABLE statement. + * + * @return the strategy. + */ + public static TableOptions.SpeculativeRetryValue always() { + return new TableOptions.SpeculativeRetryValue("'ALWAYS'"); + } + + /** + * Create the speculative retry strategy that retries based on the effect on throughput and + * latency, to use in a CREATE or ALTER TABLE statement. + * + * @return the strategy. + */ + public static TableOptions.SpeculativeRetryValue percentile(int percentile) { + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException( + "Percentile value for speculative retry should be between 0 and 100"); + } + return new TableOptions.SpeculativeRetryValue("'" + percentile + "percentile'"); + } + + /** + * Create the speculative retry strategy that retries after a given delay, to use in a CREATE or + * ALTER TABLE statement. + * + * @return the strategy. + */ + public static TableOptions.SpeculativeRetryValue millisecs(int millisecs) { + if (millisecs < 0) { + throw new IllegalArgumentException( + "Millisecond value for speculative retry should be positive"); + } + return new TableOptions.SpeculativeRetryValue("'" + millisecs + "ms'"); + } + + /** + * The direction used in clustering order declarations. + * + * @see Create.Options#clusteringOrder(String, + * com.datastax.driver.core.schemabuilder.SchemaBuilder.Direction) + */ + public enum Direction { + ASC, + DESC + } + + /** Caching strategies, for use in a CREATE or ALTER TABLE statement. 
*/ + public enum Caching { + ALL("'all'"), + KEYS_ONLY("'keys_only'"), + ROWS_ONLY("'rows_only'"), + NONE("'none'"); + + private String value; + + Caching(String value) { + this.value = value; + } + + String value() { + return value; + } + } + + /** Key caching strategies for Cassandra 2.1, for use in a CREATE or ALTER TABLE statement. */ + public enum KeyCaching { + ALL("'all'"), + NONE("'none'"); + private String value; + + KeyCaching(String value) { + this.value = value; + } + + String value() { + return value; + } + } + + /** + * Return the row caching strategy that never caches rows ({@code none}, to use in a CREATE or + * ALTER TABLE statement. + * + * @return the strategy. + */ + public static TableOptions.CachingRowsPerPartition noRows() { + return new TableOptions.CachingRowsPerPartition("'none'"); + } + + /** + * Return the row caching strategy that caches all rows ({@code all}), to use in a CREATE or ALTER + * TABLE statement. + * + *
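[Editor's note] The caching values and compaction/compression factories above only create option objects; they take effect when attached to a table's options (see the `TableOptions` hunk further down). A sketch assuming the 3.x `Create.withOptions()` bridge, with made-up table and column names.

```
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.Create;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class TableOptionsExample {
  static Create.Options eventsTable() {
    return SchemaBuilder.createTable("ks", "events")
        .addPartitionKey("day", DataType.text())
        .addClusteringColumn("ts", DataType.timeuuid())
        .withOptions() // assumed 3.x bridge from Create to its TableOptions
        // Cassandra 2.1+ style: cache all keys and the first 100 rows of each partition.
        .caching(SchemaBuilder.KeyCaching.ALL, SchemaBuilder.rows(100))
        .compactionOptions(SchemaBuilder.timeWindowCompactionStrategy()) // Cassandra 3.0.8+/3.9+
        .compressionOptions(SchemaBuilder.lz4());
  }
}
```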

    Be careful when choosing this option, you can starve Cassandra memory quickly if + * your partition is very large. + * + * @return the strategy. + */ + public static TableOptions.CachingRowsPerPartition allRows() { + return new TableOptions.CachingRowsPerPartition("'all'"); + } + + /** + * Return the row caching strategy that caches a given number of rows, to use in a CREATE or ALTER + * TABLE statement. + * + * @param rowNumber the number of rows to cache. + * @return the strategy. + */ + public static TableOptions.CachingRowsPerPartition rows(int rowNumber) { + if (rowNumber <= 0) { + throw new IllegalArgumentException("rows number for caching should be strictly positive"); + } + return new TableOptions.CachingRowsPerPartition(Integer.toString(rowNumber)); + } + + public static TableOptions.AdditionalWritePolicyValue additionalWritePolicyNever() { + return new TableOptions.AdditionalWritePolicyValue("'NEVER'"); + } + + public static TableOptions.AdditionalWritePolicyValue additionalWritePolicyAlways() { + return new TableOptions.AdditionalWritePolicyValue("'ALWAYS'"); + } + + public static TableOptions.AdditionalWritePolicyValue additionalWritePolicyPercentile( + int percentile) { + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException( + "Percentile value for additional write policy should be between 0 and 100"); + } + return new TableOptions.AdditionalWritePolicyValue("'" + percentile + "percentile'"); + } + + public static TableOptions.AdditionalWritePolicyValue additionalWritePolicyMillisecs( + int millisecs) { + if (millisecs < 0) { + throw new IllegalArgumentException( + "Millisecond value for speculative retry should be positive"); + } + return new TableOptions.AdditionalWritePolicyValue("'" + millisecs + "ms'"); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaStatement.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaStatement.java index f47df613a3b..352d7c60b9e 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/SchemaStatement.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,103 +22,104 @@ import com.datastax.driver.core.ProtocolVersion; import com.datastax.driver.core.RegularStatement; import com.google.common.base.Strings; - import java.nio.ByteBuffer; import java.util.Map; -/** - * A DDL statement generated by {@link SchemaBuilder}. - */ +/** A DDL statement generated by {@link SchemaBuilder}. 
*/ public abstract class SchemaStatement extends RegularStatement { - static final String STATEMENT_START = "\n\t"; - static final String COLUMN_FORMATTING = "\n\t\t"; - - private volatile String cache; - - abstract String buildInternal(); - - @Override - public String getQueryString(CodecRegistry codecRegistry) { - if (cache == null) { - cache = buildInternal(); - } - return cache; - } - - @Override - public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - // DDL statements never have values - return new ByteBuffer[0]; - } - - @Override - public boolean hasValues(CodecRegistry codecRegistry) { - return false; - } - - @Override - public Map getNamedValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - // DDL statements never have values - return null; - } - - @Override - public boolean usesNamedValues() { - return false; - } + static final String STATEMENT_START = "\n\t"; + static final String COLUMN_FORMATTING = "\n\t\t"; - @Override - public String getKeyspace() { - // This is exposed for token-aware routing. Since there is no token awareness for DDL statements, we don't need to - // return anything here (even if a keyspace has been explicitly set in the statement). - return null; - } + private volatile String cache; - /** - * {@inheritDoc} - * - * @param protocolVersion unused by this implementation (the key is always null for schema statements). - * @param codecRegistry unused by this implementation (the key is always null for schema statements). - */ - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return null; // there is no token awareness for DDL statements - } + abstract String buildInternal(); - static void validateNotEmpty(String columnName, String label) { - if (Strings.isNullOrEmpty(columnName)) { - throw new IllegalArgumentException(label + " should not be null or blank"); - } + @Override + public String getQueryString(CodecRegistry codecRegistry) { + if (cache == null) { + cache = buildInternal(); } - - static void validateNotNull(Object value, String label) { - if (value == null) { - throw new IllegalArgumentException(label + " should not be null"); - } - } - - static void validateNotKeyWord(String label, String message) { - if (Metadata.isReservedCqlKeyword(label)) { - throw new IllegalArgumentException(message); - } + return cache; + } + + @Override + public ByteBuffer[] getValues(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + // DDL statements never have values + return new ByteBuffer[0]; + } + + @Override + public boolean hasValues(CodecRegistry codecRegistry) { + return false; + } + + @Override + public Map getNamedValues( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + // DDL statements never have values + return null; + } + + @Override + public boolean usesNamedValues() { + return false; + } + + @Override + public String getKeyspace() { + // This is exposed for token-aware routing. Since there is no token awareness for DDL + // statements, we don't need to + // return anything here (even if a keyspace has been explicitly set in the statement). + return null; + } + + /** + * {@inheritDoc} + * + * @param protocolVersion unused by this implementation (the key is always null for schema + * statements). + * @param codecRegistry unused by this implementation (the key is always null for schema + * statements). 
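[Editor's note] A short sketch of what these overrides mean for callers. The `Session` and the default `CodecRegistry` instance are assumed from the 3.x core API; the keyspace name is illustrative.

```
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.SchemaStatement;

public class SchemaStatementExample {
  static void dropKeyspace(Session session) {
    SchemaStatement drop = SchemaBuilder.dropKeyspace("sensor_data").ifExists();
    // DDL statements never carry values and are never routed in a token-aware way.
    assert !drop.hasValues(CodecRegistry.DEFAULT_INSTANCE);
    assert drop.getKeyspace() == null;
    // Otherwise they behave like any other RegularStatement.
    session.execute(drop);
  }
}
```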
+ */ + @Override + public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return null; // there is no token awareness for DDL statements + } + + static void validateNotEmpty(String columnName, String label) { + if (Strings.isNullOrEmpty(columnName)) { + throw new IllegalArgumentException(label + " should not be null or blank"); } + } - static SchemaStatement fromQueryString(final String queryString) { - return new SchemaStatement() { - @Override - public String buildInternal() { - return queryString; - } - }; + static void validateNotNull(Object value, String label) { + if (value == null) { + throw new IllegalArgumentException(label + " should not be null"); } + } - StatementStart asStatementStart() { - return new StatementStart() { - @Override - public String buildInternal() { - return SchemaStatement.this.buildInternal(); - } - }; + static void validateNotKeyWord(String label, String message) { + if (Metadata.isReservedCqlKeyword(label)) { + throw new IllegalArgumentException(message); } + } + + static SchemaStatement fromQueryString(final String queryString) { + return new SchemaStatement() { + @Override + public String buildInternal() { + return queryString; + } + }; + } + + StatementStart asStatementStart() { + return new StatementStart() { + @Override + public String buildInternal() { + return SchemaStatement.this.buildInternal(); + } + }; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/StatementStart.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/StatementStart.java index 215d8a8f6e2..6bdca1dccab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/StatementStart.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/StatementStart.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,7 @@ */ package com.datastax.driver.core.schemabuilder; -/** - * The start of a statement, that another class will append to, to build the final statement. - */ +/** The start of a statement, that another class will append to, to build the final statement. */ interface StatementStart { - String buildInternal(); + String buildInternal(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/TableOptions.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/TableOptions.java index a05029e223f..0e24d2b98f1 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/TableOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/TableOptions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,1099 +20,1440 @@ import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.base.Strings; - import java.util.ArrayList; import java.util.List; /** * The table options used in a CREATE TABLE or ALTER TABLE statement. - *

    - * Implementation notes: this class is abstract and not meant to use directly. - * The type parameter {@code T} allows usage of covariant return type and makes the builder pattern work for different sub-classes. + * + *

    Implementation notes: this class is abstract and not meant to use directly. The type parameter + * {@code T} allows usage of covariant return type and makes the builder pattern + * work for different sub-classes. * * @param the concrete sub-class of {@link com.datastax.driver.core.schemabuilder.TableOptions} - * @see details on table options + * @see details on table options */ public abstract class TableOptions extends SchemaStatement { - private StatementStart statementStart; - - private Optional cassandra20Caching = Optional.absent(); - private Optional cassandra21KeyCaching = Optional.absent(); - private Optional cassandra21RowCaching = Optional.absent(); - - private Optional bloomFilterFPChance = Optional.absent(); - - private Optional comment = Optional.absent(); - - private Optional compressionOptions = Optional.absent(); - - private Optional compactionOptions = Optional.absent(); - - private Optional dcLocalReadRepairChance = Optional.absent(); - - private Optional defaultTTL = Optional.absent(); - - private Optional gcGraceSeconds = Optional.absent(); + private StatementStart statementStart; - private Optional indexInterval = Optional.absent(); - private Optional minIndexInterval = Optional.absent(); - private Optional maxIndexInterval = Optional.absent(); - - private Optional memtableFlushPeriodInMillis = Optional.absent(); - - private Optional populateIOOnCacheFlush = Optional.absent(); - - private Optional readRepairChance = Optional.absent(); - - private Optional replicateOnWrite = Optional.absent(); + private Optional cassandra20Caching = Optional.absent(); + private Optional cassandra21KeyCaching = Optional.absent(); + private Optional cassandra21RowCaching = Optional.absent(); + + private Optional bloomFilterFPChance = Optional.absent(); + + private Optional comment = Optional.absent(); + + private Optional compressionOptions = Optional.absent(); + + private Optional compactionOptions = Optional.absent(); + + private Optional dcLocalReadRepairChance = Optional.absent(); + + private Optional defaultTTL = Optional.absent(); + + private Optional gcGraceSeconds = Optional.absent(); + + private Optional indexInterval = Optional.absent(); + private Optional minIndexInterval = Optional.absent(); + private Optional maxIndexInterval = Optional.absent(); + + private Optional memtableFlushPeriodInMillis = Optional.absent(); + + private Optional populateIOOnCacheFlush = Optional.absent(); + + private Optional readRepairChance = Optional.absent(); + + private Optional replicateOnWrite = Optional.absent(); + + private Optional speculativeRetry = Optional.absent(); + + private Optional cdc = Optional.absent(); + + private List customOptions = new ArrayList(); + + private Optional readReapair = Optional.absent(); + + private Optional additionalWritePolicy = Optional.absent(); + + @SuppressWarnings("unchecked") + private final T self = (T) this; + + TableOptions(StatementStart statementStart) { + this.statementStart = statementStart; + } + + /** + * Define the caching type for Cassandra 2.0.x. + * + *

    If no call is made to this method, the default value set by Cassandra is {@link + * SchemaBuilder.Caching#KEYS_ONLY}. + * + * @param caching the caching type (all enum values are allowed). + * @return this {@code TableOptions} object. + */ + public T caching(SchemaBuilder.Caching caching) { + this.cassandra20Caching = Optional.fromNullable(caching); + return self; + } + + /** + * Define the caching options for Cassandra 2.1.x. + * + *

    If no call is made to this method, the default values set by Cassandra are keys = {@link + * SchemaBuilder.Caching#ALL} and rows_per_partition = {@link + * com.datastax.driver.core.schemabuilder.SchemaBuilder#noRows()}. + * + * @param keys the key cache type. + * @param rowsPerPartition defines the number of rows to be cached per partition when Row Caching + * is enabled. To create instances, use {@link SchemaBuilder#noRows()}, {@link + * SchemaBuilder#allRows()} or {@link SchemaBuilder#rows(int)}. + * @return this {@code TableOptions} object. + */ + public T caching(SchemaBuilder.KeyCaching keys, CachingRowsPerPartition rowsPerPartition) { + this.cassandra21KeyCaching = Optional.fromNullable(keys); + this.cassandra21RowCaching = Optional.fromNullable(rowsPerPartition); + return self; + } + + /** + * Define the desired false-positive probability for SSTable Bloom filters. + * + *

    If no call is made to this method, the default value set by Cassandra is: + * + *

      + *
    • 0.01 for the size-tiered compaction strategy; + *
    • 0.1 for the leveled compaction strategy. + *
    + * + * @param fpChance the false positive change. This value should be between 0 and 1.0. + * @return this {@code TableOptions} object. + */ + public T bloomFilterFPChance(Double fpChance) { + validateRateValue(fpChance, "Bloom filter false positive change"); + this.bloomFilterFPChance = Optional.fromNullable(fpChance); + return self; + } + + /** + * Define a human readable comment describing the table. + * + * @param comment the comment. + * @return this {@code TableOptions} object. + */ + public T comment(String comment) { + this.comment = Optional.fromNullable(comment); + return self; + } + + /** + * Define the compression options. + * + *
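[Editor's note] A two-setter sketch for the Bloom filter and comment options above. The `Create.Options` parameter is assumed to come from `createTable(...).withOptions()` as in the earlier sketches; the values are arbitrary.

```
import com.datastax.driver.core.schemabuilder.Create;

public class BloomFilterExample {
  static Create.Options tuned(Create.Options table) {
    return table
        .bloomFilterFPChance(0.01) // validated to lie between 0 and 1.0
        .comment("user events, partitioned by day");
  }
}
```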

    If no call is made to this method, the default value set by Cassandra is {@link + * SchemaBuilder#lz4()}. + * + * @param compressionOptions the compression options. To create instances, use {@link + * SchemaBuilder#noCompression()}, {@link SchemaBuilder#lz4()}, {@link SchemaBuilder#snappy()} + * or {@link SchemaBuilder#deflate()}. + * @return this {@code TableOptions} object. + */ + public T compressionOptions(CompressionOptions compressionOptions) { + this.compressionOptions = Optional.fromNullable(compressionOptions); + return self; + } + + /** + * Define the compaction options. + * + *

    If no call is made to this method, the default value set by Cassandra is {@link + * SchemaBuilder#sizedTieredStategy()}. + * + * @param compactionOptions the compaction options. To create instances, use {@link + * SchemaBuilder#sizedTieredStategy()}, {@link SchemaBuilder#leveledStrategy()} or {@link + * SchemaBuilder#dateTieredStrategy()} + * @return this {@code TableOptions} object. + */ + public T compactionOptions(CompactionOptions compactionOptions) { + this.compactionOptions = Optional.fromNullable(compactionOptions); + return self; + } + + /** + * Define the probability of read repairs being invoked over all replicas in the current data + * center. + * + *
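[Editor's note] Both setters above work for CREATE and ALTER; a sketch of the ALTER form, assuming the 3.x `Alter.withOptions()` bridge and an existing `Session` (names are illustrative).

```
import com.datastax.driver.core.Session;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class AlterCompactionExample {
  static void switchToLeveled(Session session) {
    session.execute(
        SchemaBuilder.alterTable("ks", "events")
            .withOptions() // assumed 3.x bridge from Alter to its TableOptions
            .compactionOptions(SchemaBuilder.leveledStrategy())
            .compressionOptions(SchemaBuilder.deflate()));
  }
}
```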

    If no call is made to this method, the default value set by Cassandra is 0.0. + * + * @param dcLocalReadRepairChance the probability. + * @return this {@code TableOptions} object. + */ + public T dcLocalReadRepairChance(Double dcLocalReadRepairChance) { + validateRateValue(dcLocalReadRepairChance, "DC local read repair chance"); + this.dcLocalReadRepairChance = Optional.fromNullable(dcLocalReadRepairChance); + return self; + } + + /** + * Define the default expiration time in seconds for a table. + * + *

    + * + *

    Used in MapReduce/Hive scenarios when you have no control of TTL. + * + *

    If no call is made to this method, the default value set by Cassandra is 0. + * + * @param defaultTimeToLive the default time to live in seconds for a table. + * @return this {@code TableOptions} object. + */ + public T defaultTimeToLive(Integer defaultTimeToLive) { + this.defaultTTL = Optional.fromNullable(defaultTimeToLive); + return self; + } + + /** + * Define the time to wait before garbage collecting tombstones (deletion markers). + * + *

    The default value allows a great deal of time for consistency to be achieved prior to + * deletion. In many deployments this interval can be reduced, and in a single-node cluster it can + * be safely set to zero. + * + *

    If no call is made to this method, the default value set by Cassandra is 864000 secs (10 + * days). + * + * @param gcGraceSeconds the grace period. + * @return this {@code TableOptions} object. + */ + public T gcGraceSeconds(Integer gcGraceSeconds) { + this.gcGraceSeconds = Optional.fromNullable(gcGraceSeconds); + return self; + } + + /** + * Define the index interval for Cassandra 2.0. + * + *
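[Editor's note] A quick sketch combining the TTL and gc_grace_seconds options above for a development setup. The `Create.Options` parameter is assumed to come from `createTable(...).withOptions()` as in the earlier sketches.

```
import com.datastax.driver.core.schemabuilder.Create;

public class GcGraceExample {
  static Create.Options forSingleNodeDev(Create.Options table) {
    return table
        .defaultTimeToLive(86400) // expire rows after one day
        .gcGraceSeconds(0); // per the Javadoc above, safe only on a single-node cluster
  }
}
```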

    To control the sampling of entries from the primary row index, configure sample frequency of + * the partition summary by changing the index interval. After changing the index interval, + * SSTables are written to disk with new information. The interval corresponds to the number of + * index entries that are skipped between taking each sample. By default, Cassandra samples one + * row key out of every 128. The larger the interval, the smaller and less effective the sampling. + * The larger the sampling, the more effective the index, but with increased memory usage. In + * Cassandra 2.0.x, generally, the best trade off between memory usage and performance is a value + * between 128 and 512 in combination with a large table key cache. However, if you have small + * rows (many to an OS page), you may want to increase the sample size, which often lowers memory + * usage without an impact on performance. For large rows, decreasing the sample size may improve + * read performance. + * + *

    If no call is made to this method, the default value set by Cassandra is 128. + * + * @param indexInterval the index interval. + * @return this {@code TableOptions} object. + */ + public T indexInterval(Integer indexInterval) { + this.indexInterval = Optional.fromNullable(indexInterval); + return self; + } + + /** + * Define the minimum index interval for Cassandra 2.1. + * + *

    If no call is made to this method, the default value set by Cassandra is 128. + * + * @param minIndexInterval the minimum index interval. + * @return this {@code TableOptions} object. + * @see #indexInterval(Integer) + */ + public T minIndexInterval(Integer minIndexInterval) { + this.minIndexInterval = Optional.fromNullable(minIndexInterval); + return self; + } + + /** + * Define the maximum index interval for Cassandra 2.1. + * + *

    If no call is made to this method, the default value set by Cassandra is 2048. + * + * @param maxIndexInterval the maximum index interval. + * @return this {@code TableOptions} object. + * @see #indexInterval(Integer) + */ + public T maxIndexInterval(Integer maxIndexInterval) { + this.maxIndexInterval = Optional.fromNullable(maxIndexInterval); + return self; + } + + /** + * Define the memtable flush period. + * + *

    If set, this forces flushing of the memtable after the specified time elapses. + * + *

    If no call is made to this method, the default value set by Cassandra is 0. + * + * @param memtableFlushPeriodInMillis the memtable flush period in milliseconds. + * @return this {@code TableOptions} object. + */ + public T memtableFlushPeriodInMillis(Integer memtableFlushPeriodInMillis) { + this.memtableFlushPeriodInMillis = Optional.fromNullable(memtableFlushPeriodInMillis); + return self; + } + + /** + * Define whether to populate IO cache on flush of sstables. + * + *

    If set, Cassandra adds newly flushed or compacted sstables to the operating system page + * cache, potentially evicting other cached data to make room. Enable when all data in the table + * is expected to fit in memory. + * + *

    If no call is made to this method, the default value set by Cassandra is {@code false}. + * + * @param populateIOOnCacheFlush whether to populate IO cache on flush of sstables. + * @return this {@code TableOptions} object. + * @see the + * global option compaction_preheat_key_cache + */ + public T populateIOCacheOnFlush(Boolean populateIOOnCacheFlush) { + this.populateIOOnCacheFlush = Optional.fromNullable(populateIOOnCacheFlush); + return self; + } + + /** + * Define the probability with which read repairs should be invoked on non-quorum reads. The value + * must be between 0 and 1. + * + *

    If no call is made to this method, the default value set by Cassandra is 0.1. + * + * @param readRepairChance the read repair chance. + * @return this {@code TableOptions} object. + */ + public T readRepairChance(Double readRepairChance) { + validateRateValue(readRepairChance, "Read repair chance"); + this.readRepairChance = Optional.fromNullable(readRepairChance); + return self; + } + + /** + * Define whether to replicate data on write (Cassandra 2.0.x only). + * + *

    When set to {@code true}, replicates writes to all affected replicas regardless of the + * consistency level specified by the client for a write request. For counter tables, this should + * always be set to {@code true}. + * + *

    If no call is made to this method, the default value set by Cassandra is {@code true}. + * + * @param replicateOnWrite whether to replicate data on write. + * @return this {@code TableOptions} object. + */ + public T replicateOnWrite(Boolean replicateOnWrite) { + this.replicateOnWrite = Optional.fromNullable(replicateOnWrite); + return self; + } + + /** + * To override normal read timeout when read_repair_chance is not 1.0, sending another request to + * read, choose one of these values and use the property to create or alter the table: + * + *

      + *
    • ALWAYS: Retry reads of all replicas. + *
    • Xpercentile: Retry reads based on the effect on throughput and latency. + *
    • Yms: Retry reads after specified milliseconds. + *
    • NONE: Do not retry reads. + *
    + * + *

    Using the speculative retry property, you can configure rapid read protection in Cassandra + * 2.0.2 and later. Use this property to retry a request after some milliseconds have passed or + * after a percentile of the typical read latency has been reached, which is tracked per table. + * + *

    + * + *

    If no call is made to this method, the default value set by Cassandra is {@code + * 99percentile}. + * + * @param speculativeRetry the speculative retry. To create instances, use {@link + * SchemaBuilder#noSpeculativeRetry()}, {@link SchemaBuilder#always()}, {@link + * SchemaBuilder#percentile(int)} or {@link SchemaBuilder#millisecs(int)}. + * @return this {@code TableOptions} object. + */ + public T speculativeRetry(SpeculativeRetryValue speculativeRetry) { + this.speculativeRetry = Optional.fromNullable(speculativeRetry); + return self; + } + + /** + * Define whether or not change data capture is enabled on this table. + *
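[Editor's note] A sketch of the speculative retry setter above, using the factories introduced in the `SchemaBuilder` hunk. The `Create.Options` parameter is assumed to come from `createTable(...).withOptions()` as in the earlier sketches.

```
import com.datastax.driver.core.schemabuilder.Create;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class SpeculativeRetryExample {
  static Create.Options rapidReadProtection(Create.Options table) {
    // Retry the read against another replica once the table's p99 latency is exceeded.
    return table.speculativeRetry(SchemaBuilder.percentile(99));
    // Alternatives: SchemaBuilder.millisecs(10), SchemaBuilder.always(),
    // SchemaBuilder.noSpeculativeRetry()
  }
}
```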

    Note that using this option with a version of Apache Cassandra less than 3.8 will raise a + * syntax error. + * + *

    If no call is made to this method, the default value set by Cassandra is {@code false}. + * + * @param cdc Whether or not change data capture should be enabled for this table. + * @return this {@code TableOptions} object. + */ + public T cdc(Boolean cdc) { + this.cdc = Optional.fromNullable(cdc); + return self; + } + + /** + * Define a free-form option as a key/value pair. + * + *

    This method is provided as a fallback if the SchemaBuilder is used with a more recent + * version of Cassandra that has new, unsupported options. + * + * @param key the name of the option. + * @param value the value of the option. If it's a {@code String}, it will be included in single + * quotes, otherwise the result of invoking its {@code toString} method will be used unquoted. + * @return this {@code TableOptions} object. + */ + public T freeformOption(String key, Object value) { + if (Strings.isNullOrEmpty(key)) { + throw new IllegalArgumentException("Key for custom option should not be null or blank"); + } + customOptions.add(buildCustomOption(key, value)); + return self; + } + + /** + * Define the read_repair mode for this table. Possible values are: + * + *

+ * <ul>
+ *   <li>BLOCKING
+ *   <li>NONE
+ * </ul>
+ *

    This option is for Cassandra 4.0+. The default value is {@code BLOCKING}, which means that + * when a read repair is started, a read will block on writes sent to other replicas until the CL + * is reached by the writes. This provides monotonic quorum reads, but not partition level write + * atomicity. + * + *

    If {@code NONE} is specified, the coordinator will reconcile any differences between + * replicas, but will not attempt to repair them. This provides partition level write atomicity, + * but not monotonic quorum reads. + * + * @param readRepair the read repair mode. + * @return this {@code TableOptions} object. + */ + public T readRepair(ReadRepairValue readRepair) { + this.readReapair = Optional.fromNullable(readRepair); + return self; + } + + /** + * Define the additional_write_policy for this table. This specifies the threshold at which a + * cheap quorum write will be upgraded to include transient replicas. + * + *

    This option is for Cassandra 4.0+. The default value is "99p". + * + * @param additionalWritePolicy the additional write policy threshold. + * @return this {@code TableOptions} object. + */ + public T additionalWritePolicy(AdditionalWritePolicyValue additionalWritePolicy) { + this.additionalWritePolicy = Optional.fromNullable(additionalWritePolicy); + return self; + } + + private static String buildCustomOption(String key, Object value) { + return String.format( + "%s = %s", key, (value instanceof String) ? "'" + value + "'" : value.toString()); + } + + private List buildCommonOptions() { + List options = new ArrayList(); + + buildCachingOptions(options); + + if (bloomFilterFPChance.isPresent()) { + options.add("bloom_filter_fp_chance = " + bloomFilterFPChance.get()); + } - private Optional speculativeRetry = Optional.absent(); + if (comment.isPresent()) { + options.add("comment = '" + comment.get() + "'"); + } - private Optional cdc = Optional.absent(); + if (compressionOptions.isPresent()) { + options.add("compression = " + compressionOptions.get().build()); + } - private List customOptions = new ArrayList(); + if (compactionOptions.isPresent()) { + options.add("compaction = " + compactionOptions.get().build()); + } - @SuppressWarnings("unchecked") - private final T self = (T) this; + if (dcLocalReadRepairChance.isPresent()) { + options.add("dclocal_read_repair_chance = " + dcLocalReadRepairChance.get()); + } - TableOptions(StatementStart statementStart) { - this.statementStart = statementStart; + if (defaultTTL.isPresent()) { + options.add("default_time_to_live = " + defaultTTL.get()); } - /** - * Define the caching type for Cassandra 2.0.x. - *
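The readRepair and additionalWritePolicy setters above take ReadRepairValue and AdditionalWritePolicyValue instances whose factory methods are not visible in this excerpt. As a hedged sketch, the same Cassandra 4.0+ options can also be expressed through the freeformOption fallback documented earlier; the keyspace, table and column names are invented.

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class Cassandra40OptionsExample {
  public static void main(String[] args) {
    // String values passed to freeformOption are single-quoted, so this renders
    // roughly as: ... WITH read_repair = 'NONE' AND additional_write_policy = '99p'
    String cql =
        SchemaBuilder.createTable("ks", "events")           // hypothetical keyspace/table
            .addPartitionKey("id", DataType.uuid())
            .addColumn("payload", DataType.text())
            .withOptions()
            .freeformOption("read_repair", "NONE")
            .freeformOption("additional_write_policy", "99p")
            .getQueryString();
    System.out.println(cql);
  }
}
```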

    - * If no call is made to this method, the default value set by Cassandra is {@link SchemaBuilder.Caching#KEYS_ONLY}. - * - * @param caching the caching type (all enum values are allowed). - * @return this {@code TableOptions} object. - */ - public T caching(SchemaBuilder.Caching caching) { - this.cassandra20Caching = Optional.fromNullable(caching); - return self; + if (gcGraceSeconds.isPresent()) { + options.add("gc_grace_seconds = " + gcGraceSeconds.get()); } - /** - * Define the caching options for Cassandra 2.1.x. - *

    - * If no call is made to this method, the default values set by Cassandra are keys = {@link SchemaBuilder.Caching#ALL} and - * rows_per_partition = {@link com.datastax.driver.core.schemabuilder.SchemaBuilder#noRows()}. - * - * @param keys the key cache type. - * @param rowsPerPartition defines the number of rows to be cached per partition when Row Caching is enabled. - * To create instances, use - * {@link SchemaBuilder#noRows()}, - * {@link SchemaBuilder#allRows()} or - * {@link SchemaBuilder#rows(int)}. - * @return this {@code TableOptions} object. - */ - public T caching(SchemaBuilder.KeyCaching keys, CachingRowsPerPartition rowsPerPartition) { - this.cassandra21KeyCaching = Optional.fromNullable(keys); - this.cassandra21RowCaching = Optional.fromNullable(rowsPerPartition); - return self; + if (indexInterval.isPresent()) { + options.add("index_interval = " + indexInterval.get()); } - /** - * Define the desired false-positive probability for SSTable Bloom filters. - *

    - * If no call is made to this method, the default value set by Cassandra is: - *

- * <ul>
- *   <li>0.01 for the size-tiered compaction strategy;</li>
- *   <li>0.1 for the leveled compaction strategy.</li>
- * </ul>
    - * - * @param fpChance the false positive change. This value should be between 0 and 1.0. - * @return this {@code TableOptions} object. - */ - public T bloomFilterFPChance(Double fpChance) { - validateRateValue(fpChance, "Bloom filter false positive change"); - this.bloomFilterFPChance = Optional.fromNullable(fpChance); - return self; + if (minIndexInterval.isPresent()) { + options.add("min_index_interval = " + minIndexInterval.get()); } - /** - * Define a human readable comment describing the table. - * - * @param comment the comment. - * @return this {@code TableOptions} object. - */ - public T comment(String comment) { - this.comment = Optional.fromNullable(comment); - return self; + if (maxIndexInterval.isPresent()) { + options.add("max_index_interval = " + maxIndexInterval.get()); } - /** - * Define the compression options. - *
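The bloom filter defaults listed above are per compaction strategy; a minimal, hypothetical call overriding them with the bloomFilterFPChance setter (which validates that the value stays between 0 and 1) could look like this. Keyspace and table names are invented, and getQueryString() is assumed to be inherited from SchemaStatement.

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class BloomFilterExample {
  public static void main(String[] args) {
    // Hypothetical read-heavy lookup table: trade extra memory for fewer false positives.
    String cql =
        SchemaBuilder.createTable("ks", "lookup")            // hypothetical keyspace/table
            .addPartitionKey("key", DataType.text())
            .addColumn("value", DataType.blob())
            .withOptions()
            .bloomFilterFPChance(0.001)
            .getQueryString();
    System.out.println(cql);
  }
}
```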

    - * If no call is made to this method, the default value set by Cassandra is {@link SchemaBuilder#lz4()}. - * - * @param compressionOptions the compression options. To create instances, use - * {@link SchemaBuilder#noCompression()}, - * {@link SchemaBuilder#lz4()}, - * {@link SchemaBuilder#snappy()} or - * {@link SchemaBuilder#deflate()}. - * @return this {@code TableOptions} object. - */ - public T compressionOptions(CompressionOptions compressionOptions) { - this.compressionOptions = Optional.fromNullable(compressionOptions); - return self; + if (memtableFlushPeriodInMillis.isPresent()) { + options.add("memtable_flush_period_in_ms = " + memtableFlushPeriodInMillis.get()); } - /** - * Define the compaction options. - *

    - * If no call is made to this method, the default value set by Cassandra is {@link SchemaBuilder#sizedTieredStategy()}. - * - * @param compactionOptions the compaction options. To create instances, use - * {@link SchemaBuilder#sizedTieredStategy()}, - * {@link SchemaBuilder#leveledStrategy()} or - * {@link SchemaBuilder#dateTieredStrategy()} - * @return this {@code TableOptions} object. - */ - public T compactionOptions(CompactionOptions compactionOptions) { - this.compactionOptions = Optional.fromNullable(compactionOptions); - return self; + if (populateIOOnCacheFlush.isPresent()) { + options.add("populate_io_cache_on_flush = " + populateIOOnCacheFlush.get()); } - /** - * Define the probability of read repairs being invoked over all replicas in the current data center. - *

    - * If no call is made to this method, the default value set by Cassandra is 0.0. - * - * @param dcLocalReadRepairChance the probability. - * @return this {@code TableOptions} object. - */ - public T dcLocalReadRepairChance(Double dcLocalReadRepairChance) { - validateRateValue(dcLocalReadRepairChance, "DC local read repair chance"); - this.dcLocalReadRepairChance = Optional.fromNullable(dcLocalReadRepairChance); - return self; + if (readRepairChance.isPresent()) { + options.add("read_repair_chance = " + readRepairChance.get()); } - /** - * Define the default expiration time in seconds for a table. - *

    - *

    - * Used in MapReduce/Hive scenarios when you have no control of TTL. - *

    - * If no call is made to this method, the default value set by Cassandra is 0. - * - * @param defaultTimeToLive the default time to live in seconds for a table. - * @return this {@code TableOptions} object. - */ - public T defaultTimeToLive(Integer defaultTimeToLive) { - this.defaultTTL = Optional.fromNullable(defaultTimeToLive); - return self; + if (replicateOnWrite.isPresent()) { + options.add("replicate_on_write = " + replicateOnWrite.get()); } - /** - * Define the time to wait before garbage collecting tombstones (deletion markers). - *

    - * The default value allows a great deal of time for consistency to be achieved prior to deletion. - * In many deployments this interval can be reduced, and in a single-node cluster it can be safely set to zero. - *

    - * If no call is made to this method, the default value set by Cassandra is 864000 secs (10 days). - * - * @param gcGraceSeconds the grace period. - * @return this {@code TableOptions} object. - */ - public T gcGraceSeconds(Integer gcGraceSeconds) { - this.gcGraceSeconds = Optional.fromNullable(gcGraceSeconds); - return self; + if (speculativeRetry.isPresent()) { + options.add("speculative_retry = " + speculativeRetry.get().value()); } - /** - * Define the index interval for Cassandra 2.0. - *

    - * To control the sampling of entries from the primary row index, configure sample frequency of the partition summary by changing the index interval. - * After changing the index interval, SSTables are written to disk with new information. The interval corresponds to the number of index entries that - * are skipped between taking each sample. By default, Cassandra samples one row key out of every 128. The larger the interval, the smaller and less - * effective the sampling. The larger the sampling, the more effective the index, but with increased memory usage. In Cassandra 2.0.x, generally, the - * best trade off between memory usage and performance is a value between 128 and 512 in combination with a large table key cache. However, if you have - * small rows (many to an OS page), you may want to increase the sample size, which often lowers memory usage without an impact on performance. For - * large rows, decreasing the sample size may improve read performance. - *

    - * If no call is made to this method, the default value set by Cassandra is 128. - * - * @param indexInterval the index interval. - * @return this {@code TableOptions} object. - */ - public T indexInterval(Integer indexInterval) { - this.indexInterval = Optional.fromNullable(indexInterval); - return self; + if (cdc.isPresent()) { + options.add("cdc = " + cdc.get()); } - /** - * Define the minimum index interval for Cassandra 2.1. - *

    - * If no call is made to this method, the default value set by Cassandra is 128. - * - * @param minIndexInterval the minimum index interval. - * @return this {@code TableOptions} object. - * @see #indexInterval(Integer) - */ - public T minIndexInterval(Integer minIndexInterval) { - this.minIndexInterval = Optional.fromNullable(minIndexInterval); - return self; + if (readReapair.isPresent()) { + options.add("read_repair = " + readReapair.get().value()); } - /** - * Define the maximum index interval for Cassandra 2.1. - *

    - * If no call is made to this method, the default value set by Cassandra is 2048. - * - * @param maxIndexInterval the maximum index interval. - * @return this {@code TableOptions} object. - * @see #indexInterval(Integer) - */ - public T maxIndexInterval(Integer maxIndexInterval) { - this.maxIndexInterval = Optional.fromNullable(maxIndexInterval); - return self; + if (additionalWritePolicy.isPresent()) { + options.add("additional_write_policy = " + additionalWritePolicy.get().value()); } - /** - * Define the memtable flush period. - *

    - * If set, this forces flushing of the memtable after the specified time elapses. - *

    - * If no call is made to this method, the default value set by Cassandra is 0. - * - * @param memtableFlushPeriodInMillis the memtable flush period in milliseconds. - * @return this {@code TableOptions} object. - */ - public T memtableFlushPeriodInMillis(Integer memtableFlushPeriodInMillis) { - this.memtableFlushPeriodInMillis = Optional.fromNullable(memtableFlushPeriodInMillis); - return self; + options.addAll(customOptions); + + return options; + } + + private void buildCachingOptions(List options) { + if (cassandra20Caching.isPresent() && cassandra21KeyCaching.isPresent()) { + throw new IllegalStateException( + "Can't use Cassandra 2.0 and 2.1 caching at the same time, you must call only one version of caching()"); + } else if (cassandra20Caching.isPresent()) { + options.add("caching = " + cassandra20Caching.get().value()); + } else if (cassandra21KeyCaching.isPresent() && cassandra21RowCaching.isPresent()) { + options.add( + String.format( + "caching = {'keys' : %s, 'rows_per_partition' : %s}", + cassandra21KeyCaching.get().value(), cassandra21RowCaching.get().value())); + } + } + + protected abstract void addSpecificOptions(List options); + + @Override + public final String buildInternal() { + List options = buildCommonOptions(); + addSpecificOptions(options); + return statementStart.buildInternal() + + STATEMENT_START + + "WITH " + + Joiner.on(" AND ").join(options); + } + + static void validateRateValue(Double rateValue, String property) { + if (rateValue != null && (rateValue < 0 || rateValue > 1.0)) { + throw new IllegalArgumentException(property + " should be between 0 and 1"); } + } - /** - * Define whether to populate IO cache on flush of sstables. - *
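To make buildInternal() above concrete: each option setter contributes one "name = value" fragment via buildCommonOptions()/addSpecificOptions(), and the fragments are joined with " AND " after the WITH keyword. A hypothetical sketch follows (the table definition is invented and the output is shown only approximately):

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class TableOptionsExample {
  public static void main(String[] args) {
    // Expected shape (approximate):
    //   CREATE TABLE ks.metrics (...) WITH gc_grace_seconds = 86400
    //     AND bloom_filter_fp_chance = 0.01
    //     AND compaction = {'class' : 'LeveledCompactionStrategy', ...}
    String cql =
        SchemaBuilder.createTable("ks", "metrics")           // hypothetical keyspace/table
            .addPartitionKey("id", DataType.uuid())
            .addClusteringColumn("ts", DataType.timestamp())
            .addColumn("value", DataType.cdouble())
            .withOptions()
            .gcGraceSeconds(86400)
            .bloomFilterFPChance(0.01)
            .compactionOptions(SchemaBuilder.leveledStrategy().ssTableSizeInMB(160))
            .getQueryString();
    System.out.println(cql);
  }
}
```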

    - * If set, Cassandra adds newly flushed or compacted sstables to the operating system page cache, potentially evicting other cached data to make room. - * Enable when all data in the table is expected to fit in memory. - *

    - * If no call is made to this method, the default value set by Cassandra is {@code false}. - * - * @param populateIOOnCacheFlush whether to populate IO cache on flush of sstables. - * @return this {@code TableOptions} object. - * @see the global option compaction_preheat_key_cache - */ - public T populateIOCacheOnFlush(Boolean populateIOOnCacheFlush) { - this.populateIOOnCacheFlush = Optional.fromNullable(populateIOOnCacheFlush); - return self; + /** + * Compaction options for a CREATE or ALTER TABLE statement. + * + *

    To create instances, use {@link SchemaBuilder#sizedTieredStategy()}, {@link + * SchemaBuilder#leveledStrategy()}, {@link SchemaBuilder#dateTieredStrategy()} or {@link + * SchemaBuilder#timeWindowCompactionStrategy()} + */ + public abstract static class CompactionOptions { + + private Strategy strategy; + + private Optional enabled = Optional.absent(); + + private Optional tombstoneCompactionIntervalInDay = Optional.absent(); + + private Optional tombstoneThreshold = Optional.absent(); + + private Optional uncheckedTombstoneCompaction = Optional.absent(); + + private List customOptions = new ArrayList(); + + @SuppressWarnings("unchecked") + private final T self = (T) this; + + CompactionOptions(Strategy compactionStrategy) { + this.strategy = compactionStrategy; } /** - * Define the probability with which read repairs should be invoked on non-quorum reads. The value must be between 0 and 1. - *

    - * If no call is made to this method, the default value set by Cassandra is 0.1. + * Enable or disable background compaction. + * + *

    If no call is made to this method, the default value set by Cassandra is {code true}. * - * @param readRepairChance the read repair chance. - * @return this {@code TableOptions} object. + * @param enabled whether to enable background compaction for the table. + * @return this object (for call chaining). */ - public T readRepairChance(Double readRepairChance) { - validateRateValue(readRepairChance, "Read repair chance"); - this.readRepairChance = Optional.fromNullable(readRepairChance); - return self; + public T enabled(Boolean enabled) { + this.enabled = Optional.fromNullable(enabled); + return self; } /** - * Define whether to replicate data on write (Cassandra 2.0.x only). - *

    - * When set to {@code true}, replicates writes to all affected replicas regardless of the consistency level specified by the client for a write request. - * For counter tables, this should always be set to {@code true}. - *

    - * If no call is made to this method, the default value set by Cassandra is {@code true}. + * Define the minimum number of days to wait after an SSTable creation time before considering + * the SSTable for tombstone compaction. Tombstone compaction is the compaction triggered if the + * SSTable has more garbage-collectable tombstones than tombstone_threshold. * - * @param replicateOnWrite whether to replicate data on write. - * @return this {@code TableOptions} object. + *

    If no call is made to this method, the default value set by Cassandra is 1. + * + * @param tombstoneCompactionInterval the tombstone compaction interval in day. + * @return this object (for call chaining). */ - public T replicateOnWrite(Boolean replicateOnWrite) { - this.replicateOnWrite = Optional.fromNullable(replicateOnWrite); - return self; + public T tombstoneCompactionIntervalInDay(Integer tombstoneCompactionInterval) { + this.tombstoneCompactionIntervalInDay = Optional.fromNullable(tombstoneCompactionInterval); + return self; } /** - * To override normal read timeout when read_repair_chance is not 1.0, sending another request to read, choose one of these values and use the property to create - * or alter the table: - *

- * <ul>
- *   <li>ALWAYS: Retry reads of all replicas.</li>
- *   <li>Xpercentile: Retry reads based on the effect on throughput and latency.</li>
- *   <li>Yms: Retry reads after specified milliseconds.</li>
- *   <li>NONE: Do not retry reads.</li>
- * </ul>
- *

    - * Using the speculative retry property, you can configure rapid read protection in Cassandra 2.0.2 and later. - * Use this property to retry a request after some milliseconds have passed or after a percentile of the typical read latency has been reached, - * which is tracked per table. - *

    - *

    - * If no call is made to this method, the default value set by Cassandra is {code 99percentile}. + * Define the ratio of garbage-collectable tombstones to all contained columns, which if + * exceeded by the SSTable triggers compaction (with no other SSTables) for the purpose of + * purging the tombstones. + * + *

    If no call is made to this method, the default value set by Cassandra is 0.2. * - * @param speculativeRetry the speculative retry. To create instances, use - * {@link SchemaBuilder#noSpeculativeRetry()}, - * {@link SchemaBuilder#always()}, - * {@link SchemaBuilder#percentile(int)} or - * {@link SchemaBuilder#millisecs(int)}. - * @return this {@code TableOptions} object. + * @param tombstoneThreshold the threshold. + * @return this object (for call chaining). */ - public T speculativeRetry(SpeculativeRetryValue speculativeRetry) { - this.speculativeRetry = Optional.fromNullable(speculativeRetry); - return self; + public T tombstoneThreshold(Double tombstoneThreshold) { + validateRateValue(tombstoneThreshold, "Tombstone threshold"); + this.tombstoneThreshold = Optional.fromNullable(tombstoneThreshold); + return self; } /** - * Define whether or not change data capture is enabled on this table. - *

    - * Note that using this option with a version of Apache Cassandra less than 3.8 will raise a syntax error. - *

    - * If no call is made to this method, the default value set by Cassandra is {@code false}. + * Enables more aggressive than normal tombstone compactions. A single SSTable tombstone + * compaction runs without checking the likelihood of success (Cassandra 2.0.9 and later). * - * @param cdc Whether or not change data capture should be enabled for this table. - * @return this {@code TableOptions} object. + *

    If no call is made to this method, the default value set by Cassandra is {code false}. + * + * @param uncheckedTombstoneCompaction whether to enable the feature. + * @return this object (for call chaining). */ - public T cdc(Boolean cdc) { - this.cdc = Optional.fromNullable(cdc); - return self; + public T uncheckedTombstoneCompaction(Boolean uncheckedTombstoneCompaction) { + this.uncheckedTombstoneCompaction = Optional.fromNullable(uncheckedTombstoneCompaction); + return self; } /** * Define a free-form option as a key/value pair. - *

    - * This method is provided as a fallback if the SchemaBuilder is used with a more recent version of Cassandra that has new, unsupported options. * - * @param key the name of the option. - * @param value the value of the option. If it's a {@code String}, it will be included in single quotes, otherwise the result of invoking its - * {@code toString} method will be used unquoted. - * @return this {@code TableOptions} object. + *

    This method is provided as a fallback if the SchemaBuilder is used with a more recent + * version of Cassandra that has new, unsupported options. + * + * @param key the name of the option. + * @param value the value of the option. If it's a {@code CharSequence}, it will be included in + * single quotes, otherwise the result of invoking its {@code toString} method will be used + * unquoted. + * @return this object (for call chaining). */ public T freeformOption(String key, Object value) { - if (Strings.isNullOrEmpty(key)) { - throw new IllegalArgumentException("Key for custom option should not be null or blank"); - } - customOptions.add(buildCustomOption(key, value)); - return self; + if (Strings.isNullOrEmpty(key)) { + throw new IllegalArgumentException("Key for custom option should not be null or blank"); + } + customOptions.add(buildCustomOption(key, value)); + return self; } private static String buildCustomOption(String key, Object value) { - return String.format("%s = %s", - key, - (value instanceof String) - ? "'" + value + "'" - : value.toString()); + return String.format( + "'%s' : %s", key, (value instanceof CharSequence) ? "'" + value + "'" : value.toString()); } - private List buildCommonOptions() { - List options = new ArrayList(); + List buildCommonOptions() { - buildCachingOptions(options); + List options = new ArrayList(); + options.add("'class' : " + strategy.strategyClass()); - if (bloomFilterFPChance.isPresent()) { - options.add("bloom_filter_fp_chance = " + bloomFilterFPChance.get()); - } + if (enabled.isPresent()) { + options.add("'enabled' : " + enabled.get()); + } - if (comment.isPresent()) { - options.add("comment = '" + comment.get() + "'"); - } + if (tombstoneCompactionIntervalInDay.isPresent()) { + options.add("'tombstone_compaction_interval' : " + tombstoneCompactionIntervalInDay.get()); + } - if (compressionOptions.isPresent()) { - options.add("compression = " + compressionOptions.get().build()); - } + if (tombstoneThreshold.isPresent()) { + options.add("'tombstone_threshold' : " + tombstoneThreshold.get()); + } - if (compactionOptions.isPresent()) { - options.add("compaction = " + compactionOptions.get().build()); - } + if (uncheckedTombstoneCompaction.isPresent()) { + options.add("'unchecked_tombstone_compaction' : " + uncheckedTombstoneCompaction.get()); + } - if (dcLocalReadRepairChance.isPresent()) { - options.add("dclocal_read_repair_chance = " + dcLocalReadRepairChance.get()); - } + options.addAll(customOptions); - if (defaultTTL.isPresent()) { - options.add("default_time_to_live = " + defaultTTL.get()); - } + return options; + } - if (gcGraceSeconds.isPresent()) { - options.add("gc_grace_seconds = " + gcGraceSeconds.get()); + public abstract String build(); + + /** Compaction options specific to SizeTiered strategy */ + public static class SizeTieredCompactionStrategyOptions + extends CompactionOptions { + + private Optional bucketHigh = Optional.absent(); + + private Optional bucketLow = Optional.absent(); + + private Optional coldReadsRatioToOmit = Optional.absent(); + + private Optional minThreshold = Optional.absent(); + + private Optional maxThreshold = Optional.absent(); + + private Optional minSSTableSizeInBytes = Optional.absent(); + + SizeTieredCompactionStrategyOptions() { + super(Strategy.SIZED_TIERED); + } + + /** + * Size-tiered compaction strategy (STCS) considers SSTables to be within the same bucket if + * the SSTable size diverges by 50% or less from the default bucket_low and default + * bucket_high values: [average-size × 
bucket_low, average-size × bucket_high]. + * + *

    If no call is made to this method, the default value set by Cassandra is 1.5. + * + * @param bucketHigh the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions bucketHigh(Double bucketHigh) { + this.bucketHigh = Optional.fromNullable(bucketHigh); + return this; + } + + /** + * Size-tiered compaction strategy (STCS) considers SSTables to be within the same bucket if + * the SSTable size diverges by 50% or less from the default bucket_low and default + * bucket_high values: [average-size × bucket_low, average-size × bucket_high]. + * + *

    If no call is made to this method, the default value set by Cassandra is 0.5. + * + * @param bucketLow the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions bucketLow(Double bucketLow) { + this.bucketLow = Optional.fromNullable(bucketLow); + return this; + } + + /** + * The maximum percentage of reads/sec that ignored SSTables may account for. The recommended + * range of values is 0.0 and 1.0. In Cassandra 2.0.3 and later, you can enable the + * cold_reads_to_omit property to tune performace per table. The Optimizations + * around Cold SSTables blog includes detailed information tuning performance using this + * property, which avoids compacting cold SSTables. Use the ALTER TABLE command to configure + * cold_reads_to_omit. + * + *

    If no call is made to this method, the default value set by Cassandra is 0.0 (disabled). + * + * @param coldReadsRatio the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions coldReadsRatioToOmit(Double coldReadsRatio) { + validateRateValue(coldReadsRatio, "Cold read ratio to omit "); + this.coldReadsRatioToOmit = Optional.fromNullable(coldReadsRatio); + return this; + } + + /** + * Sets the minimum number of SSTables to trigger a minor compaction + * + *

    If no call is made to this method, the default value set by Cassandra is 4. + * + * @param minThreshold the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions minThreshold(Integer minThreshold) { + this.minThreshold = Optional.fromNullable(minThreshold); + return this; + } + + /** + * Sets the maximum number of SSTables to allow in a minor compaction. In + * LeveledCompactionStrategy (LCS), it applies to L0 when L0 gets behind, that is, when L0 + * accumulates more than MAX_COMPACTING_L0 SSTables. + * + *

    If no call is made to this method, the default value set by Cassandra is 32. + * + * @param maxThreshold the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions maxThreshold(Integer maxThreshold) { + this.maxThreshold = Optional.fromNullable(maxThreshold); + return this; + } + + /** + * The SizeTieredCompactionStrategy groups SSTables for compaction into buckets. The bucketing + * process groups SSTables that differ in size by less than 50%. This results in a bucketing + * process that is too fine grained for small SSTables. If your SSTables are small, use + * min_sstable_size to define a size threshold (in bytes) below which all SSTables belong to + * one unique bucket + * + *

    If no call is made to this method, the default value set by Cassandra is 52428800 (50 + * MB). + * + * @param minSSTableSize the new value. + * @return this object (for call chaining). + */ + public SizeTieredCompactionStrategyOptions minSSTableSizeInBytes(Long minSSTableSize) { + this.minSSTableSizeInBytes = Optional.fromNullable(minSSTableSize); + return this; + } + + @Override + public String build() { + final List generalOptions = super.buildCommonOptions(); + + List options = new ArrayList(generalOptions); + + if (bucketHigh.isPresent()) { + options.add("'bucket_high' : " + bucketHigh.get()); } - if (indexInterval.isPresent()) { - options.add("index_interval = " + indexInterval.get()); + if (bucketLow.isPresent()) { + options.add("'bucket_low' : " + bucketLow.get()); } - if (minIndexInterval.isPresent()) { - options.add("min_index_interval = " + minIndexInterval.get()); + if (coldReadsRatioToOmit.isPresent()) { + options.add("'cold_reads_to_omit' : " + coldReadsRatioToOmit.get()); } - if (maxIndexInterval.isPresent()) { - options.add("max_index_interval = " + maxIndexInterval.get()); + if (minThreshold.isPresent()) { + options.add("'min_threshold' : " + minThreshold.get()); } - if (memtableFlushPeriodInMillis.isPresent()) { - options.add("memtable_flush_period_in_ms = " + memtableFlushPeriodInMillis.get()); + if (maxThreshold.isPresent()) { + options.add("'max_threshold' : " + maxThreshold.get()); } - if (populateIOOnCacheFlush.isPresent()) { - options.add("populate_io_cache_on_flush = " + populateIOOnCacheFlush.get()); + if (minSSTableSizeInBytes.isPresent()) { + options.add("'min_sstable_size' : " + minSSTableSizeInBytes.get()); } + return "{" + Joiner.on(", ").join(options) + "}"; + } + } - if (readRepairChance.isPresent()) { - options.add("read_repair_chance = " + readRepairChance.get()); + /** Compaction options specific to Leveled strategy */ + public static class LeveledCompactionStrategyOptions + extends CompactionOptions { + + private Optional ssTableSizeInMB = Optional.absent(); + + LeveledCompactionStrategyOptions() { + super(Strategy.LEVELED); + } + + /** + * The target size for SSTables that use the leveled compaction strategy. Although SSTable + * sizes should be less or equal to sstable_size_in_mb, it is possible to have a larger + * SSTable during compaction. This occurs when data for a given partition key is exceptionally + * large. The data is not split into two SSTables + * + *
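A hedged usage sketch for the size-tiered options defined above, assuming these nested classes live in TableOptions as in this patch; build() renders the map that compactionOptions(...) places after "compaction =".

```java
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.SizeTieredCompactionStrategyOptions;

public class SizeTieredExample {
  public static void main(String[] args) {
    SizeTieredCompactionStrategyOptions stcs =
        SchemaBuilder.sizedTieredStategy()      // note: this is the method's actual spelling
            .minThreshold(4)
            .maxThreshold(32)
            .bucketLow(0.5)
            .bucketHigh(1.5)
            .minSSTableSizeInBytes(52428800L);  // 50 MB
    // Prints something like:
    // {'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 4, 'max_threshold' : 32, ...}
    System.out.println(stcs.build());
  }
}
```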

    If no call is made to this method, the default value set by Cassandra is 160. + * + * @param ssTableSizeInMB the new value. + * @return this object (for call chaining). + */ + public LeveledCompactionStrategyOptions ssTableSizeInMB(Integer ssTableSizeInMB) { + this.ssTableSizeInMB = Optional.fromNullable(ssTableSizeInMB); + return this; + } + + @Override + public String build() { + final List generalOptions = super.buildCommonOptions(); + + List options = new ArrayList(generalOptions); + + if (ssTableSizeInMB.isPresent()) { + options.add("'sstable_size_in_mb' : " + ssTableSizeInMB.get()); } + return "{" + Joiner.on(", ").join(options) + "}"; + } + } - if (replicateOnWrite.isPresent()) { - options.add("replicate_on_write = " + replicateOnWrite.get()); + /** Compaction options specific to the date-tiered strategy. */ + public static class DateTieredCompactionStrategyOptions + extends CompactionOptions { + + public enum TimeStampResolution { + MICROSECONDS, + MILLISECONDS + } + + private Optional baseTimeSeconds = Optional.absent(); + + private Optional maxSSTableAgeDays = Optional.absent(); + + private Optional minThreshold = Optional.absent(); + + private Optional maxThreshold = Optional.absent(); + + private Optional timestampResolution = Optional.absent(); + + DateTieredCompactionStrategyOptions() { + super(Strategy.DATE_TIERED); + } + + /** + * Sets the size of the first window. + * + *

    If no call is made to this method, the default value set by Cassandra is 3600 (1 hour). + * + * @param baseTimeSeconds the size of the first window. + * @return this object (for call chaining). + */ + public DateTieredCompactionStrategyOptions baseTimeSeconds(Integer baseTimeSeconds) { + this.baseTimeSeconds = Optional.fromNullable(baseTimeSeconds); + return this; + } + + /** + * Stop compacting SSTables only having data older than these specified days. + * + *

    If no call is made to this method, the default value set by Cassandra is 365. + * + * @param maxSSTableAgeDays the maximum age of the SSTables to compact. + * @return this object (for call chaining). + */ + public DateTieredCompactionStrategyOptions maxSSTableAgeDays(Integer maxSSTableAgeDays) { + this.maxSSTableAgeDays = Optional.fromNullable(maxSSTableAgeDays); + return this; + } + + /** + * Sets the minimum number of SSTables to trigger a minor compaction + * + *

    If no call is made to this method, the default value set by Cassandra is 4. + * + * @param minThreshold the new value. + * @return this object (for call chaining). + */ + public DateTieredCompactionStrategyOptions minThreshold(Integer minThreshold) { + this.minThreshold = Optional.fromNullable(minThreshold); + return this; + } + + /** + * Sets the maximum number of SSTables to allow in a minor compaction. In + * LeveledCompactionStrategy (LCS), it applies to L0 when L0 gets behind, that is, when L0 + * accumulates more than MAX_COMPACTING_L0 SSTables. + * + *

    If no call is made to this method, the default value set by Cassandra is 32. + * + * @param maxThreshold the new value. + * @return this object (for call chaining). + */ + public DateTieredCompactionStrategyOptions maxThreshold(Integer maxThreshold) { + this.maxThreshold = Optional.fromNullable(maxThreshold); + return this; + } + + /** + * Sets the timestamp resolution, depending on the timestamp unit of the data you insert. + * + *

    If no call is made to this method, the default value set by Cassandra is {@code + * MICROSECONDS}. + * + * @param timestampResolution {@link TimeStampResolution#MICROSECONDS} or {@link + * TimeStampResolution#MILLISECONDS}. + * @return this object (for call chaining). + */ + public DateTieredCompactionStrategyOptions timestampResolution( + TimeStampResolution timestampResolution) { + this.timestampResolution = Optional.fromNullable(timestampResolution); + return this; + } + + @Override + public String build() { + final List generalOptions = super.buildCommonOptions(); + + List options = new ArrayList(generalOptions); + + if (baseTimeSeconds.isPresent()) { + options.add("'base_time_seconds' : " + baseTimeSeconds.get()); } - if (speculativeRetry.isPresent()) { - options.add("speculative_retry = " + speculativeRetry.get().value()); + if (maxSSTableAgeDays.isPresent()) { + options.add("'max_sstable_age_days' : " + maxSSTableAgeDays.get()); } - if (cdc.isPresent()) { - options.add("cdc = " + cdc.get()); + if (minThreshold.isPresent()) { + options.add("'min_threshold' : " + minThreshold.get()); } - options.addAll(customOptions); - - return options; - } - - private void buildCachingOptions(List options) { - if (cassandra20Caching.isPresent() && cassandra21KeyCaching.isPresent()) { - throw new IllegalStateException("Can't use Cassandra 2.0 and 2.1 caching at the same time, you must call only one version of caching()"); - } else if (cassandra20Caching.isPresent()) { - options.add("caching = " + cassandra20Caching.get().value()); - } else if (cassandra21KeyCaching.isPresent() && cassandra21RowCaching.isPresent()) { - options.add(String.format("caching = {'keys' : %s, 'rows_per_partition' : %s}", - cassandra21KeyCaching.get().value(), cassandra21RowCaching.get().value())); + if (maxThreshold.isPresent()) { + options.add("'max_threshold' : " + maxThreshold.get()); } - } - - protected abstract void addSpecificOptions(List options); - - @Override - public final String buildInternal() { - List options = buildCommonOptions(); - addSpecificOptions(options); - return statementStart.buildInternal() + STATEMENT_START + - "WITH " + Joiner.on(" AND ").join(options); - } - static void validateRateValue(Double rateValue, String property) { - if (rateValue != null && (rateValue < 0 || rateValue > 1.0)) { - throw new IllegalArgumentException(property + " should be between 0 and 1"); + if (timestampResolution.isPresent()) { + options.add("'timestamp_resolution' : '" + timestampResolution.get() + "'"); } - } - - /** - * Compaction options for a CREATE or ALTER TABLE statement. - *

    - * To create instances, use - * {@link SchemaBuilder#sizedTieredStategy()}, - * {@link SchemaBuilder#leveledStrategy()} or - * {@link SchemaBuilder#dateTieredStrategy()} - */ - public static abstract class CompactionOptions { - - private Strategy strategy; - - private Optional enabled = Optional.absent(); - - private Optional tombstoneCompactionIntervalInDay = Optional.absent(); - - private Optional tombstoneThreshold = Optional.absent(); - - private Optional uncheckedTombstoneCompaction = Optional.absent(); - - private List customOptions = new ArrayList(); - @SuppressWarnings("unchecked") - private final T self = (T) this; - - CompactionOptions(Strategy compactionStrategy) { - this.strategy = compactionStrategy; - } + return "{" + Joiner.on(", ").join(options) + "}"; + } + } - /** - * Enable or disable background compaction. - *

    - * If no call is made to this method, the default value set by Cassandra is {code true}. - * - * @param enabled whether to enable background compaction for the table. - * @return this object (for call chaining). - */ - public T enabled(Boolean enabled) { - this.enabled = Optional.fromNullable(enabled); - return self; + /** Compaction options specific to the time window strategy. */ + public static class TimeWindowCompactionStrategyOptions + extends CompactionOptions { + + public enum CompactionWindowUnit { + MINUTES, + HOURS, + DAYS + } + + public enum TimeStampResolution { + MICROSECONDS, + MILLISECONDS + } + + private Optional bucketHigh = Optional.absent(); + + private Optional bucketLow = Optional.absent(); + + private Optional compactionWindowUnit = Optional.absent(); + + private Optional compactionWindowSize = Optional.absent(); + + private Optional minThreshold = Optional.absent(); + + private Optional maxThreshold = Optional.absent(); + + private Optional minSSTableSizeInBytes = Optional.absent(); + + private Optional timestampResolution = Optional.absent(); + + private Optional unsafeAggressiveSSTableExpiration = Optional.absent(); + + TimeWindowCompactionStrategyOptions() { + super(Strategy.TIME_WINDOW); + } + + /** + * Size-tiered compaction strategy (STCS) is used in the newest window, this method sets the + * bucketHigh value used in STCS. + * + *

    If no call is made to this method, the default value set by Cassandra is 1.5. + * + * @param bucketHigh the new value. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions bucketHigh(Double bucketHigh) { + this.bucketHigh = Optional.fromNullable(bucketHigh); + return this; + } + + /** + * Size-tiered compaction strategy (STCS) is used in the newest window, this method sets the + * bucketLow value used in STCS. + * + *

    If no call is made to this method, the default value set by Cassandra is 0.5. + * + * @param bucketLow the new value. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions bucketLow(Double bucketLow) { + this.bucketLow = Optional.fromNullable(bucketLow); + return this; + } + + /** + * Sets the time unit used to define the window size + * + *

    If no call is made to this method, the default value set by Cassandra is {@code DAYS}. + * + * @param compactionWindowUnit {@link CompactionWindowUnit#MINUTES}, {@link + * CompactionWindowUnit#HOURS} or {@link CompactionWindowUnit#DAYS}. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions compactionWindowUnit( + CompactionWindowUnit compactionWindowUnit) { + this.compactionWindowUnit = Optional.fromNullable(compactionWindowUnit); + return this; + } + + /** + * Sets the number of units that make up a window. + * + *

    If no call is made to this method, the default value set by Cassandra is 1. + * + * @param compactionWindowSize the size of the first window. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions compactionWindowSize( + Integer compactionWindowSize) { + this.compactionWindowSize = Optional.fromNullable(compactionWindowSize); + return this; + } + + /** + * Sets the minimum number of SSTables to trigger a minor compaction + * + *

    If no call is made to this method, the default value set by Cassandra is 4. + * + * @param minThreshold the new value. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions minThreshold(Integer minThreshold) { + this.minThreshold = Optional.fromNullable(minThreshold); + return this; + } + + /** + * Sets the maximum number of SSTables to allow in a minor compaction. + * + *

    If no call is made to this method, the default value set by Cassandra is 32. + * + * @param maxThreshold the new value. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions maxThreshold(Integer maxThreshold) { + this.maxThreshold = Optional.fromNullable(maxThreshold); + return this; + } + + /** + * Size-tiered compaction strategy (STCS) is used in the newest window, this method sets the + * minSSTableSize value used in STCS. + * + *

    If no call is made to this method, the default value set by Cassandra is 52428800 (50 + * MB). + * + * @param minSSTableSize the new value. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions minSSTableSizeInBytes(Long minSSTableSize) { + this.minSSTableSizeInBytes = Optional.fromNullable(minSSTableSize); + return this; + } + + /** + * Sets the timestamp resolution, depending on the timestamp unit of the data you insert. + * + *

    If no call is made to this method, the default value set by Cassandra is {@code + * MICROSECONDS}. + * + * @param timestampResolution {@link TimeStampResolution#MICROSECONDS} or {@link + * TimeStampResolution#MILLISECONDS}. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions timestampResolution( + TimeStampResolution timestampResolution) { + this.timestampResolution = Optional.fromNullable(timestampResolution); + return this; + } + + /** + * Allow expired sstables to be dropped without checking if its data is shadowing other + * sstables. This is a potentially risky option that can lead to data loss or deleted data + * re-appearing. + * + *

    If no call is made to this method, the default value set by Cassandra is false. + * + * @param unsafeAggressiveSSTableExpiration whether to enable unsafe aggressive sstable + * expiration option. + * @return this object (for call chaining). + */ + public TimeWindowCompactionStrategyOptions unsafeAggressiveSSTableExpiration( + Boolean unsafeAggressiveSSTableExpiration) { + this.unsafeAggressiveSSTableExpiration = + Optional.fromNullable(unsafeAggressiveSSTableExpiration); + return this; + } + + @Override + public String build() { + final List generalOptions = super.buildCommonOptions(); + + List options = new ArrayList(generalOptions); + + if (bucketHigh.isPresent()) { + options.add("'bucket_high' : " + bucketHigh.get()); } - /** - * Define the minimum number of days to wait after an SSTable creation time before considering the SSTable for tombstone compaction. - * Tombstone compaction is the compaction triggered if the SSTable has more garbage-collectable tombstones than tombstone_threshold. - *

    - * If no call is made to this method, the default value set by Cassandra is 1. - * - * @param tombstoneCompactionInterval the tombstone compaction interval in day. - * @return this object (for call chaining). - */ - public T tombstoneCompactionIntervalInDay(Integer tombstoneCompactionInterval) { - this.tombstoneCompactionIntervalInDay = Optional.fromNullable(tombstoneCompactionInterval); - return self; + if (bucketLow.isPresent()) { + options.add("'bucket_low' : " + bucketLow.get()); } - /** - * Define the ratio of garbage-collectable tombstones to all contained columns, - * which if exceeded by the SSTable triggers compaction (with no other SSTables) for the purpose of purging the tombstones. - *

    - * If no call is made to this method, the default value set by Cassandra is 0.2. - * - * @param tombstoneThreshold the threshold. - * @return this object (for call chaining). - */ - public T tombstoneThreshold(Double tombstoneThreshold) { - validateRateValue(tombstoneThreshold, "Tombstone threshold"); - this.tombstoneThreshold = Optional.fromNullable(tombstoneThreshold); - return self; + if (compactionWindowUnit.isPresent()) { + options.add("'compaction_window_unit' : '" + compactionWindowUnit.get() + "'"); } - /** - * Enables more aggressive than normal tombstone compactions. A single SSTable tombstone compaction runs without - * checking the likelihood of success (Cassandra 2.0.9 and later). - *

    - * If no call is made to this method, the default value set by Cassandra is {code false}. - * - * @param uncheckedTombstoneCompaction whether to enable the feature. - * @return this object (for call chaining). - */ - public T uncheckedTombstoneCompaction(Boolean uncheckedTombstoneCompaction) { - this.uncheckedTombstoneCompaction = Optional.fromNullable(uncheckedTombstoneCompaction); - return self; + if (compactionWindowSize.isPresent()) { + options.add("'compaction_window_size' : " + compactionWindowSize.get()); } - /** - * Define a free-form option as a key/value pair. - *

    - * This method is provided as a fallback if the SchemaBuilder is used with a more recent version of Cassandra that has new, unsupported options. - * - * @param key the name of the option. - * @param value the value of the option. If it's a {@code CharSequence}, it will be included in single quotes, otherwise the result of invoking its - * {@code toString} method will be used unquoted. - * @return this object (for call chaining). - */ - public T freeformOption(String key, Object value) { - if (Strings.isNullOrEmpty(key)) { - throw new IllegalArgumentException("Key for custom option should not be null or blank"); - } - customOptions.add(buildCustomOption(key, value)); - return self; + if (minThreshold.isPresent()) { + options.add("'min_threshold' : " + minThreshold.get()); } - private static String buildCustomOption(String key, Object value) { - return String.format("'%s' : %s", - key, - (value instanceof CharSequence) - ? "'" + value + "'" - : value.toString()); + if (maxThreshold.isPresent()) { + options.add("'max_threshold' : " + maxThreshold.get()); } - List buildCommonOptions() { - - List options = new ArrayList(); - options.add("'class' : " + strategy.strategyClass()); - - if (enabled.isPresent()) { - options.add("'enabled' : " + enabled.get()); - } - - if (tombstoneCompactionIntervalInDay.isPresent()) { - options.add("'tombstone_compaction_interval' : " + tombstoneCompactionIntervalInDay.get()); - } - - if (tombstoneThreshold.isPresent()) { - options.add("'tombstone_threshold' : " + tombstoneThreshold.get()); - } - - if (uncheckedTombstoneCompaction.isPresent()) { - options.add("'unchecked_tombstone_compaction' : " + uncheckedTombstoneCompaction.get()); - } - - options.addAll(customOptions); - - return options; + if (minSSTableSizeInBytes.isPresent()) { + options.add("'min_sstable_size' : " + minSSTableSizeInBytes.get()); } - public abstract String build(); - - /** - * Compaction options specific to SizeTiered strategy - */ - public static class SizeTieredCompactionStrategyOptions extends CompactionOptions { - - private Optional bucketHigh = Optional.absent(); - - private Optional bucketLow = Optional.absent(); - - private Optional coldReadsRatioToOmit = Optional.absent(); - - private Optional minThreshold = Optional.absent(); - - private Optional maxThreshold = Optional.absent(); - - private Optional minSSTableSizeInBytes = Optional.absent(); - - SizeTieredCompactionStrategyOptions() { - super(Strategy.SIZED_TIERED); - } - - /** - * Size-tiered compaction strategy (STCS) considers SSTables to be within the same bucket if the SSTable size diverges by 50% - * or less from the default bucket_low and default bucket_high values: [average-size × bucket_low, average-size × bucket_high]. - *

    - * If no call is made to this method, the default value set by Cassandra is 1.5. - * - * @param bucketHigh the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions bucketHigh(Double bucketHigh) { - this.bucketHigh = Optional.fromNullable(bucketHigh); - return this; - } - - /** - * Size-tiered compaction strategy (STCS) considers SSTables to be within the same bucket if the SSTable size diverges by 50% - * or less from the default bucket_low and default bucket_high values: [average-size × bucket_low, average-size × bucket_high]. - *

    - * If no call is made to this method, the default value set by Cassandra is 0.5. - * - * @param bucketLow the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions bucketLow(Double bucketLow) { - this.bucketLow = Optional.fromNullable(bucketLow); - return this; - } - - /** - * The maximum percentage of reads/sec that ignored SSTables may account for. - * The recommended range of values is 0.0 and 1.0. - * In Cassandra 2.0.3 and later, you can enable the cold_reads_to_omit property to tune performace per table. - * The Optimizations around Cold SSTables blog includes detailed information tuning performance using this property, - * which avoids compacting cold SSTables. Use the ALTER TABLE command to configure cold_reads_to_omit. - *

    - * If no call is made to this method, the default value set by Cassandra is 0.0 (disabled). - * - * @param coldReadsRatio the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions coldReadsRatioToOmit(Double coldReadsRatio) { - validateRateValue(coldReadsRatio, "Cold read ratio to omit "); - this.coldReadsRatioToOmit = Optional.fromNullable(coldReadsRatio); - return this; - } - - /** - * Sets the minimum number of SSTables to trigger a minor compaction - *

    - * If no call is made to this method, the default value set by Cassandra is 4. - * - * @param minThreshold the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions minThreshold(Integer minThreshold) { - this.minThreshold = Optional.fromNullable(minThreshold); - return this; - } - - /** - * Sets the maximum number of SSTables to allow in a minor compaction. - * In LeveledCompactionStrategy (LCS), it applies to L0 when L0 gets behind, that is, when L0 accumulates more than MAX_COMPACTING_L0 SSTables. - *

    - * If no call is made to this method, the default value set by Cassandra is 32. - * - * @param maxThreshold the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions maxThreshold(Integer maxThreshold) { - this.maxThreshold = Optional.fromNullable(maxThreshold); - return this; - } - - /** - * The SizeTieredCompactionStrategy groups SSTables for compaction into buckets. - * The bucketing process groups SSTables that differ in size by less than 50%. This results in a bucketing process that is too fine grained for small SSTables. - * If your SSTables are small, use min_sstable_size to define a size threshold (in bytes) below which all SSTables belong to one unique bucket - *

    - * If no call is made to this method, the default value set by Cassandra is 52428800 (50 MB). - * - * @param minSSTableSize the new value. - * @return this object (for call chaining). - */ - public SizeTieredCompactionStrategyOptions minSSTableSizeInBytes(Long minSSTableSize) { - this.minSSTableSizeInBytes = Optional.fromNullable(minSSTableSize); - return this; - } - - @Override - public String build() { - final List generalOptions = super.buildCommonOptions(); - - List options = new ArrayList(generalOptions); - - if (bucketHigh.isPresent()) { - options.add("'bucket_high' : " + bucketHigh.get()); - } - - if (bucketLow.isPresent()) { - options.add("'bucket_low' : " + bucketLow.get()); - } - - if (coldReadsRatioToOmit.isPresent()) { - options.add("'cold_reads_to_omit' : " + coldReadsRatioToOmit.get()); - } - - if (minThreshold.isPresent()) { - options.add("'min_threshold' : " + minThreshold.get()); - } - - if (maxThreshold.isPresent()) { - options.add("'max_threshold' : " + maxThreshold.get()); - } - - if (minSSTableSizeInBytes.isPresent()) { - options.add("'min_sstable_size' : " + minSSTableSizeInBytes.get()); - } - return "{" + Joiner.on(", ").join(options) + "}"; - } + if (timestampResolution.isPresent()) { + options.add("'timestamp_resolution' : '" + timestampResolution.get() + "'"); } - /** - * Compaction options specific to Leveled strategy - */ - public static class LeveledCompactionStrategyOptions extends CompactionOptions { - - private Optional ssTableSizeInMB = Optional.absent(); - - LeveledCompactionStrategyOptions() { - super(Strategy.LEVELED); - } - - /** - * The target size for SSTables that use the leveled compaction strategy. - * Although SSTable sizes should be less or equal to sstable_size_in_mb, it is possible to have a larger SSTable during compaction. - * This occurs when data for a given partition key is exceptionally large. The data is not split into two SSTables - *

    - * If no call is made to this method, the default value set by Cassandra is 160. - * - * @param ssTableSizeInMB the new value. - * @return this object (for call chaining). - */ - public LeveledCompactionStrategyOptions ssTableSizeInMB(Integer ssTableSizeInMB) { - this.ssTableSizeInMB = Optional.fromNullable(ssTableSizeInMB); - return this; - } - - @Override - public String build() { - final List generalOptions = super.buildCommonOptions(); - - List options = new ArrayList(generalOptions); - - if (ssTableSizeInMB.isPresent()) { - options.add("'sstable_size_in_mb' : " + ssTableSizeInMB.get()); - } - return "{" + Joiner.on(", ").join(options) + "}"; - } - + if (unsafeAggressiveSSTableExpiration.isPresent()) { + options.add( + "'unsafe_aggressive_sstable_expiration' : '" + + unsafeAggressiveSSTableExpiration.get() + + "'"); } - /** - * Compaction options specific to the date-tiered strategy. - */ - public static class DateTieredCompactionStrategyOptions extends CompactionOptions { - - public enum TimeStampResolution {MICROSECONDS, MILLISECONDS} - - private Optional baseTimeSeconds = Optional.absent(); - - private Optional maxSSTableAgeDays = Optional.absent(); - - private Optional minThreshold = Optional.absent(); - - private Optional maxThreshold = Optional.absent(); - - private Optional timestampResolution = Optional.absent(); - - DateTieredCompactionStrategyOptions() { - super(Strategy.DATE_TIERED); - } - - /** - * Sets the size of the first window. - *

    - * If no call is made to this method, the default value set by Cassandra is 3600 (1 hour). - * - * @param baseTimeSeconds the size of the first window. - * @return this object (for call chaining). - */ - public DateTieredCompactionStrategyOptions baseTimeSeconds(Integer baseTimeSeconds) { - this.baseTimeSeconds = Optional.fromNullable(baseTimeSeconds); - return this; - } - - /** - * Stop compacting SSTables only having data older than these specified days. - *

    - * If no call is made to this method, the default value set by Cassandra is 365. - * - * @param maxSSTableAgeDays the maximum age of the SSTables to compact. - * @return this object (for call chaining). - */ - public DateTieredCompactionStrategyOptions maxSSTableAgeDays(Integer maxSSTableAgeDays) { - this.maxSSTableAgeDays = Optional.fromNullable(maxSSTableAgeDays); - return this; - } - - /** - * Sets the minimum number of SSTables to trigger a minor compaction - *

    - * If no call is made to this method, the default value set by Cassandra is 4. - * - * @param minThreshold the new value. - * @return this object (for call chaining). - */ - public DateTieredCompactionStrategyOptions minThreshold(Integer minThreshold) { - this.minThreshold = Optional.fromNullable(minThreshold); - return this; - } - - /** - * Sets the maximum number of SSTables to allow in a minor compaction. - * In LeveledCompactionStrategy (LCS), it applies to L0 when L0 gets behind, that is, when L0 accumulates more than MAX_COMPACTING_L0 SSTables. - *

    - * If no call is made to this method, the default value set by Cassandra is 32. - * - * @param maxThreshold the new value. - * @return this object (for call chaining). - */ - public DateTieredCompactionStrategyOptions maxThreshold(Integer maxThreshold) { - this.maxThreshold = Optional.fromNullable(maxThreshold); - return this; - } - - /** - * Sets the timestamp resolution, depending on the timestamp unit of the data you insert. - *

    - * If no call is made to this method, the default value set by Cassandra is {@code MICROSECONDS}. - * - * @param timestampResolution {@link TimeStampResolution#MICROSECONDS} or {@link TimeStampResolution#MILLISECONDS}. - * @return this object (for call chaining). - */ - public DateTieredCompactionStrategyOptions timestampResolution(TimeStampResolution timestampResolution) { - this.timestampResolution = Optional.fromNullable(timestampResolution); - return this; - } - - @Override - public String build() { - final List generalOptions = super.buildCommonOptions(); - - List options = new ArrayList(generalOptions); - - if (baseTimeSeconds.isPresent()) { - options.add("'base_time_seconds' : " + baseTimeSeconds.get()); - } - - if (maxSSTableAgeDays.isPresent()) { - options.add("'max_sstable_age_days' : " + maxSSTableAgeDays.get()); - } - - if (minThreshold.isPresent()) { - options.add("'min_threshold' : " + minThreshold.get()); - } - - if (maxThreshold.isPresent()) { - options.add("'max_threshold' : " + maxThreshold.get()); - } - - if (timestampResolution.isPresent()) { - options.add("'timestamp_resolution' : '" + timestampResolution.get() + "'"); - } - - return "{" + Joiner.on(", ").join(options) + "}"; - } - } + return "{" + Joiner.on(", ").join(options) + "}"; + } + } - /** - * Compaction strategies. Possible values: SIZED_TIERED, LEVELED & DATE_TIERED - */ - public static enum Strategy { - SIZED_TIERED("'SizeTieredCompactionStrategy'"), LEVELED("'LeveledCompactionStrategy'"), DATE_TIERED("'DateTieredCompactionStrategy'"); + /** + * Compaction strategies. Possible values: SIZED_TIERED, LEVELED, DATE_TIERED AND TIME_WINDOW + */ + public static enum Strategy { + SIZED_TIERED("'SizeTieredCompactionStrategy'"), + LEVELED("'LeveledCompactionStrategy'"), + DATE_TIERED("'DateTieredCompactionStrategy'"), + TIME_WINDOW("'TimeWindowCompactionStrategy'"); + + private String strategyClass; + + Strategy(String strategyClass) { + this.strategyClass = strategyClass; + } + + public String strategyClass() { + return strategyClass; + } + + @Override + public String toString() { + return strategyClass; + } + } + } - private String strategyClass; + /** + * The compression options for a CREATE or ALTER TABLE statement. + * + *
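As a sketch of the time-window strategy added by this change, assuming SchemaBuilder#timeWindowCompactionStrategy() (referenced in the Javadoc above) returns the options class defined here; the window unit and size are illustrative values only.

```java
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions;
import com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions.CompactionWindowUnit;

public class TimeWindowExample {
  public static void main(String[] args) {
    TimeWindowCompactionStrategyOptions twcs =
        SchemaBuilder.timeWindowCompactionStrategy()
            .compactionWindowUnit(CompactionWindowUnit.DAYS)  // one window per day
            .compactionWindowSize(1)
            .unsafeAggressiveSSTableExpiration(false);        // keep the safe default explicit
    // Prints something like:
    // {'class' : 'TimeWindowCompactionStrategy', 'compaction_window_unit' : 'DAYS',
    //  'compaction_window_size' : 1, 'unsafe_aggressive_sstable_expiration' : 'false'}
    System.out.println(twcs.build());
  }
}
```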

    To create instances, use {@link SchemaBuilder#noCompression()}, {@link SchemaBuilder#lz4()}, + * {@link SchemaBuilder#snappy()} or {@link SchemaBuilder#deflate()}. + */ + public static class CompressionOptions { - Strategy(String strategyClass) { - this.strategyClass = strategyClass; - } + private Algorithm algorithm; - public String strategyClass() { - return strategyClass; - } + private Optional chunkLengthInKb = Optional.absent(); - @Override - public String toString() { - return strategyClass; - } - } + private Optional crcCheckChance = Optional.absent(); + CompressionOptions(Algorithm algorithm) { + this.algorithm = algorithm; } /** - * The compression options for a CREATE or ALTER TABLE statement. - *

    - * To create instances, use - * {@link SchemaBuilder#noCompression()}, - * {@link SchemaBuilder#lz4()}, - * {@link SchemaBuilder#snappy()} or - * {@link SchemaBuilder#deflate()}. + * On disk, SSTables are compressed by block to allow random reads. This subproperty of + * compression defines the size (in KB) of the block. Values larger than the default value might + * improve the compression rate, but increases the minimum size of data to be read from disk + * when a read occurs. The default value is a good middle-ground for compressing tables. Adjust + * compression size to account for read/write access patterns (how much data is typically + * requested at once) and the average size of rows in the table. + * + *

    If no call is made to this method, the default value set by Cassandra is 54. + * + * @param chunkLengthInKb the new value. + * @return this object (for call chaining). */ - public static class CompressionOptions { - - private Algorithm algorithm; + public CompressionOptions withChunkLengthInKb(Integer chunkLengthInKb) { + this.chunkLengthInKb = Optional.fromNullable(chunkLengthInKb); + return this; + } - private Optional chunkLengthInKb = Optional.absent(); + /** + * When compression is enabled, each compressed block includes a checksum of that block for the + * purpose of detecting disk bitrate and avoiding the propagation of corruption to other + * replica. This option defines the probability with which those checksums are checked during + * read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5, + * for instance, to check them on every other read. + * + *

    If no call is made to this method, the default value set by Cassandra is 1.0 (always + * check). + * + * @param crcCheckChance the new value. + * @return this object (for call chaining). + */ + public CompressionOptions withCRCCheckChance(Double crcCheckChance) { + validateRateValue(crcCheckChance, "CRC check chance"); + this.crcCheckChance = Optional.fromNullable(crcCheckChance); + return this; + } - private Optional crcCheckChance = Optional.absent(); + public String build() { + List options = new ArrayList(); + options.add("'sstable_compression' : " + algorithm.value()); - CompressionOptions(Algorithm algorithm) { - this.algorithm = algorithm; - } + if (chunkLengthInKb.isPresent()) { + options.add("'chunk_length_kb' : " + chunkLengthInKb.get()); + } - /** - * On disk, SSTables are compressed by block to allow random reads. - * This subproperty of compression defines the size (in KB) of the block. - * Values larger than the default value might improve the compression rate, but increases the minimum size of data to be read from disk when a read occurs. - * The default value is a good middle-ground for compressing tables. - * Adjust compression size to account for read/write access patterns (how much data is typically requested at once) and the average size of rows in the table. - *

    - * If no call is made to this method, the default value set by Cassandra is 54. - * - * @param chunkLengthInKb the new value. - * @return this object (for call chaining). - */ - public CompressionOptions withChunkLengthInKb(Integer chunkLengthInKb) { - this.chunkLengthInKb = Optional.fromNullable(chunkLengthInKb); - return this; - } + if (crcCheckChance.isPresent()) { + options.add("'crc_check_chance' : " + crcCheckChance.get()); + } + return "{" + Joiner.on(", ").join(options) + "}"; + } - /** - * When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrate and avoiding the propagation - * of corruption to other replica. This option defines the probability with which those checksums are checked during read. - * By default they are always checked. Set to 0 to disable checksum checking and to 0.5, for instance, to check them on every other read. - *

    - * If no call is made to this method, the default value set by Cassandra is 1.0 (always check). - * - * @param crcCheckChance the new value. - * @return this object (for call chaining). - */ - public CompressionOptions withCRCCheckChance(Double crcCheckChance) { - validateRateValue(crcCheckChance, "CRC check chance"); - this.crcCheckChance = Optional.fromNullable(crcCheckChance); - return this; - } + /** Compression algorithms. Possible values: NONE, LZ4, SNAPPY, DEFLATE */ + public static enum Algorithm { + NONE("''"), + LZ4("'LZ4Compressor'"), + SNAPPY("'SnappyCompressor'"), + DEFLATE("'DeflateCompressor'"); - public String build() { - List options = new ArrayList(); - options.add("'sstable_compression' : " + algorithm.value()); + private String value; - if (chunkLengthInKb.isPresent()) { - options.add("'chunk_length_kb' : " + chunkLengthInKb.get()); - } + Algorithm(String value) { + this.value = value; + } - if (crcCheckChance.isPresent()) { - options.add("'crc_check_chance' : " + crcCheckChance.get()); - } - return "{" + Joiner.on(", ").join(options) + "}"; - } + public String value() { + return value; + } - /** - * Compression algorithms. Possible values: NONE, LZ4, SNAPPY, DEFLATE - */ - public static enum Algorithm { - NONE("''"), LZ4("'LZ4Compressor'"), SNAPPY("'SnappyCompressor'"), DEFLATE("'DeflateCompressor'"); + @Override + public String toString() { + return value; + } + } - private String value; + public static class NoCompression extends CompressionOptions { - Algorithm(String value) { - this.value = value; - } + public NoCompression() { + super(Algorithm.NONE); + } - public String value() { - return value; - } + @Override + public CompressionOptions withChunkLengthInKb(Integer chunkLengthInKb) { + return this; + } - @Override - public String toString() { - return value; - } - } + @Override + public CompressionOptions withCRCCheckChance(Double crcCheckChance) { + return this; + } + } + } - public static class NoCompression extends CompressionOptions { + /** + * The speculative retry options. + * + *
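The compression sub-options compose the same way: pick one of the algorithm factories named in the javadoc above, tune the optional knobs, and `build()` renders the CQL map. A small sketch; the rendered strings follow directly from the `build()` and `Algorithm` code shown in this diff.

```java
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

public class CompressionOptionsSketch {
  public static void main(String[] args) {
    // lz4() and noCompression() are the SchemaBuilder factories named in the javadoc above.
    String lz4 =
        SchemaBuilder.lz4()
            .withChunkLengthInKb(128) // compressed block size, in KB
            .withCRCCheckChance(0.5)  // verify the block checksum on half of the reads
            .build();
    // -> {'sstable_compression' : 'LZ4Compressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.5}
    System.out.println(lz4);

    // NoCompression overrides the with* setters as no-ops, so only the empty algorithm is rendered:
    System.out.println(SchemaBuilder.noCompression().build()); // -> {'sstable_compression' : ''}
  }
}
```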

    To create instances, use {@link SchemaBuilder#noSpeculativeRetry()}, {@link + * SchemaBuilder#always()}, {@link SchemaBuilder#percentile(int)} or {@link + * SchemaBuilder#millisecs(int)}. + */ + public static class SpeculativeRetryValue { - public NoCompression() { - super(Algorithm.NONE); - } + private String value; - @Override - public CompressionOptions withChunkLengthInKb(Integer chunkLengthInKb) { - return this; - } + SpeculativeRetryValue(String value) { + this.value = value; + } - @Override - public CompressionOptions withCRCCheckChance(Double crcCheckChance) { - return this; - } - } + String value() { + return value; + } + } + + /** + * Define the number of rows to be cached per partition when row caching is enabled (this feature + * is only applicable to Cassandra 2.1.x). + * + *

    To create instances, use {@link SchemaBuilder#noRows()}, {@link SchemaBuilder#allRows()} or + * {@link SchemaBuilder#rows(int)}. + */ + public static class CachingRowsPerPartition { + private String value; + + CachingRowsPerPartition(String value) { + this.value = value; } - /** - * The speculative retry options. - *

    - * To create instances, use - * {@link SchemaBuilder#noSpeculativeRetry()}, - * {@link SchemaBuilder#always()}, - * {@link SchemaBuilder#percentile(int)} or - * {@link SchemaBuilder#millisecs(int)}. - */ - public static class SpeculativeRetryValue { + public String value() { + return value; + } + } - private String value; + /** Read Repair modes. Possible values: BLOCKING, NONE. */ + public static enum ReadRepairValue { + BLOCKING("'BLOCKING'"), + NONE("'NONE'"); - SpeculativeRetryValue(String value) { - this.value = value; - } + private String value; - String value() { - return value; - } + private ReadRepairValue(String value) { + this.value = value; } - /** - * Define the number of rows to be cached per partition when row caching is enabled - * (this feature is only applicable to Cassandra 2.1.x). - *

    - * To create instances, use - * {@link SchemaBuilder#noRows()}, - * {@link SchemaBuilder#allRows()} or - * {@link SchemaBuilder#rows(int)}. - */ - public static class CachingRowsPerPartition { - private String value; + public String value() { + return value; + } - CachingRowsPerPartition(String value) { - this.value = value; - } + @Override + public String toString() { + return value; + } + } + + /** + * Additional Write Policy. Default value is 99p. + * + *

    To create instances, use {@link SchemaBuilder#additionalWritePolicyNone() ()}, {@link + * SchemaBuilder#additionalWritePolicyAlways()}, {@link + * SchemaBuilder#additionalWritePolicyMillisecs(int)} or {@link + * SchemaBuilder#additionalWritePolicyMillisecs(int)}. + */ + public static class AdditionalWritePolicyValue { + private String value; + + public AdditionalWritePolicyValue(String value) { + this.value = value; + } - public String value() { - return value; - } + public String value() { + return value; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/UDTType.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/UDTType.java index fbbfbbad4c4..c280543962e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/UDTType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/UDTType.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,48 +21,49 @@ /** * Represents a CQL type containing a user-defined type (UDT) in a SchemaBuilder statement. - *
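These last wrapper types (`SpeculativeRetryValue`, `CachingRowsPerPartition`, `ReadRepairValue`, `AdditionalWritePolicyValue`) simply carry the CQL literal that ends up in the generated statement. A hedged sketch follows; it assumes the types are nested in `TableOptions` like the rest of this file, and the literal produced by the `additionalWritePolicy*` factories is illustrative rather than taken from this excerpt.

```java
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.TableOptions;

public class MiscTableOptionsSketch {
  public static void main(String[] args) {
    // Enum constants render as quoted CQL literals (values taken from the enum above):
    System.out.println(TableOptions.ReadRepairValue.BLOCKING.value()); // -> 'BLOCKING'

    // Factory named in the javadoc above; the exact rendered literal (e.g. '10ms') is assumed here.
    TableOptions.AdditionalWritePolicyValue policy =
        SchemaBuilder.additionalWritePolicyMillisecs(10);
    System.out.println(policy.value());
  }
}
```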

    - * Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)} to build instances of this type. + * + *
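As the javadoc above says, `UDTType` instances come from `SchemaBuilder`. A short sketch of what that looks like; the UDT name "address" is made up, and the table-building call such a type would normally be passed to is outside this excerpt.

```java
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.UDTType;

public class UdtTypeSketch {
  public static void main(String[] args) {
    // frozen(String) and udtLiteral(String) are the factories named in the javadoc above.
    UDTType address = SchemaBuilder.frozen("address");
    System.out.println(address.asCQLString()); // -> frozen<address>

    // udtLiteral passes the given CQL type string through unchanged:
    UDTType literal = SchemaBuilder.udtLiteral("frozen<map<text, address>>");
    System.out.println(literal.asCQLString()); // -> frozen<map<text, address>>
  }
}
```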

    Use {@link SchemaBuilder#frozen(String)} or {@link SchemaBuilder#udtLiteral(String)} to build + * instances of this type. */ public final class UDTType implements ColumnType { - private final String asCQLString; + private final String asCQLString; - private UDTType(String asCQLString) { - this.asCQLString = asCQLString; - } + private UDTType(String asCQLString) { + this.asCQLString = asCQLString; + } - @Override - public String asCQLString() { - return asCQLString; - } + @Override + public String asCQLString() { + return asCQLString; + } - static UDTType frozen(String udtName) { - SchemaStatement.validateNotEmpty(udtName, "UDT name"); - return new UDTType("frozen<" + udtName + ">"); - } + static UDTType frozen(String udtName) { + SchemaStatement.validateNotEmpty(udtName, "UDT name"); + return new UDTType("frozen<" + udtName + ">"); + } - static UDTType list(UDTType elementType) { - return new UDTType("list<" + elementType.asCQLString() + ">"); - } + static UDTType list(UDTType elementType) { + return new UDTType("list<" + elementType.asCQLString() + ">"); + } - static UDTType set(UDTType elementType) { - return new UDTType("set<" + elementType.asCQLString() + ">"); - } + static UDTType set(UDTType elementType) { + return new UDTType("set<" + elementType.asCQLString() + ">"); + } - static UDTType mapWithUDTKey(UDTType keyType, DataType valueType) { - return new UDTType("map<" + keyType.asCQLString() + ", " + valueType + ">"); - } + static UDTType mapWithUDTKey(UDTType keyType, DataType valueType) { + return new UDTType("map<" + keyType.asCQLString() + ", " + valueType + ">"); + } - static UDTType mapWithUDTValue(DataType keyType, UDTType valueType) { - return new UDTType("map<" + keyType + ", " + valueType.asCQLString() + ">"); - } + static UDTType mapWithUDTValue(DataType keyType, UDTType valueType) { + return new UDTType("map<" + keyType + ", " + valueType.asCQLString() + ">"); + } - static UDTType mapWithUDTKeyAndValue(UDTType keyType, UDTType valueType) { - return new UDTType("map<" + keyType.asCQLString() + ", " + valueType.asCQLString() + ">"); - } + static UDTType mapWithUDTKeyAndValue(UDTType keyType, UDTType valueType) { + return new UDTType("map<" + keyType.asCQLString() + ", " + valueType.asCQLString() + ">"); + } - static UDTType literal(String literal) { - SchemaStatement.validateNotEmpty(literal, "UDT type literal"); - return new UDTType(literal); - } + static UDTType literal(String literal) { + SchemaStatement.validateNotEmpty(literal, "UDT type literal"); + return new UDTType(literal); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/package-info.java index 5302e30b207..1588393466f 100755 --- a/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/package-info.java +++ b/driver-core/src/main/java/com/datastax/driver/core/schemabuilder/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,7 @@ */ /** * A CQL3 schema builder. - *

    - * The main entry for this package is the {@code SchemaBuilder} class. + * + *

    The main entry for this package is the {@code SchemaBuilder} class. */ package com.datastax.driver.core.schemabuilder; diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/Bytes.java b/driver-core/src/main/java/com/datastax/driver/core/utils/Bytes.java index 882b3103464..5a45a4c02a2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/Bytes.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/Bytes.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,210 +21,194 @@ import java.nio.ByteBuffer; import java.util.Arrays; -/** - * Simple utility methods to make working with bytes (blob) easier. - */ +/** Simple utility methods to make working with bytes (blob) easier. */ public final class Bytes { - private Bytes() { - } + private Bytes() {} - private static final byte[] charToByte = new byte[256]; - private static final char[] byteToChar = new char[16]; - - static { - for (char c = 0; c < charToByte.length; ++c) { - if (c >= '0' && c <= '9') - charToByte[c] = (byte) (c - '0'); - else if (c >= 'A' && c <= 'F') - charToByte[c] = (byte) (c - 'A' + 10); - else if (c >= 'a' && c <= 'f') - charToByte[c] = (byte) (c - 'a' + 10); - else - charToByte[c] = (byte) -1; - } - - for (int i = 0; i < 16; ++i) { - byteToChar[i] = Integer.toHexString(i).charAt(0); - } - } + private static final byte[] charToByte = new byte[256]; + private static final char[] byteToChar = new char[16]; - /* - * We use reflexion to get access to a String protected constructor - * (if available) so we can build avoid copy when creating hex strings. - * That's stolen from Cassandra's code. - */ - private static final Constructor stringConstructor; - - static { - Constructor c; - try { - c = String.class.getDeclaredConstructor(int.class, int.class, char[].class); - c.setAccessible(true); - } catch (Exception e) { - c = null; - } - stringConstructor = c; + static { + for (char c = 0; c < charToByte.length; ++c) { + if (c >= '0' && c <= '9') charToByte[c] = (byte) (c - '0'); + else if (c >= 'A' && c <= 'F') charToByte[c] = (byte) (c - 'A' + 10); + else if (c >= 'a' && c <= 'f') charToByte[c] = (byte) (c - 'a' + 10); + else charToByte[c] = (byte) -1; } - private static String wrapCharArray(char[] c) { - if (c == null) - return null; - - String s = null; - if (stringConstructor != null) { - try { - s = stringConstructor.newInstance(0, c.length, c); - } catch (Exception e) { - // Swallowing as we'll just use a copying constructor - } - } - return s == null ? new String(c) : s; + for (int i = 0; i < 16; ++i) { + byteToChar[i] = Integer.toHexString(i).charAt(0); } - - /** - * Converts a blob to its CQL hex string representation. - *

    - * A CQL blob string representation consist of the hexadecimal - * representation of the blob bytes prefixed by "0x". - * - * @param bytes the blob/bytes to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} - * is {@code null}, this method returns {@code null}. - */ - public static String toHexString(ByteBuffer bytes) { - if (bytes == null) - return null; - - if (bytes.remaining() == 0) - return "0x"; - - char[] array = new char[2 * (bytes.remaining() + 1)]; - array[0] = '0'; - array[1] = 'x'; - return toRawHexString(bytes, array, 2); + } + + /* + * We use reflexion to get access to a String protected constructor + * (if available) so we can build avoid copy when creating hex strings. + * That's stolen from Cassandra's code. + */ + private static final Constructor stringConstructor; + + static { + Constructor c; + try { + c = String.class.getDeclaredConstructor(int.class, int.class, char[].class); + c.setAccessible(true); + } catch (Exception e) { + c = null; } - - /** - * Converts a blob to its CQL hex string representation. - *

    - * A CQL blob string representation consist of the hexadecimal - * representation of the blob bytes. - * - * @param bytes the blob/bytes to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} - * is {@code null}, this method returns {@code null}. - */ - public static String toRawHexString(ByteBuffer bytes) { - if (bytes == null) - return null; - - if (bytes.remaining() == 0) { - return ""; - } - - - char[] array = new char[2 * (bytes.remaining())]; - return toRawHexString(bytes, array, 0); + stringConstructor = c; + } + + private static String wrapCharArray(char[] c) { + if (c == null) return null; + + String s = null; + if (stringConstructor != null) { + try { + s = stringConstructor.newInstance(0, c.length, c); + } catch (Exception e) { + // Swallowing as we'll just use a copying constructor + } } - - /** - * Converts a blob to its CQL hex string representation. - *

    - * A CQL blob string representation consist of the hexadecimal - * representation of the blob bytes prefixed by "0x". - * - * @param byteArray the blob/bytes array to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} - * is {@code null}, this method returns {@code null}. - */ - public static String toHexString(byte[] byteArray) { - return toHexString(ByteBuffer.wrap(byteArray)); + return s == null ? new String(c) : s; + } + + /** + * Converts a blob to its CQL hex string representation. + * + *

    A CQL blob string representation consist of the hexadecimal representation of the blob bytes + * prefixed by "0x". + * + * @param bytes the blob/bytes to convert to a string. + * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this + * method returns {@code null}. + */ + public static String toHexString(ByteBuffer bytes) { + if (bytes == null) return null; + + if (bytes.remaining() == 0) return "0x"; + + char[] array = new char[2 * (bytes.remaining() + 1)]; + array[0] = '0'; + array[1] = 'x'; + return toRawHexString(bytes, array, 2); + } + + /** + * Converts a blob to its CQL hex string representation. + * + *

    A CQL blob string representation consist of the hexadecimal representation of the blob + * bytes. + * + * @param bytes the blob/bytes to convert to a string. + * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this + * method returns {@code null}. + */ + public static String toRawHexString(ByteBuffer bytes) { + if (bytes == null) return null; + + if (bytes.remaining() == 0) { + return ""; } - /** - * Parse an hex string representing a CQL blob. - *

    - * The input should be a valid representation of a CQL blob, i.e. it - * must start by "0x" followed by the hexadecimal representation of the - * blob bytes. - * - * @param str the CQL blob string representation to parse. - * @return the bytes corresponding to {@code str}. If {@code str} - * is {@code null}, this method returns {@code null}. - * @throws IllegalArgumentException if {@code str} is not a valid CQL - * blob string. - */ - public static ByteBuffer fromHexString(String str) { - if ((str.length() & 1) == 1) - throw new IllegalArgumentException("A CQL blob string must have an even length (since one byte is always 2 hexadecimal character)"); - - if (str.charAt(0) != '0' || str.charAt(1) != 'x') - throw new IllegalArgumentException("A CQL blob string must start with \"0x\""); - - return ByteBuffer.wrap(fromRawHexString(str, 2)); + char[] array = new char[2 * (bytes.remaining())]; + return toRawHexString(bytes, array, 0); + } + + /** + * Converts a blob to its CQL hex string representation. + * + *

    A CQL blob string representation consist of the hexadecimal representation of the blob bytes + * prefixed by "0x". + * + * @param byteArray the blob/bytes array to convert to a string. + * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this + * method returns {@code null}. + */ + public static String toHexString(byte[] byteArray) { + return toHexString(ByteBuffer.wrap(byteArray)); + } + + /** + * Parse an hex string representing a CQL blob. + * + *

    The input should be a valid representation of a CQL blob, i.e. it must start by "0x" + * followed by the hexadecimal representation of the blob bytes. + * + * @param str the CQL blob string representation to parse. + * @return the bytes corresponding to {@code str}. If {@code str} is {@code null}, this method + * returns {@code null}. + * @throws IllegalArgumentException if {@code str} is not a valid CQL blob string. + */ + public static ByteBuffer fromHexString(String str) { + if ((str.length() & 1) == 1) + throw new IllegalArgumentException( + "A CQL blob string must have an even length (since one byte is always 2 hexadecimal character)"); + + if (str.charAt(0) != '0' || str.charAt(1) != 'x') + throw new IllegalArgumentException("A CQL blob string must start with \"0x\""); + + return ByteBuffer.wrap(fromRawHexString(str, 2)); + } + + /** + * Extract the content of the provided {@code ByteBuffer} as a byte array. + * + *
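A quick round-trip through the hex helpers above (the blob value is illustrative):

```java
import com.datastax.driver.core.utils.Bytes;
import java.nio.ByteBuffer;

public class BytesSketch {
  public static void main(String[] args) {
    ByteBuffer blob = ByteBuffer.wrap(new byte[] {(byte) 0xCA, (byte) 0xFE});

    String hex = Bytes.toHexString(blob);    // "0xcafe" (lower-case, "0x"-prefixed)
    String raw = Bytes.toRawHexString(blob); // "cafe" (no prefix)

    ByteBuffer parsed = Bytes.fromHexString(hex); // throws IllegalArgumentException without "0x"
    byte[] array = Bytes.getArray(parsed);        // may be the buffer's backing array, not a copy

    System.out.println(hex + " / " + raw + " / " + array.length); // 0xcafe / cafe / 2
  }
}
```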

    This method work with any type of {@code ByteBuffer} (direct and non-direct ones), but when + * the {@code ByteBuffer} is backed by an array, this method will try to avoid copy when possible. + * As a consequence, changes to the returned byte array may or may not reflect into the initial + * {@code ByteBuffer}. + * + * @param bytes the buffer whose content to extract. + * @return a byte array with the content of {@code bytes}. That array may be the array backing + * {@code bytes} if this can avoid a copy. + */ + public static byte[] getArray(ByteBuffer bytes) { + int length = bytes.remaining(); + + if (bytes.hasArray()) { + int boff = bytes.arrayOffset() + bytes.position(); + if (boff == 0 && length == bytes.array().length) return bytes.array(); + else return Arrays.copyOfRange(bytes.array(), boff, boff + length); } - - /** - * Extract the content of the provided {@code ByteBuffer} as a byte array. - *

    - * This method work with any type of {@code ByteBuffer} (direct and non-direct - * ones), but when the {@code ByteBuffer} is backed by an array, this method - * will try to avoid copy when possible. As a consequence, changes to the - * returned byte array may or may not reflect into the initial {@code ByteBuffer}. - * - * @param bytes the buffer whose content to extract. - * @return a byte array with the content of {@code bytes}. That array may be the - * array backing {@code bytes} if this can avoid a copy. - */ - public static byte[] getArray(ByteBuffer bytes) { - int length = bytes.remaining(); - - if (bytes.hasArray()) { - int boff = bytes.arrayOffset() + bytes.position(); - if (boff == 0 && length == bytes.array().length) - return bytes.array(); - else - return Arrays.copyOfRange(bytes.array(), boff, boff + length); - } - // else, DirectByteBuffer.get() is the fastest route - byte[] array = new byte[length]; - bytes.duplicate().get(array); - return array; + // else, DirectByteBuffer.get() is the fastest route + byte[] array = new byte[length]; + bytes.duplicate().get(array); + return array; + } + + private static String toRawHexString(ByteBuffer bytes, char[] array, int offset) { + int size = bytes.remaining(); + int bytesOffset = bytes.position(); + assert array.length >= offset + 2 * size; + for (int i = 0; i < size; i++) { + int bint = bytes.get(i + bytesOffset); + array[offset + i * 2] = byteToChar[(bint & 0xf0) >> 4]; + array[offset + 1 + i * 2] = byteToChar[bint & 0x0f]; } - - private static String toRawHexString(ByteBuffer bytes, char[] array, int offset) { - int size = bytes.remaining(); - int bytesOffset = bytes.position(); - assert array.length >= offset + 2 * size; - for (int i = 0; i < size; i++) { - int bint = bytes.get(i + bytesOffset); - array[offset + i * 2] = byteToChar[(bint & 0xf0) >> 4]; - array[offset + 1 + i * 2] = byteToChar[bint & 0x0f]; - } - return wrapCharArray(array); - } - - /** - * Converts a CQL hex string representation into a byte array. - *

    - * A CQL blob string representation consist of the hexadecimal - * representation of the blob bytes. - * - * @param str the string converted in hex representation. - * @param strOffset he offset for starting the string conversion - * @return the byte array which the String was representing. - */ - - public static byte[] fromRawHexString(String str, int strOffset) { - byte[] bytes = new byte[(str.length() - strOffset) / 2]; - for (int i = 0; i < bytes.length; i++) { - byte halfByte1 = charToByte[str.charAt(strOffset + i * 2)]; - byte halfByte2 = charToByte[str.charAt(strOffset + i * 2 + 1)]; - if (halfByte1 == -1 || halfByte2 == -1) - throw new IllegalArgumentException("Non-hex characters in " + str); - bytes[i] = (byte) ((halfByte1 << 4) | halfByte2); - } - return bytes; + return wrapCharArray(array); + } + + /** + * Converts a CQL hex string representation into a byte array. + * + *

    A CQL blob string representation consist of the hexadecimal representation of the blob + * bytes. + * + * @param str the string converted in hex representation. + * @param strOffset he offset for starting the string conversion + * @return the byte array which the String was representing. + */ + public static byte[] fromRawHexString(String str, int strOffset) { + byte[] bytes = new byte[(str.length() - strOffset) / 2]; + for (int i = 0; i < bytes.length; i++) { + byte halfByte1 = charToByte[str.charAt(strOffset + i * 2)]; + byte halfByte2 = charToByte[str.charAt(strOffset + i * 2 + 1)]; + if (halfByte1 == -1 || halfByte2 == -1) + throw new IllegalArgumentException("Non-hex characters in " + str); + bytes[i] = (byte) ((halfByte1 << 4) | halfByte2); } + return bytes; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/MoreFutures.java b/driver-core/src/main/java/com/datastax/driver/core/utils/MoreFutures.java index ef22df145f2..fe17d5b8f5d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/MoreFutures.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/MoreFutures.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,32 +17,54 @@ */ package com.datastax.driver.core.utils; +import com.datastax.driver.core.GuavaCompatibility; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; -/** - * Helpers to work with Guava's {@link ListenableFuture}. - */ +/** Helpers to work with Guava's {@link ListenableFuture}. */ public class MoreFutures { - /** - * An immediate successful {@code ListenableFuture}. - */ - public static final ListenableFuture VOID_SUCCESS = Futures.immediateFuture(null); + /** An immediate successful {@code ListenableFuture}. */ + public static final ListenableFuture VOID_SUCCESS = Futures.immediateFuture(null); - /** - * A {@link FutureCallback} that does nothing on failure. - */ - public static abstract class SuccessCallback implements FutureCallback { - @Override - public void onFailure(Throwable t) { /* nothing */ } + /** A {@link FutureCallback} that does nothing on failure. */ + public abstract static class SuccessCallback implements FutureCallback { + @Override + public void onFailure(Throwable t) { + /* nothing */ } + } - /** - * A {@link FutureCallback} that does nothing on success. - */ - public static abstract class FailureCallback implements FutureCallback { - @Override - public void onSuccess(V result) { /* nothing */ } + /** A {@link FutureCallback} that does nothing on success. 
*/ + public abstract static class FailureCallback implements FutureCallback { + @Override + public void onSuccess(V result) { + /* nothing */ } + } + + /** + * Configures a {@link SettableFuture} to propagate the result of a future. + * + * @param settable future to be propagated to + * @param future future to propagate + * @param + */ + public static void propagateFuture( + final SettableFuture settable, ListenableFuture future) { + GuavaCompatibility.INSTANCE.addCallback( + future, + new FutureCallback() { + @Override + public void onSuccess(T result) { + settable.set(result); + } + + @Override + public void onFailure(Throwable t) { + settable.setException(t); + } + }); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/MoreObjects.java b/driver-core/src/main/java/com/datastax/driver/core/utils/MoreObjects.java index 5d4230c113e..7cf42382072 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/MoreObjects.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/MoreObjects.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,16 +21,16 @@ /** * Driver-specific implementation of utility object methods. - *
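The new `propagateFuture` helper above just forwards the completion (or failure) of one future into a `SettableFuture`. For instance:

```java
import com.datastax.driver.core.utils.MoreFutures;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

public class PropagateFutureSketch {
  public static void main(String[] args) throws Exception {
    SettableFuture<String> settable = SettableFuture.create();
    ListenableFuture<String> source = Futures.immediateFuture("done");

    // When 'source' completes, 'settable' is completed with the same result;
    // if 'source' failed, the exception would be propagated via setException instead.
    MoreFutures.propagateFuture(settable, source);

    System.out.println(settable.get()); // -> done
  }
}
```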

    - * They are available in some versions of Java/Guava, but not across all versions ranges supported by the driver, hence - * the custom implementation. + * + *

    They are available in some versions of Java/Guava, but not across all versions ranges + * supported by the driver, hence the custom implementation. */ public class MoreObjects { - public static boolean equal(Object first, Object second) { - return (first == second) || (first != null && first.equals(second)); - } + public static boolean equal(Object first, Object second) { + return (first == second) || (first != null && first.equals(second)); + } - public static int hashCode(Object... objects) { - return Arrays.hashCode(objects); - } + public static int hashCode(Object... objects) { + return Arrays.hashCode(objects); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java index 4f1737b6b09..83b317b8c5d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +19,6 @@ import com.datastax.driver.core.Native; import com.google.common.base.Charsets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.NetworkInterface; @@ -27,361 +26,373 @@ import java.net.UnknownHostException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.*; +import java.util.Calendar; +import java.util.Enumeration; +import java.util.HashSet; +import java.util.Properties; +import java.util.Random; +import java.util.Set; +import java.util.TimeZone; +import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Utility methods to help working with UUIDs, and more specifically, with time-based UUIDs - * (also known as Version 1 UUIDs). + * Utility methods to help working with UUIDs, and more specifically, with time-based UUIDs (also + * known as Version 1 UUIDs). + * *
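The two `MoreObjects` helpers are the usual building blocks for `equals`/`hashCode`; a minimal sketch with a made-up value class:

```java
import com.datastax.driver.core.utils.MoreObjects;

public class Point {
  private final int x;
  private final int y;

  public Point(int x, int y) {
    this.x = x;
    this.y = y;
  }

  @Override
  public boolean equals(Object other) {
    if (!(other instanceof Point)) return false;
    Point that = (Point) other;
    // null-safe equality, same contract as Guava's Objects.equal
    return MoreObjects.equal(this.x, that.x) && MoreObjects.equal(this.y, that.y);
  }

  @Override
  public int hashCode() {
    return MoreObjects.hashCode(x, y); // Arrays.hashCode over the boxed values
  }
}
```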

    Notes on the algorithm used to generate time-based UUIDs

    + * * The algorithm follows roughly the description in RFC-4122, but with the following adaptations: + * *
+ *   1. Since Java does not provide direct access to the host's MAC address, that information is
+ *      replaced with a digest of all IP addresses available on the host;
+ *   2. The process ID (PID) isn't easily available to Java either, so it is determined by one of
+ *      the following methods, in the order they are listed below:
+ *      1. If the System property {@value PID_SYSTEM_PROPERTY} is set then the value to use as a
+ *         PID will be read from that property;
+ *      2. Otherwise, if a native call to {@link Native#processId() getpid()} is possible, then the
+ *         PID will be read from that call;
+ *      3. Otherwise, an attempt will be made to read the PID from JMX's
+ *         {@link ManagementFactory#getRuntimeMXBean() RuntimeMXBean}, which is a well-known, yet
+ *         undocumented "hack", since most JVMs tend to use the JVM's PID as part of that MXBean
+ *         name;
+ *      4. If all of the above fails, a random integer will be generated and used as a surrogate
+ *         PID.
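The PID lookup order described above can be short-circuited with the system property it names. This has to happen before the `UUIDs` class is first used, because the node part is computed in a static initializer. A small sketch, with an illustrative PID value:

```java
import com.datastax.driver.core.utils.UUIDs;
import java.util.UUID;

public class ForcedPidSketch {
  public static void main(String[] args) {
    // Equivalent to passing -Dcom.datastax.driver.PID=12345 on the command line;
    // must run before UUIDs is loaded, since the node part is built in a static initializer.
    System.setProperty("com.datastax.driver.PID", "12345");

    UUID id = UUIDs.timeBased();
    System.out.println(id + " / version " + id.version()); // version 1
  }
}
```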
      *
    * * @see JAVA-444 - * @see A Universally Unique IDentifier (UUID) URN Namespace (RFC 4122) + * @see A Universally Unique IDentifier (UUID) URN + * Namespace (RFC 4122) */ public final class UUIDs { - /** - * The System property to use to force the value of the process ID (PID). - */ - public static final String PID_SYSTEM_PROPERTY = "com.datastax.driver.PID"; - - private static final Logger LOGGER = LoggerFactory.getLogger(UUIDs.class); - - private UUIDs() { - } - - private static final long START_EPOCH = makeEpoch(); - private static final long CLOCK_SEQ_AND_NODE = makeClockSeqAndNode(); + /** The System property to use to force the value of the process ID (PID). */ + public static final String PID_SYSTEM_PROPERTY = "com.datastax.driver.PID"; + + private static final Logger LOGGER = LoggerFactory.getLogger(UUIDs.class); + + private UUIDs() {} + + private static final long START_EPOCH = makeEpoch(); + private static final long CLOCK_SEQ_AND_NODE = makeClockSeqAndNode(); + + /* + * The min and max possible lsb for a UUID. + * Note that his is not 0 and all 1's because Cassandra TimeUUIDType + * compares the lsb parts as a signed byte array comparison. So the min + * value is 8 times -128 and the max is 8 times +127. + * + * Note that we ignore the uuid variant (namely, MIN_CLOCK_SEQ_AND_NODE + * have variant 2 as it should, but MAX_CLOCK_SEQ_AND_NODE have variant 0) + * because I don't trust all uuid implementation to have correctly set + * those (pycassa don't always for instance). + */ + private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L; + private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL; + + private static final AtomicLong lastTimestamp = new AtomicLong(0L); + + private static long makeEpoch() { + // UUID v1 timestamp must be in 100-nanoseconds interval since 00:00:00.000 15 Oct 1582. + Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT-0")); + c.set(Calendar.YEAR, 1582); + c.set(Calendar.MONTH, Calendar.OCTOBER); + c.set(Calendar.DAY_OF_MONTH, 15); + c.set(Calendar.HOUR_OF_DAY, 0); + c.set(Calendar.MINUTE, 0); + c.set(Calendar.SECOND, 0); + c.set(Calendar.MILLISECOND, 0); + return c.getTimeInMillis(); + } + + private static long makeNode() { /* - * The min and max possible lsb for a UUID. - * Note that his is not 0 and all 1's because Cassandra TimeUUIDType - * compares the lsb parts as a signed byte array comparison. So the min - * value is 8 times -128 and the max is 8 times +127. - * - * Note that we ignore the uuid variant (namely, MIN_CLOCK_SEQ_AND_NODE - * have variant 2 as it should, but MAX_CLOCK_SEQ_AND_NODE have variant 0) - * because I don't trust all uuid implementation to have correctly set - * those (pycassa don't always for instance). + * We don't have access to the MAC address (in pure JAVA at least) but + * need to generate a node part that identify this host as uniquely as + * possible. + * The spec says that one option is to take as many source that + * identify this node as possible and hash them together. That's what + * we do here by gathering all the ip of this host as well as a few + * other sources. */ - private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L; - private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL; - - private static final AtomicLong lastTimestamp = new AtomicLong(0L); - - private static long makeEpoch() { - // UUID v1 timestamp must be in 100-nanoseconds interval since 00:00:00.000 15 Oct 1582. 
- Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT-0")); - c.set(Calendar.YEAR, 1582); - c.set(Calendar.MONTH, Calendar.OCTOBER); - c.set(Calendar.DAY_OF_MONTH, 15); - c.set(Calendar.HOUR_OF_DAY, 0); - c.set(Calendar.MINUTE, 0); - c.set(Calendar.SECOND, 0); - c.set(Calendar.MILLISECOND, 0); - return c.getTimeInMillis(); + try { + + MessageDigest digest = MessageDigest.getInstance("MD5"); + for (String address : getAllLocalAddresses()) update(digest, address); + + Properties props = System.getProperties(); + update(digest, props.getProperty("java.vendor")); + update(digest, props.getProperty("java.vendor.url")); + update(digest, props.getProperty("java.version")); + update(digest, props.getProperty("os.arch")); + update(digest, props.getProperty("os.name")); + update(digest, props.getProperty("os.version")); + update(digest, getProcessPiece()); + + byte[] hash = digest.digest(); + + long node = 0; + for (int i = 0; i < 6; i++) node |= (0x00000000000000ffL & (long) hash[i]) << (i * 8); + // Since we don't use the mac address, the spec says that multicast + // bit (least significant bit of the first byte of the node ID) must be 1. + return node | 0x0000010000000000L; + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); } - - private static long makeNode() { - - /* - * We don't have access to the MAC address (in pure JAVA at least) but - * need to generate a node part that identify this host as uniquely as - * possible. - * The spec says that one option is to take as many source that - * identify this node as possible and hash them together. That's what - * we do here by gathering all the ip of this host as well as a few - * other sources. - */ - try { - - MessageDigest digest = MessageDigest.getInstance("MD5"); - for (String address : getAllLocalAddresses()) - update(digest, address); - - Properties props = System.getProperties(); - update(digest, props.getProperty("java.vendor")); - update(digest, props.getProperty("java.vendor.url")); - update(digest, props.getProperty("java.version")); - update(digest, props.getProperty("os.arch")); - update(digest, props.getProperty("os.name")); - update(digest, props.getProperty("os.version")); - update(digest, getProcessPiece()); - - byte[] hash = digest.digest(); - - long node = 0; - for (int i = 0; i < 6; i++) - node |= (0x00000000000000ffL & (long) hash[i]) << (i * 8); - // Since we don't use the mac address, the spec says that multicast - // bit (least significant bit of the first byte of the node ID) must be 1. 
- return node | 0x0000010000000000L; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } + } + + private static String getProcessPiece() { + Integer pid = null; + String pidProperty = System.getProperty(PID_SYSTEM_PROPERTY); + if (pidProperty != null) { + try { + pid = Integer.parseInt(pidProperty); + LOGGER.info("PID obtained from System property {}: {}", PID_SYSTEM_PROPERTY, pid); + } catch (NumberFormatException e) { + LOGGER.warn( + "Incorrect integer specified for PID in System property {}: {}", + PID_SYSTEM_PROPERTY, + pidProperty); + } } - - private static String getProcessPiece() { - Integer pid = null; - String pidProperty = System.getProperty(PID_SYSTEM_PROPERTY); - if (pidProperty != null) { - try { - pid = Integer.parseInt(pidProperty); - LOGGER.info("PID obtained from System property {}: {}", PID_SYSTEM_PROPERTY, pid); - } catch (NumberFormatException e) { - LOGGER.warn("Incorrect integer specified for PID in System property {}: {}", PID_SYSTEM_PROPERTY, pidProperty); - } - } - if (pid == null && Native.isGetpidAvailable()) { - try { - pid = Native.processId(); - LOGGER.info("PID obtained through native call to getpid(): {}", pid); - } catch (Exception e) { - LOGGER.warn("Native call to getpid() failed", e); - } + if (pid == null && Native.isGetpidAvailable()) { + try { + pid = Native.processId(); + if (pid == 0) { + LOGGER.warn( + "PID returned through native call was 0, JNR versions incompatible? Falling back to JMX."); + pid = null; + } else { + LOGGER.info("PID obtained through native call to getpid(): {}", pid); } - if (pid == null) { - try { - String pidJmx = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; - pid = Integer.parseInt(pidJmx); - LOGGER.info("PID obtained through JMX: {}", pid); - } catch (Exception e) { - LOGGER.warn("Failed to obtain PID from JMX", e); - } - } - if (pid == null) { - pid = new java.util.Random().nextInt(); - LOGGER.warn("Could not determine PID, falling back to a random integer: {}", pid); - } - ClassLoader loader = UUIDs.class.getClassLoader(); - int loaderId = loader != null ? System.identityHashCode(loader) : 0; - return Integer.toHexString(pid) + Integer.toHexString(loaderId); - } - - private static void update(MessageDigest digest, String value) { - if (value != null) - digest.update(value.getBytes(Charsets.UTF_8)); - } - - private static long makeClockSeqAndNode() { - long clock = new Random(System.currentTimeMillis()).nextLong(); - long node = makeNode(); - - long lsb = 0; - lsb |= (clock & 0x0000000000003FFFL) << 48; - lsb |= 0x8000000000000000L; - lsb |= node; - return lsb; + } catch (Exception e) { + LOGGER.warn("Native call to getpid() failed", e); + } } - - /** - * Creates a new random (version 4) UUID. - *

    - * This method is just a convenience for {@code UUID.randomUUID()}. - * - * @return a newly generated, pseudo random, version 4 UUID. - */ - public static UUID random() { - return UUID.randomUUID(); + if (pid == null) { + try { + String pidJmx = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; + pid = Integer.parseInt(pidJmx); + LOGGER.info("PID obtained through JMX: {}", pid); + } catch (Exception e) { + LOGGER.warn("Failed to obtain PID from JMX", e); + } } - - /** - * Creates a new time-based (version 1) UUID. - *

    - * UUIDs generated by this method are suitable for use with the - * {@code timeuuid} Cassandra type. In particular the generated UUID - * includes the timestamp of its generation. - *

    - * Note that there is no way to provide your own timestamp. This is deliberate, as we feel that this does not - * conform to the UUID specification, and therefore don't want to encourage it through the API. - * If you want to do it anyway, use the following workaround: - *

    -     * Random random = new Random();
    -     * UUID uuid = new UUID(UUIDs.startOf(userProvidedTimestamp).getMostSignificantBits(), random.nextLong());
    -     * 
    - * If you simply need to perform a range query on a {@code timeuuid} column, use the "fake" UUID generated by - * {@link #startOf(long)} and {@link #endOf(long)}. - * - * @return a new time-based UUID. - */ - public static UUID timeBased() { - return new UUID(makeMSB(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE); + if (pid == null) { + pid = new java.util.Random().nextInt(); + LOGGER.warn("Could not determine PID, falling back to a random integer: {}", pid); } - - /** - * Creates a "fake" time-based UUID that sorts as the smallest possible - * version 1 UUID generated at the provided timestamp. - *

    - * Such created UUIDs are useful in queries to select a time range of a - * {@code timeuuid} column. - *

    - * The UUIDs created by this method are not unique and as such are - * not suitable for anything else than querying a specific time - * range. In particular, you should not insert such UUIDs. "True" UUIDs from - * user-provided timestamps are not supported (see {@link #timeBased()} - * for more explanations). - *

    - * Also, the timestamp to provide as a parameter must be a Unix timestamp (as - * returned by {@link System#currentTimeMillis} or {@link java.util.Date#getTime}), and - * not a count of 100-nanosecond intervals since 00:00:00.00, 15 October 1582 (as required by RFC-4122). - *

    - * In other words, given a UUID {@code uuid}, you should never call - * {@code startOf(uuid.timestamp())} but rather - * {@code startOf(unixTimestamp(uuid))}. - *

    - * Lastly, please note that Cassandra's {@code timeuuid} sorting is not compatible - * with {@link UUID#compareTo} and hence the UUIDs created by this method - * are not necessarily lower bound for that latter method. - * - * @param timestamp the Unix timestamp for which the created UUID must be a - * lower bound. - * @return the smallest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - public static UUID startOf(long timestamp) { - return new UUID(makeMSB(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE); + ClassLoader loader = UUIDs.class.getClassLoader(); + int loaderId = loader != null ? System.identityHashCode(loader) : 0; + return Integer.toHexString(pid) + Integer.toHexString(loaderId); + } + + private static void update(MessageDigest digest, String value) { + if (value != null) digest.update(value.getBytes(Charsets.UTF_8)); + } + + private static long makeClockSeqAndNode() { + long clock = new Random(System.currentTimeMillis()).nextLong(); + long node = makeNode(); + + long lsb = 0; + lsb |= (clock & 0x0000000000003FFFL) << 48; + lsb |= 0x8000000000000000L; + lsb |= node; + return lsb; + } + + /** + * Creates a new random (version 4) UUID. + * + *

    This method is just a convenience for {@code UUID.randomUUID()}. + * + * @return a newly generated, pseudo random, version 4 UUID. + */ + public static UUID random() { + return UUID.randomUUID(); + } + + /** + * Creates a new time-based (version 1) UUID. + * + *

    UUIDs generated by this method are suitable for use with the {@code timeuuid} Cassandra + * type. In particular the generated UUID includes the timestamp of its generation. + * + *

    Note that there is no way to provide your own timestamp. This is deliberate, as we feel that + * this does not conform to the UUID specification, and therefore don't want to encourage it + * through the API. If you want to do it anyway, use the following workaround: + * + *

    +   * Random random = new Random();
    +   * UUID uuid = new UUID(UUIDs.startOf(userProvidedTimestamp).getMostSignificantBits(), random.nextLong());
    +   * 
    + * + * If you simply need to perform a range query on a {@code timeuuid} column, use the "fake" UUID + * generated by {@link #startOf(long)} and {@link #endOf(long)}. + * + * @return a new time-based UUID. + */ + public static UUID timeBased() { + return new UUID(makeMSB(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE); + } + + /** + * Creates a "fake" time-based UUID that sorts as the smallest possible version 1 UUID generated + * at the provided timestamp. + * + *

    Such created UUIDs are useful in queries to select a time range of a {@code timeuuid} + * column. + * + *

    The UUIDs created by this method are not unique and as such are not suitable + * for anything else than querying a specific time range. In particular, you should not insert + * such UUIDs. "True" UUIDs from user-provided timestamps are not supported (see {@link + * #timeBased()} for more explanations). + * + *

    Also, the timestamp to provide as a parameter must be a Unix timestamp (as returned by + * {@link System#currentTimeMillis} or {@link java.util.Date#getTime}), and not a count + * of 100-nanosecond intervals since 00:00:00.00, 15 October 1582 (as required by RFC-4122). + * + *

    In other words, given a UUID {@code uuid}, you should never call {@code + * startOf(uuid.timestamp())} but rather {@code startOf(unixTimestamp(uuid))}. + * + *

    Lastly, please note that Cassandra's {@code timeuuid} sorting is not compatible with {@link + * UUID#compareTo} and hence the UUIDs created by this method are not necessarily lower bound for + * that latter method. + * + * @param timestamp the Unix timestamp for which the created UUID must be a lower bound. + * @return the smallest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. + */ + public static UUID startOf(long timestamp) { + return new UUID(makeMSB(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE); + } + + /** + * Creates a "fake" time-based UUID that sorts as the biggest possible version 1 UUID generated at + * the provided timestamp. + * + *

    See {@link #startOf(long)} for explanations about the intended usage of such UUID. + * + * @param timestamp the Unix timestamp for which the created UUID must be an upper bound. + * @return the biggest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. + */ + public static UUID endOf(long timestamp) { + long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1; + return new UUID(makeMSB(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE); + } + + /** + * Return the Unix timestamp contained by the provided time-based UUID. + * + *
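Putting `startOf`/`endOf` together for the range-query use case described above (the table and column names in the comment are made up):

```java
import com.datastax.driver.core.utils.UUIDs;
import java.util.UUID;

public class TimeuuidRangeSketch {
  public static void main(String[] args) {
    long to = System.currentTimeMillis();
    long from = to - 60 * 60 * 1000; // one hour ago

    UUID lower = UUIDs.startOf(from); // smallest timeuuid for 'from'
    UUID upper = UUIDs.endOf(to);     // biggest timeuuid for 'to'

    // Only use these as query bounds, e.g.
    //   SELECT * FROM events WHERE day = ? AND ts >= <lower> AND ts <= <upper>
    // Never insert them: they are "fake" version 1 UUIDs and are not unique.
    System.out.println(lower + " .. " + upper);
  }
}
```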

    This method is not equivalent to {@link UUID#timestamp()}. More precisely, a version 1 UUID + * stores a timestamp that represents the number of 100-nanoseconds intervals since midnight, 15 + * October 1582 and that is what {@link UUID#timestamp()} returns. This method however converts + * that timestamp to the equivalent Unix timestamp in milliseconds, i.e. a timestamp representing + * a number of milliseconds since midnight, January 1, 1970 UTC. In particular, the timestamps + * returned by this method are comparable to the timestamps returned by {@link + * System#currentTimeMillis}, {@link java.util.Date#getTime}, etc. + * + * @param uuid the UUID to return the timestamp of. + * @return the Unix timestamp of {@code uuid}. + * @throws IllegalArgumentException if {@code uuid} is not a version 1 UUID. + */ + public static long unixTimestamp(UUID uuid) { + if (uuid.version() != 1) + throw new IllegalArgumentException( + String.format( + "Can only retrieve the unix timestamp for version 1 uuid (provided version %d)", + uuid.version())); + + long timestamp = uuid.timestamp(); + return (timestamp / 10000) + START_EPOCH; + } + + /* + * Note that currently we use {@link System#currentTimeMillis} for a base time in + * milliseconds, and then if we are in the same milliseconds that the + * previous generation, we increment the number of nanoseconds. + * However, since the precision is 100-nanoseconds intervals, we can only + * generate 10K UUID within a millisecond safely. If we detect we have + * already generated that much UUID within a millisecond (which, while + * admittedly unlikely in a real application, is very achievable on even + * modest machines), then we stall the generator (busy spin) until the next + * millisecond as required by the RFC. + */ + private static long getCurrentTimestamp() { + while (true) { + long now = fromUnixTimestamp(System.currentTimeMillis()); + long last = lastTimestamp.get(); + if (now > last) { + if (lastTimestamp.compareAndSet(last, now)) return now; + } else { + long lastMillis = millisOf(last); + // If the clock went back in time, bail out + if (millisOf(now) < millisOf(last)) return lastTimestamp.incrementAndGet(); + + long candidate = last + 1; + // If we've generated more than 10k uuid in that millisecond, + // we restart the whole process until we get to the next millis. + // Otherwise, we try use our candidate ... unless we've been + // beaten by another thread in which case we try again. + if (millisOf(candidate) == lastMillis && lastTimestamp.compareAndSet(last, candidate)) + return candidate; + } } - - /** - * Creates a "fake" time-based UUID that sorts as the biggest possible - * version 1 UUID generated at the provided timestamp. - *
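The distinction called out above between the raw RFC-4122 timestamp and the Unix timestamp, in code:

```java
import com.datastax.driver.core.utils.UUIDs;
import java.util.Date;
import java.util.UUID;

public class UnixTimestampSketch {
  public static void main(String[] args) {
    UUID id = UUIDs.timeBased();

    long rfc4122 = id.timestamp();              // 100-ns intervals since 1582-10-15, NOT epoch millis
    long epochMillis = UUIDs.unixTimestamp(id); // comparable to System.currentTimeMillis()

    System.out.println(new Date(epochMillis) + " (raw uuid timestamp: " + rfc4122 + ")");
    // unixTimestamp throws IllegalArgumentException for non-version-1 UUIDs, e.g. UUIDs.random()
  }
}
```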
<p>
    - * See {@link #startOf(long)} for explanations about the intended usage of such UUID. - * - * @param timestamp the Unix timestamp for which the created UUID must be an - * upper bound. - * @return the biggest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - public static UUID endOf(long timestamp) { - long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1; - return new UUID(makeMSB(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE); - } - - /** - * Return the Unix timestamp contained by the provided time-based UUID. - *
<p>
    - * This method is not equivalent to {@link UUID#timestamp()}. More - * precisely, a version 1 UUID stores a timestamp that represents the - * number of 100-nanoseconds intervals since midnight, 15 October 1582 and - * that is what {@link UUID#timestamp()} returns. This method however - * converts that timestamp to the equivalent Unix timestamp in - * milliseconds, i.e. a timestamp representing a number of milliseconds - * since midnight, January 1, 1970 UTC. In particular, the timestamps - * returned by this method are comparable to the timestamps returned by - * {@link System#currentTimeMillis}, {@link java.util.Date#getTime}, etc. - * - * @param uuid the UUID to return the timestamp of. - * @return the Unix timestamp of {@code uuid}. - * @throws IllegalArgumentException if {@code uuid} is not a version 1 UUID. - */ - public static long unixTimestamp(UUID uuid) { - if (uuid.version() != 1) - throw new IllegalArgumentException(String.format("Can only retrieve the unix timestamp for version 1 uuid (provided version %d)", uuid.version())); - - long timestamp = uuid.timestamp(); - return (timestamp / 10000) + START_EPOCH; + } + + // Package visible for testing + static long fromUnixTimestamp(long tstamp) { + return (tstamp - START_EPOCH) * 10000; + } + + private static long millisOf(long timestamp) { + return timestamp / 10000; + } + + // Package visible for testing + static long makeMSB(long timestamp) { + long msb = 0L; + msb |= (0x00000000ffffffffL & timestamp) << 32; + msb |= (0x0000ffff00000000L & timestamp) >>> 16; + msb |= (0x0fff000000000000L & timestamp) >>> 48; + msb |= 0x0000000000001000L; // sets the version to 1. + return msb; + } + + private static Set getAllLocalAddresses() { + Set allIps = new HashSet(); + try { + InetAddress localhost = InetAddress.getLocalHost(); + allIps.add(localhost.toString()); + // Also return the hostname if available, it won't hurt (this does a dns lookup, it's only + // done once at startup) + allIps.add(localhost.getCanonicalHostName()); + InetAddress[] allMyIps = InetAddress.getAllByName(localhost.getCanonicalHostName()); + if (allMyIps != null) { + for (int i = 0; i < allMyIps.length; i++) allIps.add(allMyIps[i].toString()); + } + } catch (UnknownHostException e) { + // Ignore, we'll try the network interfaces anyway } - /* - * Note that currently we use {@link System#currentTimeMillis} for a base time in - * milliseconds, and then if we are in the same milliseconds that the - * previous generation, we increment the number of nanoseconds. - * However, since the precision is 100-nanoseconds intervals, we can only - * generate 10K UUID within a millisecond safely. If we detect we have - * already generated that much UUID within a millisecond (which, while - * admittedly unlikely in a real application, is very achievable on even - * modest machines), then we stall the generator (busy spin) until the next - * millisecond as required by the RFC. - */ - private static long getCurrentTimestamp() { - while (true) { - long now = fromUnixTimestamp(System.currentTimeMillis()); - long last = lastTimestamp.get(); - if (now > last) { - if (lastTimestamp.compareAndSet(last, now)) - return now; - } else { - long lastMillis = millisOf(last); - // If the clock went back in time, bail out - if (millisOf(now) < millisOf(last)) - return lastTimestamp.incrementAndGet(); - - long candidate = last + 1; - // If we've generated more than 10k uuid in that millisecond, - // we restart the whole process until we get to the next millis. 
- // Otherwise, we try use our candidate ... unless we've been - // beaten by another thread in which case we try again. - if (millisOf(candidate) == lastMillis && lastTimestamp.compareAndSet(last, candidate)) - return candidate; - } + try { + Enumeration en = NetworkInterface.getNetworkInterfaces(); + if (en != null) { + while (en.hasMoreElements()) { + Enumeration enumIpAddr = en.nextElement().getInetAddresses(); + while (enumIpAddr.hasMoreElements()) allIps.add(enumIpAddr.nextElement().toString()); } + } + } catch (SocketException e) { + // Ignore, if we've really got nothing so far, we'll throw an exception } - // Package visible for testing - static long fromUnixTimestamp(long tstamp) { - return (tstamp - START_EPOCH) * 10000; - } - - private static long millisOf(long timestamp) { - return timestamp / 10000; - } - - // Package visible for testing - static long makeMSB(long timestamp) { - long msb = 0L; - msb |= (0x00000000ffffffffL & timestamp) << 32; - msb |= (0x0000ffff00000000L & timestamp) >>> 16; - msb |= (0x0fff000000000000L & timestamp) >>> 48; - msb |= 0x0000000000001000L; // sets the version to 1. - return msb; - } - - private static Set getAllLocalAddresses() { - Set allIps = new HashSet(); - try { - InetAddress localhost = InetAddress.getLocalHost(); - allIps.add(localhost.toString()); - // Also return the hostname if available, it won't hurt (this does a dns lookup, it's only done once at startup) - allIps.add(localhost.getCanonicalHostName()); - InetAddress[] allMyIps = InetAddress.getAllByName(localhost.getCanonicalHostName()); - if (allMyIps != null) { - for (int i = 0; i < allMyIps.length; i++) - allIps.add(allMyIps[i].toString()); - } - } catch (UnknownHostException e) { - // Ignore, we'll try the network interfaces anyway - } - - try { - Enumeration en = NetworkInterface.getNetworkInterfaces(); - if (en != null) { - while (en.hasMoreElements()) { - Enumeration enumIpAddr = en.nextElement().getInetAddresses(); - while (enumIpAddr.hasMoreElements()) - allIps.add(enumIpAddr.nextElement().toString()); - } - } - } catch (SocketException e) { - // Ignore, if we've really got nothing so far, we'll throw an exception - } - - return allIps; - } + return allIps; + } } diff --git a/driver-core/src/main/resources/com/datastax/driver/core/Driver.properties b/driver-core/src/main/resources/com/datastax/driver/core/Driver.properties index fe3c96a752e..23b955a044f 100644 --- a/driver-core/src/main/resources/com/datastax/driver/core/Driver.properties +++ b/driver-core/src/main/resources/com/datastax/driver/core/Driver.properties @@ -1,11 +1,13 @@ # -# Copyright (C) 2012-2017 DataStax Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
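The `startOf`/`endOf` pair above builds "fake" boundary UUIDs for range queries over `timeuuid` columns, and `unixTimestamp` converts the 100-nanosecond, 1582-based timestamp stored in a version 1 UUID back into Unix milliseconds. As a rough illustration only — assuming these methods sit on the driver's public `UUIDs` utility class, whose class and package declarations are not part of this hunk — a caller could use them like this:

```
import java.util.UUID;
import com.datastax.driver.core.utils.UUIDs;

public class TimeuuidRangeExample {
  public static void main(String[] args) {
    long end = System.currentTimeMillis();
    long start = end - 3600 * 1000L; // one hour ago

    // Boundary UUIDs covering every real timeuuid generated in [start, end]
    // under Cassandra's timeuuid ordering. They are query bounds only and
    // should never be written to the database as real values.
    UUID lower = UUIDs.startOf(start);
    UUID upper = UUIDs.endOf(end);
    System.out.println("lower bound: " + lower);
    System.out.println("upper bound: " + upper);
    // e.g. ... WHERE pk = ? AND ts >= <lower> AND ts <= <upper>, where ts is a timeuuid column

    // Round trip: a freshly generated timeuuid converts back to Unix milliseconds,
    // comparable to System.currentTimeMillis().
    UUID now = UUIDs.timeBased();
    System.out.println("generated at ~" + UUIDs.unixTimestamp(now) + " ms since the epoch");
  }
}
```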
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/driver-core/src/test/java/com/datastax/driver/core/AbstractBatchIdempotencyTest.java b/driver-core/src/test/java/com/datastax/driver/core/AbstractBatchIdempotencyTest.java index ed7ecf2e714..50cface64a8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AbstractBatchIdempotencyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AbstractBatchIdempotencyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,163 +17,164 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; -public abstract class AbstractBatchIdempotencyTest { - - protected abstract TestBatch createBatch(); - - /** - * Unify Batch and BatchStatement to avoid duplicating all tests - */ - protected interface TestBatch { - //Batch only accepts RegularStatement, so we use it for common interface - void add(RegularStatement statement); - - Boolean isIdempotent(); - - void setIdempotent(boolean idempotent); - } - - @Test(groups = "unit") - public void isIdempotent_should_return_true_if_no_statements_added() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); - } - - @Test(groups = "unit") - public void isIdempotent_should_return_true_if_all_statements_are_idempotent() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); - - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); - - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); - } +import org.testng.annotations.Test; - @Test(groups = "unit") - public void isIdempotent_should_return_false_if_any_statements_is_nonidempotent() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); +public abstract class AbstractBatchIdempotencyTest { - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); + protected abstract TestBatch createBatch(); - batch.add(statementWithIdempotency(false)); - assertThat(batch.isIdempotent()).isFalse(); + /** Unify Batch and BatchStatement to avoid duplicating all tests */ + protected interface TestBatch { + // Batch only accepts RegularStatement, so we use it for common interface + void add(RegularStatement statement); - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isFalse(); - } + 
Boolean isIdempotent(); - @Test(groups = "unit") - public void isIdempotent_should_return_null_if_no_nonidempotent_statements_and_some_are_nullidempotent() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + void setIdempotent(boolean idempotent); + } - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); + @Test(groups = "unit") + public void isIdempotent_should_return_true_if_no_statements_added() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); + } - batch.add(statementWithIdempotency(null)); - assertThat(batch.isIdempotent()).isNull(); + @Test(groups = "unit") + public void isIdempotent_should_return_true_if_all_statements_are_idempotent() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isNull(); - } + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); - @Test(groups = "unit") - public void isIdempotent_should_return_false_if_both_nonidempotent_and_nullidempotent_statements_present() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); + } - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); + @Test(groups = "unit") + public void isIdempotent_should_return_false_if_any_statements_is_nonidempotent() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - batch.add(statementWithIdempotency(null)); - assertThat(batch.isIdempotent()).isNull(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); - batch.add(statementWithIdempotency(false)); - assertThat(batch.isIdempotent()).isFalse(); + batch.add(statementWithIdempotency(false)); + assertThat(batch.isIdempotent()).isFalse(); - batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isFalse(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isFalse(); + } - batch.add(statementWithIdempotency(null)); - assertThat(batch.isIdempotent()).isFalse(); + @Test(groups = "unit") + public void + isIdempotent_should_return_null_if_no_nonidempotent_statements_and_some_are_nullidempotent() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - batch.add(statementWithIdempotency(false)); - assertThat(batch.isIdempotent()).isFalse(); - } + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); - @Test(groups = "unit") - public void isIdempotent_should_return_override_flag_if_no_statements_added() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + batch.add(statementWithIdempotency(null)); + assertThat(batch.isIdempotent()).isNull(); - batch.setIdempotent(false); - assertThat(batch.isIdempotent()).isFalse(); - } + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isNull(); + } - @Test(groups = "unit") - public void isIdempotent_should_return_override_flag_if_calculated_idempotency_true() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + @Test(groups = "unit") + public void + isIdempotent_should_return_false_if_both_nonidempotent_and_nullidempotent_statements_present() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - 
batch.add(statementWithIdempotency(true)); - assertThat(batch.isIdempotent()).isTrue(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); - batch.setIdempotent(false); - assertThat(batch.isIdempotent()).isFalse(); - } + batch.add(statementWithIdempotency(null)); + assertThat(batch.isIdempotent()).isNull(); - @Test(groups = "unit") - public void isIdempotent_should_return_override_flag_if_calculated_idempotency_null() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + batch.add(statementWithIdempotency(false)); + assertThat(batch.isIdempotent()).isFalse(); - batch.add(statementWithIdempotency(null)); - assertThat(batch.isIdempotent()).isNull(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isFalse(); - batch.setIdempotent(false); - assertThat(batch.isIdempotent()).isFalse(); - } + batch.add(statementWithIdempotency(null)); + assertThat(batch.isIdempotent()).isFalse(); - @Test(groups = "unit") - public void isIdempotent_should_return_override_flag_if_calculated_idempotency_false() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + batch.add(statementWithIdempotency(false)); + assertThat(batch.isIdempotent()).isFalse(); + } - batch.add(statementWithIdempotency(false)); - assertThat(batch.isIdempotent()).isFalse(); + @Test(groups = "unit") + public void isIdempotent_should_return_override_flag_if_no_statements_added() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - batch.setIdempotent(true); - assertThat(batch.isIdempotent()).isTrue(); - } + batch.setIdempotent(false); + assertThat(batch.isIdempotent()).isFalse(); + } - @Test(groups = "unit") - public void isIdempotent_should_return_override_flag_if_calculated_idempotency_equals_override_value() { - TestBatch batch = createBatch(); - assertThat(batch.isIdempotent()).isTrue(); + @Test(groups = "unit") + public void isIdempotent_should_return_override_flag_if_calculated_idempotency_true() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - batch.add(statementWithIdempotency(false)); - assertThat(batch.isIdempotent()).isFalse(); + batch.add(statementWithIdempotency(true)); + assertThat(batch.isIdempotent()).isTrue(); + + batch.setIdempotent(false); + assertThat(batch.isIdempotent()).isFalse(); + } - batch.setIdempotent(false); - assertThat(batch.isIdempotent()).isFalse(); - } + @Test(groups = "unit") + public void isIdempotent_should_return_override_flag_if_calculated_idempotency_null() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); + + batch.add(statementWithIdempotency(null)); + assertThat(batch.isIdempotent()).isNull(); + + batch.setIdempotent(false); + assertThat(batch.isIdempotent()).isFalse(); + } + + @Test(groups = "unit") + public void isIdempotent_should_return_override_flag_if_calculated_idempotency_false() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); + + batch.add(statementWithIdempotency(false)); + assertThat(batch.isIdempotent()).isFalse(); + + batch.setIdempotent(true); + assertThat(batch.isIdempotent()).isTrue(); + } + + @Test(groups = "unit") + public void + isIdempotent_should_return_override_flag_if_calculated_idempotency_equals_override_value() { + TestBatch batch = createBatch(); + assertThat(batch.isIdempotent()).isTrue(); - private RegularStatement statementWithIdempotency(Boolean idempotency) { - RegularStatement statement = new 
SimpleStatement("fake statement"); - if (idempotency != null) { - statement.setIdempotent(idempotency); - assertThat(statement.isIdempotent()).isEqualTo(idempotency); - } else { - assertThat(statement.isIdempotent()).isNull(); - } - return statement; + batch.add(statementWithIdempotency(false)); + assertThat(batch.isIdempotent()).isFalse(); + + batch.setIdempotent(false); + assertThat(batch.isIdempotent()).isFalse(); + } + + private RegularStatement statementWithIdempotency(Boolean idempotency) { + RegularStatement statement = new SimpleStatement("fake statement"); + if (idempotency != null) { + statement.setIdempotent(idempotency); + assertThat(statement.isIdempotent()).isEqualTo(idempotency); + } else { + assertThat(statement.isIdempotent()).isNull(); } + return statement; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AbstractPoliciesTest.java b/driver-core/src/test/java/com/datastax/driver/core/AbstractPoliciesTest.java index 2ad598918ee..c8366175699 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AbstractPoliciesTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AbstractPoliciesTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
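The test above pins down how a batch derives its idempotence flag from its children: `true` while every added statement is idempotent, `false` as soon as one non-idempotent statement is added, `null` when nothing is non-idempotent but at least one statement has unknown idempotence, and an explicit `setIdempotent` call always overrides the derived value. A small sketch of the same rules through the public `BatchStatement`/`SimpleStatement` types (the concrete `createBatch()` implementations live outside this hunk, and the CQL strings below are placeholders; nothing is executed):

```
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.SimpleStatement;

public class BatchIdempotenceExample {
  public static void main(String[] args) {
    BatchStatement batch = new BatchStatement();

    SimpleStatement idempotent = new SimpleStatement("UPDATE users SET name = 'x' WHERE id = 1");
    idempotent.setIdempotent(true);

    SimpleStatement nonIdempotent =
        new SimpleStatement("UPDATE tallies SET hits = hits + 1 WHERE id = 1");
    nonIdempotent.setIdempotent(false);

    batch.add(idempotent);
    System.out.println(batch.isIdempotent()); // true: all children are idempotent

    batch.add(nonIdempotent);
    System.out.println(batch.isIdempotent()); // false: one child is non-idempotent

    batch.setIdempotent(true);
    System.out.println(batch.isIdempotent()); // true: the explicit flag wins over the derived value
  }
}
```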
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +17,6 @@ */ package com.datastax.driver.core; -import com.google.common.util.concurrent.Uninterruptibles; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; - -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.Callable; - import static com.datastax.driver.core.ConditionChecker.check; import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_GENERIC_FORMAT; import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; @@ -35,162 +25,180 @@ import static java.util.concurrent.TimeUnit.MINUTES; import static org.testng.Assert.assertEquals; -public abstract class AbstractPoliciesTest extends CCMTestsSupport { - - private static final Logger logger = LoggerFactory.getLogger(AbstractPoliciesTest.class); - private String tableName; - - private static class SchemaInAgreement implements Callable { - - private final Cluster cluster; - - private SchemaInAgreement(Cluster cluster) { - this.cluster = cluster; - } - - @Override - public Boolean call() throws Exception { - return cluster.getMetadata().checkSchemaAgreement(); - } - } - - protected Map coordinators = new HashMap(); - - protected PreparedStatement prepared; - - protected void createSchema(int replicationFactor) { - final String ks = TestUtils.generateIdentifier("ks_"); - tableName = TestUtils.generateIdentifier("table_"); - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ks, replicationFactor)); - useKeyspace(ks); - session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", tableName)); - check().before(5, MINUTES).that(new SchemaInAgreement(cluster())).becomesTrue(); - } - - protected void createMultiDCSchema(int dc1RF, int dc2RF) { - final String ks = TestUtils.generateIdentifier("ks_"); - tableName = TestUtils.generateIdentifier("table_"); - session().execute(String.format(CREATE_KEYSPACE_GENERIC_FORMAT, ks, "NetworkTopologyStrategy", String.format("'dc1' : %d, 'dc2' : %d", dc1RF, dc2RF))); - useKeyspace(ks); - session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", tableName)); - check().before(5, MINUTES).that(new SchemaInAgreement(cluster())).becomesTrue(); - } - - /** - * Coordinator management/count - */ - protected void addCoordinator(ResultSet rs) { - InetAddress coordinator = rs.getExecutionInfo().getQueriedHost().getAddress(); - Integer n = coordinators.get(coordinator); - coordinators.put(coordinator, n == null ? 
1 : n + 1); - } - - @BeforeMethod(groups = "long") - protected void resetCoordinators() { - coordinators = new HashMap(); - } - - @AfterMethod(groups = "long") - protected void pause() { - // pause before engaging in another expensive CCM cluster creation - Uninterruptibles.sleepUninterruptibly(1, MINUTES); - } - - private String queriedMapString() { - StringBuilder sb = new StringBuilder(); - sb.append("{"); - for (Map.Entry entry : coordinators.entrySet()) - sb.append(entry.getKey()).append(" : ").append(entry.getValue()).append(", "); - return sb.append("}").toString(); - } - - /** - * Helper test methods - */ - protected void assertQueried(String host, int n) { - try { - Integer queried = coordinators.get(InetAddress.getByName(host)); - if (logger.isDebugEnabled()) - logger.debug(String.format("Expected: %s\tReceived: %s", n, queried)); - else - assertEquals(queried == null ? 0 : queried, n, queriedMapString()); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - /** - * Init methods that handle writes using batch and consistency options. - */ - protected void init(int n) { - init(n, false, ConsistencyLevel.ONE); - } - - protected void init(int n, boolean batch) { - init(n, batch, ConsistencyLevel.ONE); - } - - protected void init(int n, ConsistencyLevel cl) { - write(n, false, cl); - } - - protected void init(int n, boolean batch, ConsistencyLevel cl) { - write(n, batch, cl); - prepared = session().prepare("SELECT * FROM " + tableName + " WHERE k = ?").setConsistencyLevel(cl); - } - - protected void write(int n) { - write(n, false, ConsistencyLevel.ONE); - } - - protected void write(int n, boolean batch) { - write(n, batch, ConsistencyLevel.ONE); - } - - protected void write(int n, ConsistencyLevel cl) { - write(n, false, cl); - } - - protected void write(int n, boolean batch, ConsistencyLevel cl) { - // We don't use insert for our test because the resultSet don't ship the queriedHost - // Also note that we don't use tracing because this would trigger requests that screw up the test - for (int i = 0; i < n; ++i) - if (batch) - // BUG: WriteType == SIMPLE - session().execute(batch() - .add(insertInto(tableName).values(new String[]{"k", "i"}, new Object[]{0, 0})) - .setConsistencyLevel(cl)); - else - session().execute(new SimpleStatement(String.format("INSERT INTO %s(k, i) VALUES (0, 0)", tableName)).setConsistencyLevel(cl)); - } - - - /** - * Query methods that handle reads based on PreparedStatements and/or ConsistencyLevels. 
- */ - protected void query(int n) { - query(n, false, ConsistencyLevel.ONE); - } - - protected void query(int n, boolean usePrepared) { - query(n, usePrepared, ConsistencyLevel.ONE); - } - - protected void query(int n, ConsistencyLevel cl) { - query(n, false, cl); - } +import com.google.common.util.concurrent.Uninterruptibles; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.Callable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; - protected void query(int n, boolean usePrepared, ConsistencyLevel cl) { - if (usePrepared) { - BoundStatement bs = prepared.bind(0); - for (int i = 0; i < n; ++i) - addCoordinator(session().execute(bs)); - } else { - ByteBuffer routingKey = ByteBuffer.allocate(4); - routingKey.putInt(0, 0); - for (int i = 0; i < n; ++i) - addCoordinator(session().execute(new SimpleStatement(String.format("SELECT * FROM %s WHERE k = 0", tableName)).setRoutingKey(routingKey).setConsistencyLevel(cl))); - } - } +public abstract class AbstractPoliciesTest extends CCMTestsSupport { + private static final Logger logger = LoggerFactory.getLogger(AbstractPoliciesTest.class); + private String tableName; + + private static class SchemaInAgreement implements Callable { + + private final Cluster cluster; + + private SchemaInAgreement(Cluster cluster) { + this.cluster = cluster; + } + + @Override + public Boolean call() throws Exception { + return cluster.getMetadata().checkSchemaAgreement(); + } + } + + protected Map coordinators = new HashMap(); + + protected PreparedStatement prepared; + + protected void createSchema(int replicationFactor) { + final String ks = TestUtils.generateIdentifier("ks_"); + tableName = TestUtils.generateIdentifier("table_"); + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ks, replicationFactor)); + useKeyspace(ks); + session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", tableName)); + check().before(5, MINUTES).that(new SchemaInAgreement(cluster())).becomesTrue(); + } + + protected void createMultiDCSchema(int dc1RF, int dc2RF) { + final String ks = TestUtils.generateIdentifier("ks_"); + tableName = TestUtils.generateIdentifier("table_"); + session() + .execute( + String.format( + CREATE_KEYSPACE_GENERIC_FORMAT, + ks, + "NetworkTopologyStrategy", + String.format("'dc1' : %d, 'dc2' : %d", dc1RF, dc2RF))); + useKeyspace(ks); + session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", tableName)); + check().before(5, MINUTES).that(new SchemaInAgreement(cluster())).becomesTrue(); + } + + /** Coordinator management/count */ + protected void addCoordinator(ResultSet rs) { + InetAddress coordinator = + rs.getExecutionInfo().getQueriedHost().getEndPoint().resolve().getAddress(); + Integer n = coordinators.get(coordinator); + coordinators.put(coordinator, n == null ? 
1 : n + 1); + } + + @BeforeMethod(groups = "long") + protected void resetCoordinators() { + coordinators = new HashMap(); + } + + @AfterMethod(groups = "long") + protected void pause() { + // pause before engaging in another expensive CCM cluster creation + Uninterruptibles.sleepUninterruptibly(1, MINUTES); + } + + private String queriedMapString() { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + for (Map.Entry entry : coordinators.entrySet()) + sb.append(entry.getKey()).append(" : ").append(entry.getValue()).append(", "); + return sb.append("}").toString(); + } + + /** Helper test methods */ + protected void assertQueried(String host, int n) { + try { + Integer queried = coordinators.get(InetAddress.getByName(host)); + if (logger.isDebugEnabled()) + logger.debug(String.format("Expected: %s\tReceived: %s", n, queried)); + else assertEquals(queried == null ? 0 : queried, n, queriedMapString()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** Init methods that handle writes using batch and consistency options. */ + protected void init(int n) { + init(n, false, ConsistencyLevel.ONE); + } + + protected void init(int n, boolean batch) { + init(n, batch, ConsistencyLevel.ONE); + } + + protected void init(int n, ConsistencyLevel cl) { + write(n, false, cl); + } + + protected void init(int n, boolean batch, ConsistencyLevel cl) { + write(n, batch, cl); + prepared = + session().prepare("SELECT * FROM " + tableName + " WHERE k = ?").setConsistencyLevel(cl); + } + + protected void write(int n) { + write(n, false, ConsistencyLevel.ONE); + } + + protected void write(int n, boolean batch) { + write(n, batch, ConsistencyLevel.ONE); + } + + protected void write(int n, ConsistencyLevel cl) { + write(n, false, cl); + } + + protected void write(int n, boolean batch, ConsistencyLevel cl) { + // We don't use insert for our test because the resultSet don't ship the queriedHost + // Also note that we don't use tracing because this would trigger requests that screw up the + // test + for (int i = 0; i < n; ++i) + if (batch) + // BUG: WriteType == SIMPLE + session() + .execute( + batch() + .add(insertInto(tableName).values(new String[] {"k", "i"}, new Object[] {0, 0})) + .setConsistencyLevel(cl)); + else + session() + .execute( + new SimpleStatement(String.format("INSERT INTO %s(k, i) VALUES (0, 0)", tableName)) + .setConsistencyLevel(cl)); + } + + /** Query methods that handle reads based on PreparedStatements and/or ConsistencyLevels. 
*/ + protected void query(int n) { + query(n, false, ConsistencyLevel.ONE); + } + + protected void query(int n, boolean usePrepared) { + query(n, usePrepared, ConsistencyLevel.ONE); + } + + protected void query(int n, ConsistencyLevel cl) { + query(n, false, cl); + } + + protected void query(int n, boolean usePrepared, ConsistencyLevel cl) { + if (usePrepared) { + BoundStatement bs = prepared.bind(0); + for (int i = 0; i < n; ++i) addCoordinator(session().execute(bs)); + } else { + ByteBuffer routingKey = ByteBuffer.allocate(4); + routingKey.putInt(0, 0); + for (int i = 0; i < n; ++i) + addCoordinator( + session() + .execute( + new SimpleStatement(String.format("SELECT * FROM %s WHERE k = 0", tableName)) + .setRoutingKey(routingKey) + .setConsistencyLevel(cl))); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AbstractReconnectionHandlerTest.java b/driver-core/src/test/java/com/datastax/driver/core/AbstractReconnectionHandlerTest.java index a4ec8169ea9..fd36c94938f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AbstractReconnectionHandlerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AbstractReconnectionHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
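These policy tests count coordinators by asking each `ResultSet` which host served the request. Outside the test harness the same call shows how a load balancing policy spreads queries; a minimal sketch, assuming a node reachable at 127.0.0.1 and using the endpoint-based accessor that this change switches the tests to:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import java.net.InetSocketAddress;

public class CoordinatorExample {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {
      ResultSet rs = session.execute("SELECT release_version FROM system.local");
      // Same lookup the tests use to tally which node coordinated each query.
      InetSocketAddress coordinator =
          rs.getExecutionInfo().getQueriedHost().getEndPoint().resolve();
      System.out.println("coordinated by " + coordinator);
    }
  }
}
```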
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,13 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ConditionChecker.check; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.fail; + import com.datastax.driver.core.AbstractReconnectionHandler.HandlerFuture; import com.datastax.driver.core.AbstractReconnectionHandlerTest.MockReconnectionWork.ReconnectBehavior; import com.datastax.driver.core.exceptions.ConnectionException; @@ -23,369 +32,374 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; +import java.net.InetSocketAddress; +import java.util.concurrent.Callable; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.net.InetSocketAddress; -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicReference; - -import static com.datastax.driver.core.ConditionChecker.check; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.*; -import static org.testng.Assert.fail; - public class AbstractReconnectionHandlerTest { - private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandlerTest.class); - - ScheduledExecutorService executor; - MockReconnectionSchedule schedule; - MockReconnectionWork work; - AtomicReference> future = new AtomicReference>(); - AbstractReconnectionHandler handler; - Callable nextTryAssigned = new Callable() { + private static final Logger logger = + LoggerFactory.getLogger(AbstractReconnectionHandlerTest.class); + + ScheduledExecutorService executor; + MockReconnectionSchedule schedule; + MockReconnectionWork work; + AtomicReference> future = new AtomicReference>(); + AbstractReconnectionHandler handler; + Callable nextTryAssigned = + new Callable() { @Override public Boolean call() throws Exception { - return handler.handlerFuture.nextTry != null; + return handler.handlerFuture.nextTry != null; } - }; - - @BeforeMethod(groups = {"unit", "short"}) - public void setup() { - executor = spy(Executors.newScheduledThreadPool(2)); - schedule = new MockReconnectionSchedule(); - work = new MockReconnectionWork(); - future.set(null); - handler = new AbstractReconnectionHandler("test", executor, schedule, future) { - @Override - protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { - return work.tryReconnect(); - } - - @Override - protected void onReconnection(Connection connection) { - work.onReconnection(); - } + }; + + @BeforeMethod(groups = {"unit", "short"}) + public void setup() { + executor = spy(Executors.newScheduledThreadPool(2)); + schedule = new MockReconnectionSchedule(); + 
work = new MockReconnectionWork(); + future.set(null); + handler = + new AbstractReconnectionHandler("test", executor, schedule, future) { + @Override + protected Connection tryReconnect() + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { + return work.tryReconnect(); + } + + @Override + protected void onReconnection(Connection connection) { + work.onReconnection(); + } }; + } + + @AfterMethod( + groups = {"unit", "short"}, + alwaysRun = true) + public void tearDown() { + if (future.get() != null) future.get().cancel(false); + executor.shutdownNow(); + } + + @Test(groups = "unit") + public void should_complete_if_first_reconnection_succeeds() { + handler.start(); + + assertThat(future.get()).isNotNull(); + assertThat(future.get().isDone()).isFalse(); + + schedule.tick(); + work.nextReconnect = ReconnectBehavior.SUCCEED; + work.tick(); + + waitForCompletion(); + + assertThat(work.success).isTrue(); + assertThat(work.tries).isEqualTo(1); + assertThat(future.get()).isNull(); + } + + @Test(groups = "unit") + public void should_retry_until_success() { + handler.start(); + + int simulatedErrors = 10; + for (int i = 0; i < simulatedErrors; i++) { + schedule.tick(); + work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; + work.tick(); + assertThat(work.success).isFalse(); + assertThat(future.get().isDone()).isFalse(); } - @AfterMethod(groups = {"unit", "short"}, alwaysRun = true) - public void tearDown() { - if (future.get() != null) - future.get().cancel(false); - executor.shutdownNow(); - } + schedule.tick(); + work.nextReconnect = ReconnectBehavior.SUCCEED; + work.tick(); - @Test(groups = "unit") - public void should_complete_if_first_reconnection_succeeds() { - handler.start(); + waitForCompletion(); - assertThat(future.get()).isNotNull(); - assertThat(future.get().isDone()).isFalse(); + assertThat(work.success).isTrue(); + assertThat(work.tries).isEqualTo(simulatedErrors + 1); + assertThat(future.get()).isNull(); + } - schedule.tick(); - work.nextReconnect = ReconnectBehavior.SUCCEED; - work.tick(); + @Test(groups = "unit") + public void should_stop_if_cancelled_before_first_attempt() { + schedule.delay = 10 * 1000; // give ourselves time to cancel + handler.start(); + schedule.tick(); - waitForCompletion(); + future.get().cancel(false); - assertThat(work.success).isTrue(); - assertThat(work.tries).isEqualTo(1); - assertThat(future.get()).isNull(); - } + waitForCompletion(); - @Test(groups = "unit") - public void should_retry_until_success() { - handler.start(); - - int simulatedErrors = 10; - for (int i = 0; i < simulatedErrors; i++) { - schedule.tick(); - work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; - work.tick(); - assertThat(work.success).isFalse(); - assertThat(future.get().isDone()).isFalse(); - } + assertThat(work.success).isFalse(); + assertThat(work.tries).isEqualTo(0); + assertThat(future.get().isCancelled()).isTrue(); + } - schedule.tick(); - work.nextReconnect = ReconnectBehavior.SUCCEED; - work.tick(); + @Test(groups = "short") + public void should_stop_if_cancelled_between_attempts() { + handler.start(); - waitForCompletion(); + // Wait for the initial schedule of a reconnect. + verify(executor, timeout(10000)).schedule(handler, 0, TimeUnit.MILLISECONDS); - assertThat(work.success).isTrue(); - assertThat(work.tries).isEqualTo(simulatedErrors + 1); - assertThat(future.get()).isNull(); - } + // Force a failed reconnect. 
+ schedule.tick(); + work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; + // Tick work, should trigger the barrier in tryReconnect. + work.tick(); - @Test(groups = "unit") - public void should_stop_if_cancelled_before_first_attempt() { - schedule.delay = 10 * 1000; // give ourselves time to cancel - handler.start(); - schedule.tick(); + // Tick schedule, should cause nextDelayMs to proceed, reconnect handler will call reschedule. + schedule.delay = 3000; + schedule.tick(); - future.get().cancel(false); + // Ensure reconnect is scheduled (slight timing window after handling failed reconnect + // and scheduling next reconnect). + verify(executor, timeout(10000)).schedule(handler, schedule.delay, TimeUnit.MILLISECONDS); - waitForCompletion(); + // Wait until nextTry is assigned after schedule completes. + check().before(10000).that(nextTryAssigned).becomesTrue(); - assertThat(work.success).isFalse(); - assertThat(work.tries).isEqualTo(0); - assertThat(future.get().isCancelled()).isTrue(); - } + future.get().cancel(false); - @Test(groups = "short") - public void should_stop_if_cancelled_between_attempts() { - handler.start(); + // Should immediately return as the future was cancelled while the task was scheduled. + waitForCompletion(); - // Wait for the initial schedule of a reconnect. - verify(executor, timeout(10000)).schedule(handler, 0, TimeUnit.MILLISECONDS); + assertThat(work.success).isFalse(); + // Should have had 1 failed attempt, no second attempt since cancelled. + assertThat(work.tries).isEqualTo(1); - // Force a failed reconnect. - schedule.tick(); - work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; - // Tick work, should trigger the barrier in tryReconnect. - work.tick(); + // The future will be marked cancelled and thus not executed. + ListenableFuture currentAttempt = future.get(); - // Tick schedule, should cause nextDelayMs to proceed, reconnect handler will call reschedule. - schedule.delay = 3000; - schedule.tick(); + assertThat(currentAttempt).isInstanceOf(HandlerFuture.class); + HandlerFuture handlerFuture = (HandlerFuture) currentAttempt; + assertThat(handlerFuture.isCancelled()); - // Ensure reconnect is scheduled (slight timing window after handling failed reconnect - // and scheduling next reconnect). - verify(executor, timeout(10000)).schedule(handler, schedule.delay, TimeUnit.MILLISECONDS); + // The next try should also be cancelled. + assertThat(handlerFuture.nextTry).isNotNull(); + assertThat(handlerFuture.nextTry.isCancelled()); + } - // Wait until nextTry is assigned after schedule completes. - check().before(10000).that(nextTryAssigned).becomesTrue(); + @Test(groups = "unit") + public void should_complete_if_cancelled_during_successful_reconnect() + throws InterruptedException { + handler.start(); - future.get().cancel(false); + schedule.tick(); + work.nextReconnect = ReconnectBehavior.SUCCEED; - // Should immediately return as the future was cancelled while the task was scheduled. - waitForCompletion(); + // short pause to make sure we are in the middle of the handler's run method (it checks + // if the future is cancelled at the beginning) + TimeUnit.MILLISECONDS.sleep(100); + // don't force interruption because that's what the production code does + future.get().cancel(false); - assertThat(work.success).isFalse(); - // Should have had 1 failed attempt, no second attempt since cancelled. - assertThat(work.tries).isEqualTo(1); + work.tick(); - // The future will be marked cancelled and thus not executed. 
- ListenableFuture currentAttempt = future.get(); + waitForCompletion(); - assertThat(currentAttempt).isInstanceOf(HandlerFuture.class); - HandlerFuture handlerFuture = (HandlerFuture) currentAttempt; - assertThat(handlerFuture.isCancelled()); + assertThat(work.success).isTrue(); + assertThat(work.tries).isEqualTo(1); + } - // The next try should also be cancelled. - assertThat(handlerFuture.nextTry).isNotNull(); - assertThat(handlerFuture.nextTry.isCancelled()); - } + @Test(groups = "unit") + public void should_stop_if_cancelled_during_failed_reconnect() throws InterruptedException { + handler.start(); - @Test(groups = "unit") - public void should_complete_if_cancelled_during_successful_reconnect() throws InterruptedException { - handler.start(); + schedule.tick(); + work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; - schedule.tick(); - work.nextReconnect = ReconnectBehavior.SUCCEED; + // short pause to make sure we are in the middle of the handler's run method (it checks + // if the future is cancelled at the beginning) + TimeUnit.MILLISECONDS.sleep(100); + // don't force interruption because that's what the production code does + future.get().cancel(false); - // short pause to make sure we are in the middle of the handler's run method (it checks - // if the future is cancelled at the beginning) - TimeUnit.MILLISECONDS.sleep(100); - // don't force interruption because that's what the production code does - future.get().cancel(false); + work.tick(); - work.tick(); + // Need to + schedule.tick(); - waitForCompletion(); + waitForCompletion(); - assertThat(work.success).isTrue(); - assertThat(work.tries).isEqualTo(1); - } + assertThat(work.success).isFalse(); + assertThat(work.tries).isEqualTo(1); + } - @Test(groups = "unit") - public void should_stop_if_cancelled_during_failed_reconnect() throws InterruptedException { - handler.start(); + @Test(groups = "unit") + public void should_yield_to_another_running_handler() { + // Set an uncompleted future, representing a running handler + future.set(SettableFuture.create()); - schedule.tick(); - work.nextReconnect = ReconnectBehavior.THROW_EXCEPTION; + handler.start(); - // short pause to make sure we are in the middle of the handler's run method (it checks - // if the future is cancelled at the beginning) - TimeUnit.MILLISECONDS.sleep(100); - // don't force interruption because that's what the production code does - future.get().cancel(false); + // Increase the delay to make sure that the first attempt does not start before the check + // for cancellation (which would require calling work.tick()) + schedule.delay = 5000; + schedule.tick(); - work.tick(); + waitForCompletion(); - // Need to - schedule.tick(); + assertThat(work.success).isFalse(); + } - waitForCompletion(); + /** + * Note: a handler that succeeds immediately resets the future to null, so there is a very small + * window of opportunity for this scenario. Therefore we consider that if we find a completed + * future, the connection was successfully re-established a few milliseconds ago, so we don't + * start another attempt. 
+ */ + @Test(groups = "unit") + public void should_yield_to_another_handler_that_just_succeeded() { + future.set(Futures.immediateCheckedFuture(null)); - assertThat(work.success).isFalse(); - assertThat(work.tries).isEqualTo(1); - } + handler.start(); - @Test(groups = "unit") - public void should_yield_to_another_running_handler() { - // Set an uncompleted future, representing a running handler - future.set(SettableFuture.create()); + schedule.tick(); - handler.start(); + waitForCompletion(); - // Increase the delay to make sure that the first attempt does not start before the check - // for cancellation (which would require calling work.tick()) - schedule.delay = 5000; - schedule.tick(); + assertThat(work.success).isFalse(); + } - waitForCompletion(); + @Test(groups = "unit") + public void should_run_if_another_handler_was_cancelled() { + future.set(Futures.immediateCancelledFuture()); - assertThat(work.success).isFalse(); - } + handler.start(); - /** - * Note: a handler that succeeds immediately resets the future to null, so there is a very small window of opportunity - * for this scenario. Therefore we consider that if we find a completed future, the connection was successfully - * re-established a few milliseconds ago, so we don't start another attempt. - */ - @Test(groups = "unit") - public void should_yield_to_another_handler_that_just_succeeded() { - future.set(Futures.immediateCheckedFuture(null)); + schedule.tick(); + work.nextReconnect = ReconnectBehavior.SUCCEED; + work.tick(); - handler.start(); + waitForCompletion(); - schedule.tick(); + assertThat(work.success).isTrue(); + assertThat(work.tries).isEqualTo(1); + assertThat(future.get()).isNull(); + } - waitForCompletion(); + /** + * A reconnection schedule that allows manually setting the delay. + * + *
<p>
    To make testing easier, nextDelay blocks until tick() is called from the main thread. + */ + static class MockReconnectionSchedule implements ReconnectionSchedule { + volatile long delay; + private final CyclicBarrier barrier = new CyclicBarrier(2); - assertThat(work.success).isFalse(); - } + // Hack to work around the fact that the first call to nextDelayMs is synchronous + private volatile boolean firstDelay = true; + private volatile boolean firstTick = true; - @Test(groups = "unit") - public void should_run_if_another_handler_was_cancelled() { - future.set(Futures.immediateCancelledFuture()); - - handler.start(); - - schedule.tick(); - work.nextReconnect = ReconnectBehavior.SUCCEED; - work.tick(); - - waitForCompletion(); - - assertThat(work.success).isTrue(); - assertThat(work.tries).isEqualTo(1); - assertThat(future.get()).isNull(); - } - - /** - * A reconnection schedule that allows manually setting the delay. - *
<p>
    - * To make testing easier, nextDelay blocks until tick() is called from the main thread. - */ - static class MockReconnectionSchedule implements ReconnectionSchedule { - volatile long delay; - private final CyclicBarrier barrier = new CyclicBarrier(2); - - // Hack to work around the fact that the first call to nextDelayMs is synchronous - private volatile boolean firstDelay = true; - private volatile boolean firstTick = true; - - @Override - public long nextDelayMs() { - if (firstDelay) - firstDelay = false; - else { - logger.debug("in schedule, waiting for tick from main thread"); - try { - barrier.await(10, TimeUnit.SECONDS); - logger.debug("in schedule, got tick from main thread, proceeding"); - } catch (Exception e) { - fail("Error while waiting for tick", e); - } - } - logger.debug("in schedule, returning {}", delay); - return delay; - } - - public void tick() { - if (firstTick) - firstTick = false; - else { - logger.debug("send tick to schedule"); - try { - barrier.await(10, TimeUnit.SECONDS); - } catch (Exception e) { - fail("Error while sending tick, no thread was waiting", e); - } - barrier.reset(); - } + @Override + public long nextDelayMs() { + if (firstDelay) firstDelay = false; + else { + logger.debug("in schedule, waiting for tick from main thread"); + try { + barrier.await(10, TimeUnit.SECONDS); + logger.debug("in schedule, got tick from main thread, proceeding"); + } catch (Exception e) { + fail("Error while waiting for tick", e); } + } + logger.debug("in schedule, returning {}", delay); + return delay; } - /** - * Simulates the work done by the overridable methods of the handler. - *
<p>
    - * Allows choosing whether the next reconnect will succeed or throw an exception. - * To make testing easier, tryReconnect blocks until tick() is called from the main thread. - */ - static class MockReconnectionWork { - enum ReconnectBehavior { - SUCCEED, THROW_EXCEPTION - } - - private final CyclicBarrier barrier = new CyclicBarrier(2); - - volatile ReconnectBehavior nextReconnect; - - volatile int tries = 0; - volatile boolean success = false; - - protected Connection tryReconnect() throws ConnectionException { - tries += 1; - logger.debug("in reconnection work, wait for tick from main thread"); - try { - barrier.await(60, TimeUnit.SECONDS); - logger.debug("in reconnection work, got tick from main thread, proceeding"); - } catch (Exception e) { - fail("Error while waiting for tick", e); - } - switch (nextReconnect) { - case SUCCEED: - logger.debug("simulate reconnection success"); - return null; - case THROW_EXCEPTION: - logger.debug("simulate reconnection error"); - throw new ConnectionException(new InetSocketAddress(8888), - "Simulated exception from mock reconnection"); - default: - throw new AssertionError(); - } + public void tick() { + if (firstTick) firstTick = false; + else { + logger.debug("send tick to schedule"); + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Error while sending tick, no thread was waiting", e); } + barrier.reset(); + } + } + } + + /** + * Simulates the work done by the overridable methods of the handler. + * + *
<p>
    Allows choosing whether the next reconnect will succeed or throw an exception. To make + * testing easier, tryReconnect blocks until tick() is called from the main thread. + */ + static class MockReconnectionWork { + enum ReconnectBehavior { + SUCCEED, + THROW_EXCEPTION + } - public void tick() { - logger.debug("send tick to reconnection work"); - try { - barrier.await(60, TimeUnit.SECONDS); - } catch (Exception e) { - fail("Error while sending tick, no thread was waiting", e); - } - barrier.reset(); - } + private final CyclicBarrier barrier = new CyclicBarrier(2); + + volatile ReconnectBehavior nextReconnect; + + volatile int tries = 0; + volatile boolean success = false; + + protected Connection tryReconnect() throws ConnectionException { + tries += 1; + logger.debug("in reconnection work, wait for tick from main thread"); + try { + barrier.await(60, TimeUnit.SECONDS); + logger.debug("in reconnection work, got tick from main thread, proceeding"); + } catch (Exception e) { + fail("Error while waiting for tick", e); + } + switch (nextReconnect) { + case SUCCEED: + logger.debug("simulate reconnection success"); + return null; + case THROW_EXCEPTION: + logger.debug("simulate reconnection error"); + throw new ConnectionException( + EndPoints.forAddress(new InetSocketAddress(8888)), + "Simulated exception from mock reconnection"); + default: + throw new AssertionError(); + } + } - protected void onReconnection() { - success = true; - } + public void tick() { + logger.debug("send tick to reconnection work"); + try { + barrier.await(60, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Error while sending tick, no thread was waiting", e); + } + barrier.reset(); } - private void waitForCompletion() { - executor.shutdown(); - try { - boolean shutdown = executor.awaitTermination(30, TimeUnit.SECONDS); - if (!shutdown) - fail("executor ran for longer than expected"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for executor to shutdown"); - } + protected void onReconnection() { + success = true; + } + } + + private void waitForCompletion() { + executor.shutdown(); + try { + boolean shutdown = executor.awaitTermination(30, TimeUnit.SECONDS); + if (!shutdown) fail("executor ran for longer than expected"); + } catch (InterruptedException e) { + fail("Interrupted while waiting for executor to shutdown"); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AbstractReplicationStrategyTest.java b/driver-core/src/test/java/com/datastax/driver/core/AbstractReplicationStrategyTest.java index 28d4c9ac6ec..766abe6ddb5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AbstractReplicationStrategyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AbstractReplicationStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
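The reconnection tests above exercise the handler's contract: it retries on the delays handed out by a `ReconnectionSchedule`, yields when another handler already owns the reconnection slot, and stops once its future is cancelled. In regular use those schedules come from the cluster's configured reconnection policy; the sketch below only shows wiring the stock policies and assumes the usual class names from the driver's `policies` package, which do not appear in this diff:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.ConstantReconnectionPolicy;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;

public class ReconnectionPolicyExample {
  public static void main(String[] args) {
    // Exponential backoff: start at 1 second, cap the delay between attempts at 10 minutes.
    Cluster exponential =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 600000))
            .build();

    // Fixed delay: retry a downed node every 5 seconds.
    Cluster constant =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withReconnectionPolicy(new ConstantReconnectionPolicy(5000))
            .build();

    exponential.close();
    constant.close();
  }
}
```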
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,13 @@ */ package com.datastax.driver.core; +import static org.mockito.Mockito.mock; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; + import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -26,138 +32,140 @@ import java.util.Map; import java.util.Set; -import static org.mockito.Mockito.mock; -import static org.testng.Assert.*; - /** - * Base class for replication strategy tests. Currently only supports testing - * using the default Murmur3Partitioner. + * Base class for replication strategy tests. Currently only supports testing using the default + * Murmur3Partitioner. */ public class AbstractReplicationStrategyTest { - private static final Token.Factory partitioner = Token.getFactory("Murmur3Partitioner"); - - private static final Cluster.Manager mockManager = mock(Cluster.Manager.class); - - protected static class HostMock extends Host { - private final InetSocketAddress address; - - private HostMock(InetSocketAddress address, Cluster.Manager manager) { - super(address, new ConvictionPolicy.DefaultConvictionPolicy.Factory(), manager); - this.address = address; - } + private static final Token.Factory partitioner = Token.getFactory("Murmur3Partitioner"); - private HostMock(InetSocketAddress address, String dc, String rack, Cluster.Manager manager) { - this(address, manager); - this.setLocationInfo(dc, rack); - } + private static final Cluster.Manager mockManager = mock(Cluster.Manager.class); - @Override - public String toString() { - return address.toString(); - } + protected static class HostMock extends Host { + private final InetSocketAddress address; - public InetSocketAddress getMockAddress() { - return address; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof HostMock)) - return false; - - return address.equals(((HostMock) o).address); - } - - @Override - public int hashCode() { - return address.hashCode(); - } - } - - protected static Token.Factory partitioner() { - return partitioner; + private HostMock(InetSocketAddress address, Cluster.Manager manager) { + super( + EndPoints.forAddress(address), + new ConvictionPolicy.DefaultConvictionPolicy.Factory(), + manager); + this.address = address; } - /** - * Convenience method to quickly create a mock host by a given address. - * Specified address must be accessible, otherwise a RuntimeException is thrown - */ - protected static HostMock host(InetSocketAddress address) { - return new HostMock(address, mockManager); + private HostMock(InetSocketAddress address, String dc, String rack, Cluster.Manager manager) { + this(address, manager); + this.setLocationInfo(dc, rack); } - /** - * Convenience method to quickly create a mock host by the given address - * located in the given datacenter/rack - */ - protected static HostMock host(InetSocketAddress address, String dc, String rack) { - return new HostMock(address, dc, rack, mockManager); + @Override + public String toString() { + return address.toString(); } - /** - * Convenience method to cast a Host object into a MockHost. 
- * Returns null if parameter host is not a mock - */ - protected static HostMock asMock(Host host) { - return (host instanceof HostMock ? (HostMock) host : null); + public InetSocketAddress getMockAddress() { + return address; } - /** - * Convenience method to quickly retrieve a mock host's address as specified - * if created by the host(...) methods. Returns null if - * given host is not a mock. - */ - protected static InetSocketAddress mockAddress(Host host) { - HostMock mock = asMock(host); - return mock == null ? null : mock.getMockAddress(); - } + @Override + public boolean equals(Object o) { + if (!(o instanceof HostMock)) return false; - protected static Token token(String value) { - return partitioner.fromString(value); + return address.equals(((HostMock) o).address); } - protected static List tokens(String... values) { - Builder builder = ImmutableList.builder(); - for (String value : values) { - builder.add(token(value)); - } - return builder.build(); + @Override + public int hashCode() { + return address.hashCode(); } - - /** - * Asserts that the replica map for a given token contains the expected list of replica hosts. - * Hosts are checked in order, replica placement should be an ordered set - */ - protected static void assertReplicaPlacement(Map> replicaMap, Token token, InetSocketAddress... expected) { - Set replicaSet = replicaMap.get(token); - assertNotNull(replicaSet); - assertReplicasForToken(replicaSet, expected); + } + + protected static Token.Factory partitioner() { + return partitioner; + } + + /** + * Convenience method to quickly create a mock host by a given address. Specified address must be + * accessible, otherwise a RuntimeException is thrown + */ + protected static HostMock host(InetSocketAddress address) { + return new HostMock(address, mockManager); + } + + /** + * Convenience method to quickly create a mock host by the given address located in the given + * datacenter/rack + */ + protected static HostMock host(InetSocketAddress address, String dc, String rack) { + return new HostMock(address, dc, rack, mockManager); + } + + /** + * Convenience method to cast a Host object into a MockHost. Returns null if parameter host is not + * a mock + */ + protected static HostMock asMock(Host host) { + return (host instanceof HostMock ? (HostMock) host : null); + } + + /** + * Convenience method to quickly retrieve a mock host's address as specified if created by the + * host(...) methods. Returns null if given host is not a mock. + */ + protected static InetSocketAddress mockAddress(Host host) { + HostMock mock = asMock(host); + return mock == null ? null : mock.getMockAddress(); + } + + protected static Token token(String value) { + return partitioner.fromString(value); + } + + protected static List tokens(String... values) { + Builder builder = ImmutableList.builder(); + for (String value : values) { + builder.add(token(value)); } - - /** - * Checks if a given ordered set of replicas matches the expected list of replica hosts - */ - protected static void assertReplicasForToken(Set replicaSet, InetSocketAddress... 
expected) { - final String message = "Contents of replica set: " + replicaSet + " do not match expected hosts: " + Arrays.toString(expected); - assertEquals(replicaSet.size(), expected.length, message); - - int i = 0; - for (Host hostReturned : replicaSet) { - boolean match = true; - - if (!expected[i++].equals(mockAddress(hostReturned))) { - match = false; - } - assertTrue(match, message); - } + return builder.build(); + } + + /** + * Asserts that the replica map for a given token contains the expected list of replica hosts. + * Hosts are checked in order, replica placement should be an ordered set + */ + protected static void assertReplicaPlacement( + Map> replicaMap, Token token, InetSocketAddress... expected) { + Set replicaSet = replicaMap.get(token); + assertNotNull(replicaSet); + assertReplicasForToken(replicaSet, expected); + } + + /** Checks if a given ordered set of replicas matches the expected list of replica hosts */ + protected static void assertReplicasForToken( + Set replicaSet, InetSocketAddress... expected) { + final String message = + "Contents of replica set: " + + replicaSet + + " do not match expected hosts: " + + Arrays.toString(expected); + assertEquals(replicaSet.size(), expected.length, message); + + int i = 0; + for (Host hostReturned : replicaSet) { + boolean match = true; + + if (!expected[i++].equals(mockAddress(hostReturned))) { + match = false; + } + assertTrue(match, message); } + } - protected static InetSocketAddress socketAddress(String address) { - try { - return new InetSocketAddress(InetAddress.getByName(address), 9042); - } catch (UnknownHostException ex) { - throw new RuntimeException(ex); - } + protected static InetSocketAddress socketAddress(String address) { + try { + return new InetSocketAddress(InetAddress.getByName(address), 9042); + } catch (UnknownHostException ex) { + throw new RuntimeException(ex); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AdditionalWritePolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/AdditionalWritePolicyTest.java new file mode 100644 index 00000000000..415387ca672 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/AdditionalWritePolicyTest.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
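The helpers above assert replica placement as an ordered set: the hosts returned for a token must match the expected addresses in order, not just as a bag. A standalone sketch of that ordered comparison using plain JDK types (the addresses below are placeholders):

```java
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class OrderedReplicaCheckSketch {
  public static void main(String[] args) {
    // LinkedHashSet keeps insertion order, mirroring an ordered replica set for one token.
    Set<InetSocketAddress> replicas = new LinkedHashSet<InetSocketAddress>();
    replicas.add(new InetSocketAddress("127.0.0.1", 9042));
    replicas.add(new InetSocketAddress("127.0.0.2", 9042));

    InetSocketAddress[] expected = {
      new InetSocketAddress("127.0.0.1", 9042), new InetSocketAddress("127.0.0.2", 9042)
    };

    // Same size, then element-by-element equality in iteration order.
    if (replicas.size() != expected.length) {
      throw new AssertionError(replicas + " does not match " + Arrays.toString(expected));
    }
    int i = 0;
    for (InetSocketAddress replica : replicas) {
      if (!expected[i++].equals(replica)) {
        throw new AssertionError(replicas + " does not match " + Arrays.toString(expected));
      }
    }
    System.out.println("replica order matches");
  }
}
```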
+ */
+package com.datastax.driver.core;
+
+import static com.datastax.driver.core.Assertions.assertThat;
+import static org.testng.Assert.fail;
+
+import com.datastax.driver.core.exceptions.InvalidQueryException;
+import com.datastax.driver.core.schemabuilder.SchemaBuilder;
+import com.datastax.driver.core.schemabuilder.TableOptions;
+import com.datastax.driver.core.utils.CassandraVersion;
+import org.testng.annotations.Test;
+
+@CassandraVersion(
+    value = "4.0.0-alpha1",
+    description = "Additional Write Policy is for Cassandra 4.0+")
+public class AdditionalWritePolicyTest extends CCMTestsSupport {
+
+  private void cleanup(String tableName) {
+    session().execute(String.format("DROP TABLE IF EXISTS %s", tableName));
+  }
+
+  @Test(groups = "short")
+  public void should_create_table_with_additional_write_policy_default() {
+    String test_table = "awp_default";
+    session()
+        .execute(
+            SchemaBuilder.createTable(test_table)
+                .addPartitionKey("pk", DataType.text())
+                .addColumn("data", DataType.text()));
+    assertThat(
+            cluster()
+                .getMetadata()
+                .getKeyspace(keyspace)
+                .getTable(test_table)
+                .getOptions()
+                .getAdditionalWritePolicy())
+        .isEqualTo("99p");
+    cleanup(test_table);
+  }
+
+  @Test(groups = "short")
+  public void should_create_table_with_additional_write_policy_percentile() {
+    String test_table = "awp_percentile";
+    session()
+        .execute(
+            SchemaBuilder.createTable(test_table)
+                .addPartitionKey("pk", DataType.text())
+                .addColumn("data", DataType.text())
+                .withOptions()
+                .additionalWritePolicy(SchemaBuilder.additionalWritePolicyPercentile(44)));
+    assertThat(
+            cluster()
+                .getMetadata()
+                .getKeyspace(keyspace)
+                .getTable(test_table)
+                .getOptions()
+                .getAdditionalWritePolicy())
+        .isEqualTo("44p");
+    cleanup(test_table);
+  }
+
+  @Test(groups = "short")
+  public void should_create_table_with_additional_write_policy_millisecs() {
+    String test_table = "awp_millisecs";
+    session()
+        .execute(
+            SchemaBuilder.createTable(test_table)
+                .addPartitionKey("pk", DataType.text())
+                .addColumn("data", DataType.text())
+                .withOptions()
+                .additionalWritePolicy(SchemaBuilder.additionalWritePolicyMillisecs(350)));
+    assertThat(
+            cluster()
+                .getMetadata()
+                .getKeyspace(keyspace)
+                .getTable(test_table)
+                .getOptions()
+                .getAdditionalWritePolicy())
+        .isEqualTo("350ms");
+    cleanup(test_table);
+  }
+
+  @Test(groups = "short")
+  public void should_create_table_with_additional_write_policy_never() {
+    String test_table = "awp_never";
+    session()
+        .execute(
+            SchemaBuilder.createTable(test_table)
+                .addPartitionKey("pk", DataType.text())
+                .addColumn("data", DataType.text())
+                .withOptions()
+                .additionalWritePolicy(SchemaBuilder.additionalWritePolicyNever()));
+    assertThat(
+            cluster()
+                .getMetadata()
+                .getKeyspace(keyspace)
+                .getTable(test_table)
+                .getOptions()
+                .getAdditionalWritePolicy())
+        .isEqualTo("NEVER");
+    cleanup(test_table);
+  }
+
+  @Test(groups = "short")
+  public void should_create_table_with_additional_write_policy_always() {
+    String test_table = "awp_always";
+    session()
+        .execute(
+            SchemaBuilder.createTable(test_table)
+                .addPartitionKey("pk", DataType.text())
+                .addColumn("data", DataType.text())
+                .withOptions()
+                .additionalWritePolicy(SchemaBuilder.additionalWritePolicyAlways()));
+    assertThat(
+            cluster()
+                .getMetadata()
+                .getKeyspace(keyspace)
+                .getTable(test_table)
+                .getOptions()
+                .getAdditionalWritePolicy())
+        .isEqualTo("ALWAYS");
+    cleanup(test_table);
+  }
+
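The assertions above pin down how each fluent option is reported back in the table metadata: "99p" when nothing is set, "<n>p" for a percentile, "<n>ms" for a duration, and NEVER/ALWAYS for the two keywords. A short usage sketch against the same SchemaBuilder API these tests exercise (the keyspace and table names are placeholders, and the rendered CQL may differ slightly):

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.datastax.driver.core.schemabuilder.SchemaStatement;

public class AdditionalWritePolicySketch {
  public static void main(String[] args) {
    // Builds a CREATE TABLE whose additional_write_policy should come back as "44p"
    // in the table metadata, matching the percentile assertion above.
    SchemaStatement create =
        SchemaBuilder.createTable("my_ks", "my_table")
            .addPartitionKey("pk", DataType.text())
            .addColumn("data", DataType.text())
            .withOptions()
            .additionalWritePolicy(SchemaBuilder.additionalWritePolicyPercentile(44));
    System.out.println(create.getQueryString());
  }
}
```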
+  @Test(groups = "short")
+  public void should_fail_to_create_table_with_invalid_additional_write_policy() {
+    String test_table = "awp_invalid";
+    try {
+      session()
+          .execute(
+              SchemaBuilder.createTable(test_table)
+                  .addPartitionKey("pk", DataType.text())
+                  .addColumn("data", DataType.text())
+                  .withOptions()
+                  .additionalWritePolicy(new TableOptions.AdditionalWritePolicyValue("'ALL'")));
+      fail("Should not be able to create table with invalid 'additional_write_policy': 'ALL'");
+    } catch (InvalidQueryException iqe) {
+      assertThat(iqe)
+          .hasMessageContaining("Invalid value")
+          .hasMessageContaining("ALL")
+          .hasMessageContaining("for option");
+    } finally {
+      cleanup(test_table);
+    }
+  }
+}
diff --git a/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataAssert.java
index 6148ab6997c..bf77e1c41c9 100644
--- a/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataAssert.java
+++ b/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataAssert.java
@@ -1,11 +1,13 @@
 /*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,27 +17,28 @@
 */
 package com.datastax.driver.core;
-import org.assertj.core.api.AbstractAssert;
-
 import static org.assertj.core.api.Assertions.assertThat;
-public class AggregateMetadataAssert extends AbstractAssert<AggregateMetadataAssert, AggregateMetadata> {
-    protected AggregateMetadataAssert(AggregateMetadata actual) {
-        super(actual, AggregateMetadataAssert.class);
-    }
+import org.assertj.core.api.AbstractAssert;
+
+public class AggregateMetadataAssert
+    extends AbstractAssert<AggregateMetadataAssert, AggregateMetadata> {
+  protected AggregateMetadataAssert(AggregateMetadata actual) {
+    super(actual, AggregateMetadataAssert.class);
+  }
-    public AggregateMetadataAssert hasSignature(String name) {
-        assertThat(actual.getSignature()).isEqualTo(name);
-        return this;
-    }
+  public AggregateMetadataAssert hasSignature(String name) {
+    assertThat(actual.getSignature()).isEqualTo(name);
+    return this;
+  }
-    public AggregateMetadataAssert isInKeyspace(String keyspaceName) {
-        assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName);
-        return this;
-    }
+  public AggregateMetadataAssert isInKeyspace(String keyspaceName) {
+    assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName);
+    return this;
+  }
-    public AggregateMetadataAssert hasInitCond(Object initCond) {
-        assertThat(actual.getInitCond()).isEqualTo(initCond);
-        return this;
-    }
+  public AggregateMetadataAssert hasInitCond(Object initCond) {
+    assertThat(actual.getInitCond()).isEqualTo(initCond);
+    return this;
+  }
 }
diff --git a/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataTest.java
b/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataTest.java index 6dbdde7abaf..7f9432fda99 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AggregateMetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,263 +17,312 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.SkipException; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.DataType.cint; import static com.datastax.driver.core.DataType.text; import static com.datastax.driver.core.TestUtils.serializeForDynamicCompositeType; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.SkipException; +import org.testng.annotations.Test; + @CassandraVersion("2.2.0") @CCMConfig(config = "enable_user_defined_functions:true") public class AggregateMetadataTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_parse_and_format_aggregate_with_initcond_and_no_finalfunc() { - // given - String cqlFunction = String.format("CREATE FUNCTION %s.cat(s text,v int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return s+v;';", keyspace); - String cqlAggregate = String.format("CREATE AGGREGATE %s.cat_tos(int) SFUNC cat STYPE text INITCOND '0';", keyspace); - // when - session().execute(cqlFunction); - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata stateFunc = keyspace.getFunction("cat", text(), cint()); - AggregateMetadata aggregate = keyspace.getAggregate("cat_tos", cint()); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo("cat_tos(int)"); - assertThat(aggregate.getSimpleName()).isEqualTo("cat_tos"); - assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); - assertThat(aggregate.getFinalFunc()).isNull(); - assertThat(aggregate.getInitCond()).isEqualTo("0"); - assertThat(aggregate.getReturnType()).isEqualTo(text()); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(text()); - assertThat(aggregate.toString()).isEqualTo(cqlAggregate); - assertThat(aggregate.exportAsString()).isEqualTo(String.format("CREATE AGGREGATE %s.cat_tos(int)\n" - + "SFUNC cat STYPE text\n" - + "INITCOND '0';", this.keyspace)); - } + @Test(groups = "short") + public void should_parse_and_format_aggregate_with_initcond_and_no_finalfunc() { + // given + String cqlFunction = + String.format( + "CREATE 
FUNCTION %s.cat(s text,v int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return s+v;';", + keyspace); + String cqlAggregate = + String.format( + "CREATE AGGREGATE %s.cat_tos(int) SFUNC cat STYPE text INITCOND '0';", keyspace); + // when + session().execute(cqlFunction); + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata stateFunc = keyspace.getFunction("cat", text(), cint()); + AggregateMetadata aggregate = keyspace.getAggregate("cat_tos", cint()); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo("cat_tos(int)"); + assertThat(aggregate.getSimpleName()).isEqualTo("cat_tos"); + assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); + assertThat(aggregate.getFinalFunc()).isNull(); + assertThat(aggregate.getInitCond()).isEqualTo("0"); + assertThat(aggregate.getReturnType()).isEqualTo(text()); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(text()); + assertThat(aggregate.toString()).isEqualTo(cqlAggregate); + assertThat(aggregate.exportAsString()) + .isEqualTo( + String.format( + "CREATE AGGREGATE %s.cat_tos(int)\n" + + " SFUNC cat\n" + + " STYPE text\n" + + " INITCOND '0';", + this.keyspace)); + } - @Test(groups = "short") - public void should_parse_and_format_aggregate_with_no_arguments() { - // given - String cqlFunction = String.format("CREATE FUNCTION %s.inc(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+1;';", keyspace); - String cqlAggregate = String.format("CREATE AGGREGATE %s.mycount() SFUNC inc STYPE int INITCOND 0;", keyspace); - // when - session().execute(cqlFunction); - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata stateFunc = keyspace.getFunction("inc", cint()); - AggregateMetadata aggregate = keyspace.getAggregate("mycount"); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo("mycount()"); - assertThat(aggregate.getSimpleName()).isEqualTo("mycount"); - assertThat(aggregate.getArgumentTypes()).isEmpty(); - assertThat(aggregate.getFinalFunc()).isNull(); - assertThat(aggregate.getInitCond()).isEqualTo(0); - assertThat(aggregate.getReturnType()).isEqualTo(cint()); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(cint()); - assertThat(aggregate.toString()).isEqualTo(cqlAggregate); - assertThat(aggregate.exportAsString()).isEqualTo(String.format("CREATE AGGREGATE %s.mycount()\n" - + "SFUNC inc STYPE int\n" - + "INITCOND 0;", this.keyspace)); - } + @Test(groups = "short") + public void should_parse_and_format_aggregate_with_no_arguments() { + // given + String cqlFunction = + String.format( + "CREATE FUNCTION %s.inc(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+1;';", + keyspace); + String cqlAggregate = + String.format("CREATE AGGREGATE %s.mycount() SFUNC inc STYPE int INITCOND 0;", keyspace); + // when + session().execute(cqlFunction); + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata stateFunc = keyspace.getFunction("inc", cint()); + AggregateMetadata aggregate = keyspace.getAggregate("mycount"); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo("mycount()"); + 
assertThat(aggregate.getSimpleName()).isEqualTo("mycount"); + assertThat(aggregate.getArgumentTypes()).isEmpty(); + assertThat(aggregate.getFinalFunc()).isNull(); + assertThat(aggregate.getInitCond()).isEqualTo(0); + assertThat(aggregate.getReturnType()).isEqualTo(cint()); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(cint()); + assertThat(aggregate.toString()).isEqualTo(cqlAggregate); + assertThat(aggregate.exportAsString()) + .isEqualTo( + String.format( + "CREATE AGGREGATE %s.mycount()\n" + + " SFUNC inc\n" + + " STYPE int\n" + + " INITCOND 0;", + this.keyspace)); + } - @Test(groups = "short") - public void should_parse_and_format_aggregate_with_final_function() { - // given - String cqlFunction1 = String.format("CREATE FUNCTION %s.plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+j;';", keyspace); - String cqlFunction2 = String.format("CREATE FUNCTION %s.announce(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;';", keyspace); - String cqlAggregate = String.format("CREATE AGGREGATE %s.prettysum(int) SFUNC plus STYPE int FINALFUNC announce INITCOND 0;", keyspace); - // when - session().execute(cqlFunction1); - session().execute(cqlFunction2); - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata stateFunc = keyspace.getFunction("plus", cint(), cint()); - FunctionMetadata finalFunc = keyspace.getFunction("announce", cint()); - AggregateMetadata aggregate = keyspace.getAggregate("prettysum", cint()); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo("prettysum(int)"); - assertThat(aggregate.getSimpleName()).isEqualTo("prettysum"); - assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); - assertThat(aggregate.getFinalFunc()).isEqualTo(finalFunc); - assertThat(aggregate.getInitCond()).isEqualTo(0); - assertThat(aggregate.getReturnType()).isEqualTo(cint()); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(cint()); - assertThat(aggregate.toString()).isEqualTo(cqlAggregate); - assertThat(aggregate.exportAsString()).isEqualTo(String.format("CREATE AGGREGATE %s.prettysum(int)\n" - + "SFUNC plus STYPE int\n" - + "FINALFUNC announce\n" - + "INITCOND 0;", this.keyspace)); - } + @Test(groups = "short") + public void should_parse_and_format_aggregate_with_final_function() { + // given + String cqlFunction1 = + String.format( + "CREATE FUNCTION %s.plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+j;';", + keyspace); + String cqlFunction2 = + String.format( + "CREATE FUNCTION %s.announce(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;';", + keyspace); + String cqlAggregate = + String.format( + "CREATE AGGREGATE %s.prettysum(int) SFUNC plus STYPE int FINALFUNC announce INITCOND 0;", + keyspace); + // when + session().execute(cqlFunction1); + session().execute(cqlFunction2); + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata stateFunc = keyspace.getFunction("plus", cint(), cint()); + FunctionMetadata finalFunc = keyspace.getFunction("announce", cint()); + AggregateMetadata aggregate = keyspace.getAggregate("prettysum", cint()); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo("prettysum(int)"); + 
assertThat(aggregate.getSimpleName()).isEqualTo("prettysum"); + assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); + assertThat(aggregate.getFinalFunc()).isEqualTo(finalFunc); + assertThat(aggregate.getInitCond()).isEqualTo(0); + assertThat(aggregate.getReturnType()).isEqualTo(cint()); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(cint()); + assertThat(aggregate.toString()).isEqualTo(cqlAggregate); + assertThat(aggregate.exportAsString()) + .isEqualTo( + String.format( + "CREATE AGGREGATE %s.prettysum(int)\n" + + " SFUNC plus\n" + + " STYPE int\n" + + " FINALFUNC announce\n" + + " INITCOND 0;", + this.keyspace)); + } - @Test(groups = "short") - public void should_parse_and_format_aggregate_with_no_initcond() { - // given - String cqlFunction = String.format("CREATE FUNCTION %s.plus2(i int, j int) CALLED ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+j;';", keyspace); - String cqlAggregate = String.format("CREATE AGGREGATE %s.sum(int) SFUNC plus2 STYPE int;", keyspace); - // when - session().execute(cqlFunction); - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata stateFunc = keyspace.getFunction("plus2", cint(), cint()); - AggregateMetadata aggregate = keyspace.getAggregate("sum", cint()); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo("sum(int)"); - assertThat(aggregate.getSimpleName()).isEqualTo("sum"); - assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); - assertThat(aggregate.getFinalFunc()).isNull(); - assertThat(aggregate.getInitCond()).isNull(); - assertThat(aggregate.getReturnType()).isEqualTo(cint()); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(cint()); - assertThat(aggregate.toString()).isEqualTo(cqlAggregate); - assertThat(aggregate.exportAsString()).isEqualTo(String.format("CREATE AGGREGATE %s.sum(int)\n" - + "SFUNC plus2 STYPE int;", this.keyspace)); - } + @Test(groups = "short") + public void should_parse_and_format_aggregate_with_no_initcond() { + // given + String cqlFunction = + String.format( + "CREATE FUNCTION %s.plus2(i int, j int) CALLED ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+j;';", + keyspace); + String cqlAggregate = + String.format("CREATE AGGREGATE %s.sum(int) SFUNC plus2 STYPE int;", keyspace); + // when + session().execute(cqlFunction); + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata stateFunc = keyspace.getFunction("plus2", cint(), cint()); + AggregateMetadata aggregate = keyspace.getAggregate("sum", cint()); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo("sum(int)"); + assertThat(aggregate.getSimpleName()).isEqualTo("sum"); + assertThat(aggregate.getArgumentTypes()).containsExactly(cint()); + assertThat(aggregate.getFinalFunc()).isNull(); + assertThat(aggregate.getInitCond()).isNull(); + assertThat(aggregate.getReturnType()).isEqualTo(cint()); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(cint()); + assertThat(aggregate.toString()).isEqualTo(cqlAggregate); + assertThat(aggregate.exportAsString()) + .isEqualTo( + String.format( + "CREATE AGGREGATE %s.sum(int)\n" + " SFUNC plus2\n" + " STYPE int;", + this.keyspace)); + } - @Test(groups = "short") - public 
void should_parse_and_format_aggregate_with_udts() { - // given - String cqlFunction = String.format( - "CREATE FUNCTION %s.\"MY_FUNC\"(address1 \"Address\", address2 \"Address\") " - + "CALLED ON NULL INPUT " - + "RETURNS \"Address\" " - + "LANGUAGE java " - + "AS 'return address1;'", keyspace); - String cqlAggregate = String.format( - "CREATE AGGREGATE %s.\"MY_AGGREGATE\"(\"Address\") " - + "SFUNC \"MY_FUNC\" " - + "STYPE \"Address\";", - keyspace); - // when - session().execute(cqlFunction); - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - UserType addressType = keyspace.getUserType("\"Address\""); - FunctionMetadata stateFunc = keyspace.getFunction("\"MY_FUNC\"", addressType, addressType); - AggregateMetadata aggregate = keyspace.getAggregate("\"MY_AGGREGATE\"", addressType); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo("\"MY_AGGREGATE\"(\"Address\")"); - assertThat(aggregate.getSimpleName()).isEqualTo("MY_AGGREGATE"); - assertThat(aggregate.getArgumentTypes()).containsExactly(addressType); - assertThat(aggregate.getFinalFunc()).isNull(); - assertThat(aggregate.getInitCond()).isNull(); - assertThat(aggregate.getReturnType()).isEqualTo(addressType); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(addressType); - assertThat(aggregate.toString()).isEqualTo(cqlAggregate); - } + @Test(groups = "short") + public void should_parse_and_format_aggregate_with_udts() { + // given + String cqlFunction = + String.format( + "CREATE FUNCTION %s.\"MY_FUNC\"(address1 \"Address\", address2 \"Address\") " + + "CALLED ON NULL INPUT " + + "RETURNS \"Address\" " + + "LANGUAGE java " + + "AS 'return address1;'", + keyspace); + String cqlAggregate = + String.format( + "CREATE AGGREGATE %s.\"MY_AGGREGATE\"(\"Address\") " + + "SFUNC \"MY_FUNC\" " + + "STYPE \"Address\";", + keyspace); + // when + session().execute(cqlFunction); + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + UserType addressType = keyspace.getUserType("\"Address\""); + FunctionMetadata stateFunc = keyspace.getFunction("\"MY_FUNC\"", addressType, addressType); + AggregateMetadata aggregate = keyspace.getAggregate("\"MY_AGGREGATE\"", addressType); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo("\"MY_AGGREGATE\"(\"Address\")"); + assertThat(aggregate.getSimpleName()).isEqualTo("MY_AGGREGATE"); + assertThat(aggregate.getArgumentTypes()).containsExactly(addressType); + assertThat(aggregate.getFinalFunc()).isNull(); + assertThat(aggregate.getInitCond()).isNull(); + assertThat(aggregate.getReturnType()).isEqualTo(addressType); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(addressType); + assertThat(aggregate.toString()).isEqualTo(cqlAggregate); + } - /** - * Validates aggregates with DynamicCompositeType state types and an initcond value that is a literal, i.e.: - * 's@foo:i@32' can be appropriately parsed and generate a CQL string with the init cond as a hex string, i.e.: - * 0x80730003666f6f00806900040000002000. 
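The hex value quoted in this javadoc can be reproduced from the literal 's@foo:i@32' by following the DynamicCompositeType component layout (2-byte 0x8000|alias header, 2-byte length, the value bytes, then an end-of-component byte). The sketch below rebuilds the documented bytes by hand; it is only an illustration of the encoding, not the helper the test actually calls:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DynamicCompositeInitCondSketch {
  public static void main(String[] args) {
    byte[] foo = "foo".getBytes(StandardCharsets.UTF_8);
    ByteBuffer bb = ByteBuffer.allocate(2 + 2 + foo.length + 1 + 2 + 2 + 4 + 1);

    // Component 1: alias 's' (UTF8Type), value "foo".
    bb.putShort((short) (0x8000 | 's'));
    bb.putShort((short) foo.length);
    bb.put(foo);
    bb.put((byte) 0); // end-of-component

    // Component 2: alias 'i' (Int32Type), value 32.
    bb.putShort((short) (0x8000 | 'i'));
    bb.putShort((short) 4);
    bb.putInt(32);
    bb.put((byte) 0); // end-of-component

    StringBuilder hex = new StringBuilder("0x");
    for (byte b : bb.array()) {
      hex.append(String.format("%02x", b));
    }
    // Prints 0x80730003666f6f00806900040000002000, the value cited in the javadoc.
    System.out.println(hex);
  }
}
```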
- * - * @jira_ticket JAVA-1046 - * @test_category metadata - * @since 3.0.1 - */ - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_parse_and_format_aggregate_with_composite_type_literal_initcond() { - VersionNumber ver = ccm().getCassandraVersion(); - if (ver.getMajor() == 3) { - if ((ver.getMinor() >= 1 && ver.getMinor() < 4) || (ver.getMinor() == 0 && ver.getPatch() < 4)) { - throw new SkipException("Requires C* 2.2.X, 3.0.4+ or 3.4.X+"); - } - } - parse_and_format_aggregate_with_composite_type("ag0", "'s@foo:i@32'"); + /** + * Validates aggregates with DynamicCompositeType state types and an initcond value that is a + * literal, i.e.: 's@foo:i@32' can be appropriately parsed and generate a CQL string with the init + * cond as a hex string, i.e.: 0x80730003666f6f00806900040000002000. + * + * @jira_ticket JAVA-1046 + * @test_category metadata + * @since 3.0.1 + */ + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_parse_and_format_aggregate_with_composite_type_literal_initcond() { + VersionNumber ver = ccm().getCassandraVersion(); + if (ver.getMajor() == 3) { + if ((ver.getMinor() >= 1 && ver.getMinor() < 4) + || (ver.getMinor() == 0 && ver.getPatch() < 4)) { + throw new SkipException("Requires C* 2.2.X, 3.0.4+ or 3.4.X+"); + } } + parse_and_format_aggregate_with_composite_type("ag0", "'s@foo:i@32'"); + } - /** - * Validates aggregates with DynamicCompositeType state types and an initcond value that is a hex string - * representing the bytes for the type, i.e.: 0x80730003666f6f00806900040000002000' can be appropriately parsed - * and generates an equivalent CQL string. - * - * @jira_ticket JAVA-1046 - * @test_category metadata - * @since 3.0.1 - */ - @Test(groups = "short") - @CassandraVersion("3.4") - public void should_parse_and_format_aggregate_with_composite_type_hex_initcond() { - VersionNumber ver = ccm().getCassandraVersion(); - if ((ver.getMinor() >= 1 && ver.getMinor() < 4)) { - throw new SkipException("Requires 3.0.4+ or 3.4.X+"); - } - parse_and_format_aggregate_with_composite_type("ag1", "0x80730003666f6f00806900040000002000"); + /** + * Validates aggregates with DynamicCompositeType state types and an initcond value that is a hex + * string representing the bytes for the type, i.e.: 0x80730003666f6f00806900040000002000' can be + * appropriately parsed and generates an equivalent CQL string. 
+ * + * @jira_ticket JAVA-1046 + * @test_category metadata + * @since 3.0.1 + */ + @Test(groups = "short") + @CassandraVersion("3.4") + public void should_parse_and_format_aggregate_with_composite_type_hex_initcond() { + VersionNumber ver = ccm().getCassandraVersion(); + if ((ver.getMinor() >= 1 && ver.getMinor() < 4)) { + throw new SkipException("Requires 3.0.4+ or 3.4.X+"); } + parse_and_format_aggregate_with_composite_type("ag1", "0x80730003666f6f00806900040000002000"); + } - public void parse_and_format_aggregate_with_composite_type(String aggregateName, String initCond) { - // given - DataType custom = DataType.custom( - "org.apache.cassandra.db.marshal.DynamicCompositeType(" + - "s=>org.apache.cassandra.db.marshal.UTF8Type," + - "i=>org.apache.cassandra.db.marshal.Int32Type)"); - String cqlFunction = String.format( - "CREATE FUNCTION %s.%s_id(i %s) " + - "RETURNS NULL ON NULL INPUT " + - "RETURNS %s " + - "LANGUAGE java " + - "AS 'return i;';", keyspace, aggregateName, custom, custom); - session().execute(cqlFunction); - - String cqlAggregate = String.format( - "CREATE AGGREGATE %s.%s() " + - "SFUNC %s_id " + - "STYPE %s " + - "INITCOND %s;", keyspace, aggregateName, aggregateName, custom, initCond); + public void parse_and_format_aggregate_with_composite_type( + String aggregateName, String initCond) { + // given + DataType custom = + DataType.custom( + "org.apache.cassandra.db.marshal.DynamicCompositeType(" + + "s=>org.apache.cassandra.db.marshal.UTF8Type," + + "i=>org.apache.cassandra.db.marshal.Int32Type)"); + String cqlFunction = + String.format( + "CREATE FUNCTION %s.%s_id(i %s) " + + "RETURNS NULL ON NULL INPUT " + + "RETURNS %s " + + "LANGUAGE java " + + "AS 'return i;';", + keyspace, aggregateName, custom, custom); + session().execute(cqlFunction); - String expectedAggregate = String.format( - "CREATE AGGREGATE %s.%s() " + - "SFUNC %s_id " + - "STYPE %s " + - "INITCOND 0x80730003666f6f00806900040000002000;", keyspace, aggregateName, aggregateName, custom); + String cqlAggregate = + String.format( + "CREATE AGGREGATE %s.%s() " + "SFUNC %s_id " + "STYPE %s " + "INITCOND %s;", + keyspace, aggregateName, aggregateName, custom, initCond); - int agCount = 0; + String expectedAggregate = + String.format( + "CREATE AGGREGATE %s.%s() " + + "SFUNC %s_id " + + "STYPE %s " + + "INITCOND 0x80730003666f6f00806900040000002000;", + keyspace, aggregateName, aggregateName, custom); - // when - session().execute(cqlAggregate); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata stateFunc = keyspace.getFunction(aggregateName + "_id", custom); - AggregateMetadata aggregate = keyspace.getAggregate(aggregateName); - assertThat(aggregate).isNotNull(); - assertThat(aggregate.getSignature()).isEqualTo(aggregateName + "()"); - assertThat(aggregate.getSimpleName()).isEqualTo(aggregateName); - assertThat(aggregate.getArgumentTypes()).isEmpty(); - assertThat(aggregate.getFinalFunc()).isNull(); - assertThat(aggregate.getInitCond()).isEqualTo(serializeForDynamicCompositeType("foo", 32)); - assertThat(aggregate.getReturnType()).isEqualTo(custom); - assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); - assertThat(aggregate.getStateType()).isEqualTo(custom); + int agCount = 0; - assertThat(aggregate.toString()).isEqualTo(expectedAggregate); - } + // when + session().execute(cqlAggregate); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata stateFunc = keyspace.getFunction(aggregateName 
+ "_id", custom); + AggregateMetadata aggregate = keyspace.getAggregate(aggregateName); + assertThat(aggregate).isNotNull(); + assertThat(aggregate.getSignature()).isEqualTo(aggregateName + "()"); + assertThat(aggregate.getSimpleName()).isEqualTo(aggregateName); + assertThat(aggregate.getArgumentTypes()).isEmpty(); + assertThat(aggregate.getFinalFunc()).isNull(); + assertThat(aggregate.getInitCond()).isEqualTo(serializeForDynamicCompositeType("foo", 32)); + assertThat(aggregate.getReturnType()).isEqualTo(custom); + assertThat(aggregate.getStateFunc()).isEqualTo(stateFunc); + assertThat(aggregate.getStateType()).isEqualTo(custom); - @Override - public void onTestContextInitialized() { - execute( - String.format("CREATE TYPE IF NOT EXISTS %s.phone (number text)", keyspace), - String.format("CREATE TYPE IF NOT EXISTS %s.\"Address\" (" - + " street text," - + " city text," - + " zip int," - + " phones frozen>>," - + " location frozen>" - + ")", keyspace) - ); - } + assertThat(aggregate.toString()).isEqualTo(expectedAggregate); + } + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TYPE IF NOT EXISTS %s.phone (number text)", keyspace), + String.format( + "CREATE TYPE IF NOT EXISTS %s.\"Address\" (" + + " street text," + + " city text," + + " zip int," + + " phones frozen>>," + + " location frozen>" + + ")", + keyspace)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/Assertions.java b/driver-core/src/test/java/com/datastax/driver/core/Assertions.java index 86ecc081c14..11d10f715ac 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/Assertions.java +++ b/driver-core/src/test/java/com/datastax/driver/core/Assertions.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,80 +17,81 @@ */ package com.datastax.driver.core; -/** - * Augment AssertJ with custom assertions for the Java driver. - */ +/** Augment AssertJ with custom assertions for the Java Driver. */ public class Assertions extends org.assertj.core.api.Assertions { - public static ClusterAssert assertThat(Cluster cluster) { - return new ClusterAssert(cluster); - } - - public static HostAssert assertThat(Host host) { - return new HostAssert(host); - } - - public static SessionAssert assertThat(Session session) { - return new SessionAssert(session); - } - - public static TokenRangeAssert assertThat(TokenRange range) { - return new TokenRangeAssert(range); - } - - public static DataTypeAssert assertThat(DataType type) { - return new DataTypeAssert(type); - } - - /** - * This method is here only to disambiguate - * calls to assertThat with a UserType instance, - * because UserType also implements Iterable. 
- */ - public static DataTypeAssert assertThat(UserType type) { - return new DataTypeAssert(type); - } - - public static LocalDateAssert assertThat(LocalDate localDate) { - return new LocalDateAssert(localDate); - } - - public static KeyspaceMetadataAssert assertThat(KeyspaceMetadata metadata) { - return new KeyspaceMetadataAssert(metadata); - } - - public static TableMetadataAssert assertThat(TableMetadata table) { - return new TableMetadataAssert(table); - } - - public static ColumnMetadataAssert assertThat(ColumnMetadata column) { - return new ColumnMetadataAssert(column); - } - - public static FunctionMetadataAssert assertThat(FunctionMetadata function) { - return new FunctionMetadataAssert(function); - } - - public static AggregateMetadataAssert assertThat(AggregateMetadata aggregate) { - return new AggregateMetadataAssert(aggregate); - } - - public static IndexMetadataAssert assertThat(IndexMetadata index) { - return new IndexMetadataAssert(index); - } - - public static TypeCodecAssert assertThat(TypeCodec codec) { - return new TypeCodecAssert(codec); - } - - public static MaterializedViewMetadataAssert assertThat(MaterializedViewMetadata view) { - return new MaterializedViewMetadataAssert(view); - } - - public static VersionNumberAssert assertThat(VersionNumber actual) { - return new VersionNumberAssert(actual); - } - public static ResultSetAssert assertThat(ResultSet rows) { - return new ResultSetAssert(rows); - } - + public static ClusterAssert assertThat(Cluster cluster) { + return new ClusterAssert(cluster); + } + + public static HostAssert assertThat(Host host) { + return new HostAssert(host); + } + + public static SessionAssert assertThat(Session session) { + return new SessionAssert(session); + } + + public static TokenRangeAssert assertThat(TokenRange range) { + return new TokenRangeAssert(range); + } + + public static DataTypeAssert assertThat(DataType type) { + return new DataTypeAssert(type); + } + + /** + * This method is here only to disambiguate calls to assertThat with a UserType instance, because + * UserType also implements Iterable. 
+ */ + public static DataTypeAssert assertThat(UserType type) { + return new DataTypeAssert(type); + } + + public static LocalDateAssert assertThat(LocalDate localDate) { + return new LocalDateAssert(localDate); + } + + public static KeyspaceMetadataAssert assertThat(KeyspaceMetadata metadata) { + return new KeyspaceMetadataAssert(metadata); + } + + public static TableMetadataAssert assertThat(TableMetadata table) { + return new TableMetadataAssert(table); + } + + public static ColumnMetadataAssert assertThat(ColumnMetadata column) { + return new ColumnMetadataAssert(column); + } + + public static FunctionMetadataAssert assertThat(FunctionMetadata function) { + return new FunctionMetadataAssert(function); + } + + public static AggregateMetadataAssert assertThat(AggregateMetadata aggregate) { + return new AggregateMetadataAssert(aggregate); + } + + public static IndexMetadataAssert assertThat(IndexMetadata index) { + return new IndexMetadataAssert(index); + } + + public static TypeCodecAssert assertThat(TypeCodec codec) { + return new TypeCodecAssert(codec); + } + + public static MaterializedViewMetadataAssert assertThat(MaterializedViewMetadata view) { + return new MaterializedViewMetadataAssert(view); + } + + public static VersionNumberAssert assertThat(VersionNumber actual) { + return new VersionNumberAssert(actual); + } + + public static ResultSetAssert assertThat(ResultSet rows) { + return new ResultSetAssert(rows); + } + + public static ColumnDefinitionsAssert assertThat(ColumnDefinitions variables) { + return new ColumnDefinitionsAssert(variables); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java b/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java index b6904475644..fe75253b9bf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
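The assertThat(UserType) overload kept in Assertions above exists only for overload resolution: UserType implements Iterable, so a call with a UserType would otherwise be ambiguous between the driver's assertThat(DataType) and AssertJ's assertThat(Iterable). A self-contained illustration of the same compile-time situation with placeholder types (nothing below is driver API):

```java
import java.util.Collections;
import java.util.Iterator;

public class OverloadDisambiguationSketch {
  interface Shape {}

  // Like UserType, this type is both a Shape (think DataType) and an Iterable.
  static class Circle implements Shape, Iterable<String> {
    @Override
    public Iterator<String> iterator() {
      return Collections.singletonList("radius").iterator();
    }
  }

  static String describe(Shape s) {
    return "shape";
  }

  static String describe(Iterable<?> i) {
    return "iterable";
  }

  // Without this dedicated overload, describe(new Circle()) would not compile:
  // "reference to describe is ambiguous".
  static String describe(Circle c) {
    return "circle";
  }

  public static void main(String[] args) {
    System.out.println(describe(new Circle())); // prints "circle"
  }
}
```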
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,285 +17,312 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Function; import com.google.common.base.Throwables; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.AsyncFunction; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Map; -import java.util.concurrent.*; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; - public class AsyncQueryTest extends CCMTestsSupport { - Logger logger = LoggerFactory.getLogger(AsyncQueryTest.class); - - @DataProvider(name = "keyspace") - public static Object[][] keyspace() { - return new Object[][]{{"asyncquerytest"}, {"\"AsyncQueryTest\""}}; + Logger logger = LoggerFactory.getLogger(AsyncQueryTest.class); + + @DataProvider(name = "keyspace") + public static Object[][] keyspace() { + return new Object[][] {{"asyncquerytest"}, {"\"AsyncQueryTest\""}}; + } + + @Override + public void onTestContextInitialized() { + for (Object[] objects : keyspace()) { + String keyspace = (String) objects[0]; + execute( + String.format( + "create keyspace %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", + keyspace), + String.format("create table %s.foo(k int, v int, primary key (k, v))", keyspace)); + for (int v = 1; v <= 100; v++) + execute(String.format("insert into %s.foo (k, v) values (1, %d)", keyspace, v)); } + } - @Override - public void onTestContextInitialized() { - for (Object[] objects : keyspace()) { - String keyspace = (String) objects[0]; - execute( - String.format("create keyspace %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", keyspace), - String.format("create table %s.foo(k int, v int, primary key (k, v))", keyspace)); - for (int v = 1; v <= 100; v++) - execute(String.format("insert into %s.foo (k, v) values (1, %d)", keyspace, v)); - } - } - - /** - * Checks that a cancelled query releases the connection (JAVA-407). 
- */ - @Test(groups = "short") - public void cancelled_query_should_release_the_connection() throws InterruptedException { - ResultSetFuture future = session().executeAsync("select release_version from system.local"); - future.cancel(true); - assertTrue(future.isCancelled()); - - TimeUnit.MILLISECONDS.sleep(100); - - HostConnectionPool pool = getPool(session()); - for (Connection connection : pool.connections) { - assertEquals(connection.inFlight.get(), 0); - } - } + /** Checks that a cancelled query releases the connection (JAVA-407). */ + @Test(groups = "short") + public void cancelled_query_should_release_the_connection() throws InterruptedException { + ResultSetFuture future = session().executeAsync("select release_version from system.local"); + future.cancel(true); + assertTrue(future.isCancelled()); - @Test(groups = "short") - public void should_init_cluster_and_session_if_needed() throws Exception { - // For this test we need an uninitialized cluster, so we can't reuse the one provided by the - // parent class. Rebuild a new one with the same (unique) host. - Host host = cluster().getMetadata().allHosts().iterator().next(); + TimeUnit.MILLISECONDS.sleep(100); - Cluster cluster2 = register(Cluster.builder() - .addContactPointsWithPorts(Lists.newArrayList(host.getSocketAddress())) - .build()); - try { - Session session2 = cluster2.newSession(); - - // Neither cluster2 nor session2 are initialized at this point - assertThat(cluster2.manager.metadata).isNull(); - - ResultSetFuture future = session2.executeAsync("select release_version from system.local"); - Row row = Uninterruptibles.getUninterruptibly(future).one(); - - assertThat(row.getString(0)).isNotEmpty(); - } finally { - cluster2.close(); - } + HostConnectionPool pool = getPool(session()); + for (Connection connection : pool.connections) { + assertEquals(connection.inFlight.get(), 0); } + } - @Test(groups = "short", dataProvider = "keyspace", enabled = false, - description = "disabled because the blocking USE call in the current pool implementation makes it deadlock") - public void should_chain_query_on_async_session_init_with_same_executor(String keyspace) throws Exception { - ListenableFuture resultFuture = connectAndQuery(keyspace, GuavaCompatibility.INSTANCE.sameThreadExecutor()); + @Test(groups = "short") + public void should_init_cluster_and_session_if_needed() throws Exception { + // For this test we need an uninitialized cluster, so we can't reuse the one provided by the + // parent class. Rebuild a new one with the same (unique) host. 
+ Cluster cluster2 = register(createClusterBuilder().build()); + try { + Session session2 = cluster2.newSession(); - Integer result = Uninterruptibles.getUninterruptibly(resultFuture); - assertThat(result).isEqualTo(1); - } + // Neither cluster2 nor session2 are initialized at this point + assertThat(cluster2.manager.metadata).isNull(); - @Test(groups = "short", dataProvider = "keyspace") - public void should_chain_query_on_async_session_init_with_different_executor(String keyspace) throws Exception { - ExecutorService executor = Executors.newFixedThreadPool(1); + ResultSetFuture future = session2.executeAsync("select release_version from system.local"); + Row row = Uninterruptibles.getUninterruptibly(future).one(); - ListenableFuture resultFuture = connectAndQuery(keyspace, executor); - - Integer result = Uninterruptibles.getUninterruptibly(resultFuture); - assertThat(result).isEqualTo(1); - - executor.shutdownNow(); + assertThat(row.getString(0)).isNotEmpty(); + } finally { + cluster2.close(); } - - @Test(groups = "short") - public void should_propagate_error_to_chained_query_if_session_init_fails() throws Exception { - ListenableFuture resultFuture = connectAndQuery("wrong_keyspace", GuavaCompatibility.INSTANCE.sameThreadExecutor()); - - try { - Uninterruptibles.getUninterruptibly(resultFuture); - } catch (ExecutionException e) { - assertThat(e.getCause()) - .isInstanceOf(InvalidQueryException.class) - .hasMessage("Keyspace 'wrong_keyspace' does not exist"); - } + } + + @Test( + groups = "short", + dataProvider = "keyspace", + enabled = false, + description = + "disabled because the blocking USE call in the current pool implementation makes it deadlock") + public void should_chain_query_on_async_session_init_with_same_executor(String keyspace) + throws Exception { + ListenableFuture resultFuture = + connectAndQuery(keyspace, GuavaCompatibility.INSTANCE.sameThreadExecutor()); + + Integer result = Uninterruptibles.getUninterruptibly(resultFuture); + assertThat(result).isEqualTo(1); + } + + @Test(groups = "short", dataProvider = "keyspace") + public void should_chain_query_on_async_session_init_with_different_executor(String keyspace) + throws Exception { + ExecutorService executor = Executors.newFixedThreadPool(1); + + ListenableFuture resultFuture = connectAndQuery(keyspace, executor); + + Integer result = Uninterruptibles.getUninterruptibly(resultFuture); + assertThat(result).isEqualTo(1); + + executor.shutdownNow(); + } + + @Test(groups = "short") + public void should_propagate_error_to_chained_query_if_session_init_fails() throws Exception { + ListenableFuture resultFuture = + connectAndQuery("wrong_keyspace", GuavaCompatibility.INSTANCE.sameThreadExecutor()); + + try { + Uninterruptibles.getUninterruptibly(resultFuture); + } catch (ExecutionException e) { + assertThat(e.getCause()) + .isInstanceOf(InvalidQueryException.class) + .hasMessage("Keyspace 'wrong_keyspace' does not exist"); } - - @Test(groups = "short") - public void should_fail_when_synchronous_call_on_io_thread() throws Exception { - for (int i = 0; i < 1000; i++) { - ResultSetFuture f = session().executeAsync("select release_version from system.local"); - ListenableFuture f2 = Futures.transform(f, new Function() { + } + + @Test(groups = "short") + public void should_fail_when_synchronous_call_on_io_thread() throws Exception { + for (int i = 0; i < 1000; i++) { + ResultSetFuture f = session().executeAsync("select release_version from system.local"); + ListenableFuture f2 = + GuavaCompatibility.INSTANCE.transform( + f, + 
new Function() { @Override public Thread apply(ResultSet input) { - session().execute("select release_version from system.local"); - return Thread.currentThread(); + session().execute("select release_version from system.local"); + return Thread.currentThread(); } - }); - if (isFailed(f2, IllegalStateException.class, "Detected a synchronous call on an I/O thread")) { - return; - } - } - fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + }); + if (isFailed( + f2, IllegalStateException.class, "Detected a synchronous call on an I/O thread")) { + return; + } } - - @Test(groups = "short") - public void should_fail_when_synchronous_call_on_io_thread_with_session_wrapper() throws Exception { - final Session session = new SessionWrapper(session()); - for (int i = 0; i < 1000; i++) { - ResultSetFuture f = session.executeAsync("select release_version from system.local"); - ListenableFuture f2 = Futures.transform(f, new Function() { + fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + } + + @Test(groups = "short") + public void should_fail_when_synchronous_call_on_io_thread_with_session_wrapper() + throws Exception { + final Session session = new SessionWrapper(session()); + for (int i = 0; i < 1000; i++) { + ResultSetFuture f = session.executeAsync("select release_version from system.local"); + ListenableFuture f2 = + GuavaCompatibility.INSTANCE.transform( + f, + new Function() { @Override public Thread apply(ResultSet input) { - session.execute("select release_version from system.local"); - return Thread.currentThread(); + session.execute("select release_version from system.local"); + return Thread.currentThread(); } - }); - if (isFailed(f2, IllegalStateException.class, "Detected a synchronous call on an I/O thread")) { - return; - } - } - fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + }); + if (isFailed( + f2, IllegalStateException.class, "Detected a synchronous call on an I/O thread")) { + return; + } } - - @Test(groups = "short") - @CassandraVersion(value = "2.0.0", description = "Paging is not supported until 2.0") - public void should_fail_when_auto_paging_on_io_thread() throws Exception { - for (int i = 0; i < 1000; i++) { - Statement statement = new SimpleStatement("select v from asyncquerytest.foo where k = 1"); - // Ensure results will be paged (there are 100 rows in the test table) - statement.setFetchSize(10); - ResultSetFuture f = session().executeAsync(statement); - ListenableFuture f2 = Futures.transform(f, new Function() { + fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + } + + @Test(groups = "short") + @CassandraVersion(value = "2.0.0", description = "Paging is not supported until 2.0") + public void should_fail_when_auto_paging_on_io_thread() throws Exception { + for (int i = 0; i < 1000; i++) { + Statement statement = new SimpleStatement("select v from asyncquerytest.foo where k = 1"); + // Ensure results will be paged (there are 100 rows in the test table) + statement.setFetchSize(10); + ResultSetFuture f = session().executeAsync(statement); + ListenableFuture f2 = + GuavaCompatibility.INSTANCE.transform( + f, + new Function() { @Override public Thread apply(ResultSet rs) { - rs.all(); // Consume the whole result set - return Thread.currentThread(); + rs.all(); // Consume the whole result set + return Thread.currentThread(); } - }); - if (isFailed(f2, IllegalStateException.class, "Detected a synchronous call on 
an I/O thread")) { - return; - } - } - fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + }); + if (isFailed( + f2, IllegalStateException.class, "Detected a synchronous call on an I/O thread")) { + return; + } + } + fail("callback was not executed on io thread in 1000 attempts, something may be wrong."); + } + + private boolean isFailed( + ListenableFuture future, Class expectedException, String expectedMessage) { + try { + Thread executedThread = future.get(); + if (executedThread != Thread.currentThread()) { + fail("Expected a failed future, callback was executed on " + executedThread); + } else { + // Callback was invoked on the same thread, which indicates that the future completed + // before the transform callback was registered. Try again to produce case where callback + // is called on io thread. + logger.warn("Future completed before transform callback registered, will try again."); + } + } catch (Exception e) { + assertThat(Throwables.getRootCause(e)) + .isInstanceOf(expectedException) + .hasMessageContaining(expectedMessage); + return true; + } + return false; + } + + private ListenableFuture connectAndQuery(String keyspace, Executor executor) { + ListenableFuture sessionFuture = cluster().connectAsync(keyspace); + ListenableFuture queryFuture = + GuavaCompatibility.INSTANCE.transformAsync( + sessionFuture, + new AsyncFunction() { + @Override + public ListenableFuture apply(Session session) throws Exception { + return session.executeAsync("select v from foo where k = 1"); + } + }, + executor); + return GuavaCompatibility.INSTANCE.transform( + queryFuture, + new Function() { + @Override + public Integer apply(ResultSet rs) { + return rs.one().getInt(0); + } + }, + executor); + } + + private static HostConnectionPool getPool(Session session) { + Collection pools = ((SessionManager) session).pools.values(); + assertEquals(pools.size(), 1); + return pools.iterator().next(); + } + + private static class SessionWrapper extends AbstractSession { + + private final Session session; + + public SessionWrapper(Session session) { + this.session = session; } - private boolean isFailed(ListenableFuture future, Class expectedException, String expectedMessage) { - try { - Thread executedThread = future.get(); - if (executedThread != Thread.currentThread()) { - fail("Expected a failed future, callback was executed on " + executedThread); - } else { - // Callback was invoked on the same thread, which indicates that the future completed - // before the transform callback was registered. Try again to produce case where callback - // is called on io thread. 
- logger.warn("Future completed before transform callback registered, will try again."); - } - } catch (Exception e) { - assertThat(Throwables.getRootCause(e)) - .isInstanceOf(expectedException) - .hasMessageContaining(expectedMessage); - return true; - } - return false; + @Override + public ResultSet execute(Statement statement) { + // test a custom call to checkNotInEventLoop() + checkNotInEventLoop(); + return executeAsync(statement).getUninterruptibly(); } - private ListenableFuture connectAndQuery(String keyspace, Executor executor) { - ListenableFuture sessionFuture = cluster().connectAsync(keyspace); - ListenableFuture queryFuture = GuavaCompatibility.INSTANCE.transformAsync(sessionFuture, new AsyncFunction() { - @Override - public ListenableFuture apply(Session session) throws Exception { - return session.executeAsync("select v from foo where k = 1"); - } - }, executor); - return Futures.transform(queryFuture, new Function() { - @Override - public Integer apply(ResultSet rs) { - return rs.one().getInt(0); - } - }, executor); + @Override + public String getLoggedKeyspace() { + return session.getLoggedKeyspace(); } - private static HostConnectionPool getPool(Session session) { - Collection pools = ((SessionManager) session).pools.values(); - assertEquals(pools.size(), 1); - return pools.iterator().next(); + @Override + public Session init() { + return session.init(); } - private static class SessionWrapper extends AbstractSession { - - private final Session session; - - public SessionWrapper(Session session) { - this.session = session; - } - - @Override - public ResultSet execute(Statement statement) { - // test a custom call to checkNotInEventLoop() - checkNotInEventLoop(); - return executeAsync(statement).getUninterruptibly(); - } - - @Override - public String getLoggedKeyspace() { - return session.getLoggedKeyspace(); - } - - @Override - public Session init() { - return session.init(); - } - - @Override - public ListenableFuture initAsync() { - return session.initAsync(); - } - - @Override - public ResultSetFuture executeAsync(Statement statement) { - return session.executeAsync(statement); - } - - @Override - public CloseFuture closeAsync() { - return session.closeAsync(); - } - - @Override - public boolean isClosed() { - return session.isClosed(); - } - - @Override - public Cluster getCluster() { - return session.getCluster(); - } - - @Override - public State getState() { - return session.getState(); - } - - @Override - protected ListenableFuture prepareAsync(String query, Map customPayload) { - return ((SessionManager) session).prepareAsync(query, customPayload); - } + @Override + public ListenableFuture initAsync() { + return session.initAsync(); + } + + @Override + public ResultSetFuture executeAsync(Statement statement) { + return session.executeAsync(statement); + } + + @Override + public CloseFuture closeAsync() { + return session.closeAsync(); + } + + @Override + public boolean isClosed() { + return session.isClosed(); + } + + @Override + public Cluster getCluster() { + return session.getCluster(); + } + + @Override + public State getState() { + return session.getState(); + } + + @Override + protected ListenableFuture prepareAsync( + String query, Map customPayload) { + return ((SessionManager) session).prepareAsync(query, customPayload); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AsyncResultSetTest.java b/driver-core/src/test/java/com/datastax/driver/core/AsyncResultSetTest.java index c2b1991d9e3..08e7f009b2a 100644 --- 
a/driver-core/src/test/java/com/datastax/driver/core/AsyncResultSetTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AsyncResultSetTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,77 +17,69 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.util.Set; import java.util.concurrent.ConcurrentSkipListSet; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; @CassandraVersion(value = "2.0.0", description = "uses paging") public class AsyncResultSetTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute( - "create table ints (i int primary key)"); - } + @Override + public void onTestContextInitialized() { + execute("create table ints (i int primary key)"); + } - @BeforeMethod(groups = "short") - public void cleanup() { - session().execute("truncate ints"); - } + @BeforeMethod(groups = "short") + public void cleanup() { + session().execute("truncate ints"); + } - @Test(groups = "short") - public void should_iterate_single_page_result_set_asynchronously() { - should_iterate_result_set_asynchronously(100, 500); - } + @Test(groups = "short") + public void should_iterate_single_page_result_set_asynchronously() { + should_iterate_result_set_asynchronously(100, 500); + } - @Test(groups = "short") - public void should_iterate_multi_page_result_set_asynchronously() { - should_iterate_result_set_asynchronously(1000, 20); - } + @Test(groups = "short") + public void should_iterate_multi_page_result_set_asynchronously() { + should_iterate_result_set_asynchronously(1000, 20); + } - private void should_iterate_result_set_asynchronously(int totalCount, int fetchSize) { - for (int i = 0; i < totalCount; i++) - session().execute(String.format("insert into ints (i) values (%d)", i)); + private void should_iterate_result_set_asynchronously(int totalCount, int fetchSize) { + for (int i = 0; i < totalCount; i++) + session().execute(String.format("insert into ints (i) values (%d)", i)); - Statement statement = new SimpleStatement("select * from ints").setFetchSize(fetchSize); - ResultsAccumulator results = new ResultsAccumulator(); + Statement statement = new SimpleStatement("select * from ints").setFetchSize(fetchSize); + ResultsAccumulator results = 
new ResultsAccumulator(); - ListenableFuture future = GuavaCompatibility.INSTANCE.transformAsync( - session().executeAsync(statement), - results); + ListenableFuture future = + GuavaCompatibility.INSTANCE.transformAsync(session().executeAsync(statement), results); - Futures.getUnchecked(future); + Futures.getUnchecked(future); - assertThat(results.all.size()).isEqualTo(totalCount); - } + assertThat(results.all.size()).isEqualTo(totalCount); + } - /** - * Dummy transformation that accumulates all traversed results - */ - static class ResultsAccumulator implements AsyncFunction { - final Set all = new ConcurrentSkipListSet(); + /** Dummy transformation that accumulates all traversed results */ + static class ResultsAccumulator implements AsyncFunction { + final Set all = new ConcurrentSkipListSet(); - @Override - public ListenableFuture apply(ResultSet rs) throws Exception { - int remainingInPage = rs.getAvailableWithoutFetching(); - for (Row row : rs) { - all.add(row.getInt(0)); - if (--remainingInPage == 0) - break; - } - boolean wasLastPage = rs.getExecutionInfo().getPagingState() == null; - if (wasLastPage) - return Futures.immediateFuture(rs); - else - return GuavaCompatibility.INSTANCE.transformAsync(rs.fetchMoreResults(), this); - } + @Override + public ListenableFuture apply(ResultSet rs) throws Exception { + int remainingInPage = rs.getAvailableWithoutFetching(); + for (Row row : rs) { + all.add(row.getInt(0)); + if (--remainingInPage == 0) break; + } + boolean wasLastPage = rs.getExecutionInfo().getPagingState() == null; + if (wasLastPage) return Futures.immediateFuture(rs); + else return GuavaCompatibility.INSTANCE.transformAsync(rs.fetchMoreResults(), this); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AtomicMonotonicTimestampGeneratorTest.java b/driver-core/src/test/java/com/datastax/driver/core/AtomicMonotonicTimestampGeneratorTest.java index df7ae1c0ec3..e7bdf848b24 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AtomicMonotonicTimestampGeneratorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AtomicMonotonicTimestampGeneratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,81 +17,81 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.testng.annotations.Test; - import java.util.List; import java.util.SortedSet; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.testng.annotations.Test; public class AtomicMonotonicTimestampGeneratorTest { - @Test(groups = "unit") - public void should_generate_incrementing_timestamps_for_all_threads() throws InterruptedException { - // Create a generator with a fixed millisecond value - final long fixedTime = 1; - final AtomicMonotonicTimestampGenerator generator = new AtomicMonotonicTimestampGenerator(); - generator.clock = new MockClocks.FixedTimeClock(fixedTime); + @Test(groups = "unit") + public void should_generate_incrementing_timestamps_for_all_threads() + throws InterruptedException { + // Create a generator with a fixed millisecond value + final long fixedTime = 1; + final AtomicMonotonicTimestampGenerator generator = new AtomicMonotonicTimestampGenerator(); + generator.clock = new MockClocks.FixedTimeClock(fixedTime); - MemoryAppender appender = new MemoryAppender(); - Logger logger = Logger.getLogger(TimestampGenerator.class); - Level originalLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); + MemoryAppender appender = new MemoryAppender(); + Logger logger = Logger.getLogger(TimestampGenerator.class); + Level originalLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(appender); - try { - // Generate 1000 timestamps shared among multiple threads - final int testThreadsCount = 2; - assertEquals(1000 % testThreadsCount, 0); - final SortedSet allTimestamps = new ConcurrentSkipListSet(); - ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(testThreadsCount)); + try { + // Generate 1000 timestamps shared among multiple threads + final int testThreadsCount = 2; + assertEquals(1000 % testThreadsCount, 0); + final SortedSet allTimestamps = new ConcurrentSkipListSet(); + ListeningExecutorService executor = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(testThreadsCount)); - List> futures = Lists.newArrayListWithExpectedSize(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - futures.add(executor.submit( - new Runnable() { - @Override - public void run() { - for (int i = 0; i < 1000 / testThreadsCount; i++) - allTimestamps.add(generator.next()); - } - })); - } - executor.shutdown(); - executor.awaitTermination(1, TimeUnit.SECONDS); + List> futures = 
Lists.newArrayListWithExpectedSize(testThreadsCount); + for (int i = 0; i < testThreadsCount; i++) { + futures.add( + executor.submit( + new Runnable() { + @Override + public void run() { + for (int i = 0; i < 1000 / testThreadsCount; i++) + allTimestamps.add(generator.next()); + } + })); + } + executor.shutdown(); + executor.awaitTermination(1, TimeUnit.SECONDS); - try { - Futures.allAsList(futures).get(); - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof AssertionError) - throw (AssertionError) cause; - else - fail("Error in a test thread", cause); - } + try { + Futures.allAsList(futures).get(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof AssertionError) throw (AssertionError) cause; + else fail("Error in a test thread", cause); + } - // Ensure that the 1000 microseconds for the mocked millisecond value have been generated - int i = 0; - for (Long timestamp : allTimestamps) { - Long expected = fixedTime + i; - assertEquals(timestamp, expected); - i += 1; - } - } finally { - logger.removeAppender(appender); - logger.setLevel(originalLevel); - } + // Ensure that the 1000 microseconds for the mocked millisecond value have been generated + int i = 0; + for (Long timestamp : allTimestamps) { + Long expected = fixedTime + i; + assertEquals(timestamp, expected); + i += 1; + } + } finally { + logger.removeAppender(appender); + logger.setLevel(originalLevel); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java b/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java index fa01f49e9d3..0166fac1b2a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,156 +17,160 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.TestUtils.findHost; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.fail; + import com.datastax.driver.core.exceptions.AuthenticationException; import com.google.common.util.concurrent.Uninterruptibles; -import org.apache.log4j.Level; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.net.InetSocketAddress; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Level; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.findHost; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.*; - -/** - * Tests for authenticated cluster access - */ +/** Tests for authenticated cluster access */ @CreateCCM(PER_METHOD) @CCMConfig( - config = "authenticator:PasswordAuthenticator", - jvmArgs = "-Dcassandra.superuser_setup_delay_ms=0", - createCluster = false) + config = "authenticator:PasswordAuthenticator", + jvmArgs = "-Dcassandra.superuser_setup_delay_ms=0", + createCluster = false) public class AuthenticationTest extends CCMTestsSupport { - @BeforeMethod(groups = "short") - public void sleepIf12() { - // For C* 1.2, sleep before attempting to connect as there is a small delay between - // user being created. - if (ccm().getCassandraVersion().getMajor() < 2) { - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } + @BeforeMethod(groups = "short") + public void sleepIf12() { + // For C* 1.2, sleep before attempting to connect as there is a small delay between + // user being created. + if (ccm().getCassandraVersion().getMajor() < 2) { + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); } - - @Test(groups = "short") - public void should_connect_with_credentials() throws InterruptedException { - PlainTextAuthProvider authProvider = spy(new PlainTextAuthProvider("cassandra", "cassandra")); - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withAuthProvider(authProvider) - .build(); + } + + @Test(groups = "short") + public void should_connect_with_credentials() { + PlainTextAuthProvider authProvider = spy(new PlainTextAuthProvider("cassandra", "cassandra")); + Cluster cluster = createClusterBuilder().withAuthProvider(authProvider).build(); + cluster.connect(); + verify(authProvider, atLeastOnce()) + .newAuthenticator( + findHost(cluster, 1).getEndPoint(), "org.apache.cassandra.auth.PasswordAuthenticator"); + assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()) + .isEqualTo(0); + } + + /** + * Validates that if cluster initialization fails, it should fail with an authentication + * exception. + * + *
<p>
    In addition, a repeated attempt to initialize raises an error indicating that it had + * previously failed. + * + * @jira_ticket JAVA-1221 + */ + @Test(groups = "short") + public void should_fail_to_connect_with_wrong_credentials() { + Cluster cluster = register(createClusterBuilder().withCredentials("bogus", "bogus").build()); + + try { + cluster.connect(); + fail("Should throw AuthenticationException when attempting to connect"); + } catch (AuthenticationException e) { + try { cluster.connect(); - verify(authProvider, atLeastOnce()).newAuthenticator(findHost(cluster, 1).getSocketAddress(), "org.apache.cassandra.auth.PasswordAuthenticator"); - assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()).isEqualTo(0); - } - - @Test(groups = "short", expectedExceptions = AuthenticationException.class) - public void should_fail_to_connect_with_wrong_credentials() throws InterruptedException { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCredentials("bogus", "bogus") - .build()); - try { - cluster.connect(); - } finally { - assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()).isEqualTo(1); - } + fail("Should throw IllegalStateException when attempting to connect again."); + } catch (IllegalStateException e1) { + assertThat(e1.getCause()).isSameAs(e); + assertThat(e1) + .hasMessage( + "Can't use this cluster instance because it encountered an error in its initialization"); + } + } finally { + cluster.close(); } - - @Test(groups = "short", expectedExceptions = AuthenticationException.class) - public void should_fail_to_connect_without_credentials() throws InterruptedException { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); - try { - cluster.connect(); - } finally { - assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()).isEqualTo(1); - } + } + + @Test(groups = "short", expectedExceptions = AuthenticationException.class) + public void should_fail_to_connect_without_credentials() { + Cluster cluster = register(createClusterBuilder().build()); + cluster.connect(); + } + + /** + * Ensures that authentication is possible even if the server is busy during SASL handshake. + * + * @jira_ticket JAVA-1429 + */ + @Test(groups = "short") + @CCMConfig(dirtiesContext = true) + public void should_connect_with_slow_server() { + Cluster cluster = + createClusterBuilder() + .withAuthProvider(new SlowAuthProvider()) + .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(1)) + .build(); + cluster.connect(); + } + + private class SlowAuthProvider extends PlainTextAuthProvider { + + public SlowAuthProvider() { + super("cassandra", "cassandra"); } - /** - * Ensures that authentication is possible even if the server is busy during - * SASL handshake. 
- * - * @jira_ticket JAVA-1429 - */ - @Test(groups = "short") - @CCMConfig(dirtiesContext = true) - public void should_connect_with_slow_server() throws InterruptedException { - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withAuthProvider(new SlowAuthProvider()) - .withPoolingOptions(new PoolingOptions() - .setHeartbeatIntervalSeconds(1)) - .build(); - cluster.connect(); + @Override + public Authenticator newAuthenticator(EndPoint host, String authenticator) + throws AuthenticationException { + simulateBusyServer(); + return super.newAuthenticator(host, authenticator); } - - private class SlowAuthProvider extends PlainTextAuthProvider { - - public SlowAuthProvider() { - super("cassandra", "cassandra"); - } - - @Override - public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) throws AuthenticationException { - simulateBusyServer(); - return super.newAuthenticator(host, authenticator); - } - - } - - private void simulateBusyServer() { - ccm().pause(1); - new Timer().schedule(new TimerTask() { - @Override - public void run() { + } + + private void simulateBusyServer() { + ccm().pause(1); + new Timer() + .schedule( + new TimerTask() { + @Override + public void run() { ccm().resume(1); - } - }, 2000); + } + }, + 2000); + } + + /** + * Ensures that when a host replies with AuthenticationException during connection pool + * initialization the pool creation is aborted. + * + * @jira_ticket JAVA-1431 + */ + @Test(groups = "short") + public void should_not_create_pool_with_wrong_credentials() { + PlainTextAuthProvider authProvider = new PlainTextAuthProvider("cassandra", "cassandra"); + Cluster cluster = register(createClusterBuilder().withAuthProvider(authProvider).build()); + cluster.init(); + authProvider.setPassword("wrong"); + Level previous = TestUtils.setLogLevel(Session.class, Level.WARN); + MemoryAppender logs = new MemoryAppender().enableFor(Session.class); + Session session; + try { + session = cluster.connect(); + } finally { + TestUtils.setLogLevel(Session.class, previous); + logs.disableFor(Session.class); } - - /** - * Ensures that when a host replies with AuthenticationException - * during connection pool initialization the pool creation is aborted. 
- * - * @jira_ticket JAVA-1431 - */ - @Test(groups = "short") - public void should_not_create_pool_with_wrong_credentials() throws InterruptedException { - PlainTextAuthProvider authProvider = new PlainTextAuthProvider("cassandra", "cassandra"); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withAuthProvider(authProvider) - .build()); - cluster.init(); - authProvider.setPassword("wrong"); - Level previous = TestUtils.setLogLevel(Session.class, Level.WARN); - MemoryAppender logs = new MemoryAppender().enableFor(Session.class); - Session session; - try { - session = cluster.connect(); - } finally { - TestUtils.setLogLevel(Session.class, previous); - logs.disableFor(Session.class); - } - assertThat(session.getState().getConnectedHosts()).isEmpty(); - InetSocketAddress host = ccm().addressOfNode(1); - assertThat(logs.get()) - .contains( - "Error creating pool to " + host, - "Authentication error on host " + host, - AuthenticationException.class.getSimpleName()); - } - + assertThat(session.getState().getConnectedHosts()).isEmpty(); + InetSocketAddress host = ccm().addressOfNode(1); + assertThat(logs.get()) + .contains( + "Error creating pool to " + host, + "Authentication error on host " + host, + AuthenticationException.class.getSimpleName()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/BatchStatementIdempotencyTest.java b/driver-core/src/test/java/com/datastax/driver/core/BatchStatementIdempotencyTest.java index 2dc933a4ec6..2fd645275f3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/BatchStatementIdempotencyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/BatchStatementIdempotencyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +19,28 @@ public class BatchStatementIdempotencyTest extends AbstractBatchIdempotencyTest { - @Override - protected TestBatch createBatch() { - return new TestBatchStatementWrapper(); - } + @Override + protected TestBatch createBatch() { + return new TestBatchStatementWrapper(); + } - static class TestBatchStatementWrapper implements TestBatch { + static class TestBatchStatementWrapper implements TestBatch { - private final BatchStatement batch = new BatchStatement(); + private final BatchStatement batch = new BatchStatement(); - @Override - public void add(RegularStatement statement) { - batch.add(statement); - } + @Override + public void add(RegularStatement statement) { + batch.add(statement); + } - @Override - public Boolean isIdempotent() { - return batch.isIdempotent(); - } + @Override + public Boolean isIdempotent() { + return batch.isIdempotent(); + } - @Override - public void setIdempotent(boolean idempotent) { - batch.setIdempotent(idempotent); - } + @Override + public void setIdempotent(boolean idempotent) { + batch.setIdempotent(idempotent); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/BatchStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/BatchStatementTest.java index 360897cf1f6..07320f87a1b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/BatchStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/BatchStatementTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,82 +17,86 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.UnsupportedFeatureException; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + @CreateCCM(PER_METHOD) public class BatchStatementTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); + } - @Test(groups = "short") - public void simpleBatchTest() { - try { - PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?)"); + @Test(groups = "short") + public void simpleBatchTest() { + try { + PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?)"); - BatchStatement batch = new BatchStatement(); + BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); - batch.add(st.bind("key1", 1)); - batch.add(st.bind("key2", 0)); + batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); + batch.add(st.bind("key1", 1)); + batch.add(st.bind("key2", 0)); - assertEquals(3, batch.size()); + assertEquals(3, batch.size()); - session().execute(batch); + session().execute(batch); - ResultSet rs = session().execute("SELECT * FROM test"); + ResultSet rs = session().execute("SELECT * FROM test"); - Row r; + Row r; - r = rs.one(); - assertEquals(r.getString("k"), "key1"); - assertEquals(r.getInt("v"), 0); + r = rs.one(); + assertEquals(r.getString("k"), "key1"); + assertEquals(r.getInt("v"), 0); - r = rs.one(); - assertEquals(r.getString("k"), "key1"); - assertEquals(r.getInt("v"), 1); + r = rs.one(); + assertEquals(r.getString("k"), "key1"); + assertEquals(r.getInt("v"), 1); - r = rs.one(); - assertEquals(r.getString("k"), "key2"); - assertEquals(r.getInt("v"), 0); + r = rs.one(); + assertEquals(r.getString("k"), "key2"); + assertEquals(r.getInt("v"), 0); - assertTrue(rs.isExhausted()); + assertTrue(rs.isExhausted()); - } catch (UnsupportedFeatureException e) { - // This is expected when testing the protocol v1 - assertEquals(cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1); - } + } catch (UnsupportedFeatureException e) { + // This is expected when testing the protocol v1 + assertEquals( + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), + ProtocolVersion.V1); } + } - @Test(groups = "short") - @CassandraVersion(value = "2.0.9", description = "This will only work with C* 2.0.9 (CASSANDRA-7337)") - public void casBatchTest() { - PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?) 
IF NOT EXISTS"); + @Test(groups = "short") + @CassandraVersion( + value = "2.0.9", + description = "This will only work with C* 2.0.9 (CASSANDRA-7337)") + public void casBatchTest() { + PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?) IF NOT EXISTS"); - BatchStatement batch = new BatchStatement(); + BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); - batch.add(st.bind("key1", 1)); - batch.add(st.bind("key1", 2)); + batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); + batch.add(st.bind("key1", 1)); + batch.add(st.bind("key1", 2)); - assertEquals(3, batch.size()); + assertEquals(3, batch.size()); - ResultSet rs = session().execute(batch); - Row r = rs.one(); - assertTrue(!r.isNull("[applied]")); - assertEquals(r.getBool("[applied]"), true); + ResultSet rs = session().execute(batch); + Row r = rs.one(); + assertTrue(!r.isNull("[applied]")); + assertEquals(r.getBool("[applied]"), true); - rs = session().execute(batch); - r = rs.one(); - assertTrue(!r.isNull("[applied]")); - assertEquals(r.getBool("[applied]"), false); - } + rs = session().execute(batch); + r = rs.one(); + assertTrue(!r.isNull("[applied]")); + assertEquals(r.getBool("[applied]"), false); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/BoundStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/BoundStatementTest.java index d12eae8a14e..7b33590fe38 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/BoundStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/BoundStatementTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,53 +17,53 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.google.common.collect.Lists; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - public class BoundStatementTest extends CCMTestsSupport { - PreparedStatement prepared; + PreparedStatement prepared; - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE foo (k int primary key, v1 text, v2 list)"); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE foo (k int primary key, v1 text, v2 list)"); + } - @BeforeClass(groups = "short") - public void setup() { - prepared = session().prepare("INSERT INTO foo (k, v1, v2) VALUES (?, ?, ?)"); - } + @BeforeClass(groups = "short") + public void setup() { + prepared = session().prepare("INSERT INTO foo (k, v1, v2) VALUES (?, ?, ?)"); + } - @Test(groups = "short") - public void should_get_single_value() { - // This test is not exhaustive, note that the method is also covered in DataTypeIntegrationTest. - BoundStatement statement = prepared.bind(1, "test", Lists.newArrayList(1)); + @Test(groups = "short") + public void should_get_single_value() { + // This test is not exhaustive, note that the method is also covered in DataTypeIntegrationTest. + BoundStatement statement = prepared.bind(1, "test", Lists.newArrayList(1)); - assertThat(statement.getInt(0)) - .isEqualTo(statement.getInt("k")) - .isEqualTo(1); + assertThat(statement.getInt(0)).isEqualTo(statement.getInt("k")).isEqualTo(1); - assertThat(statement.getString(1)) - .isEqualTo(statement.getString("v1")) - .isEqualTo("test"); + assertThat(statement.getString(1)).isEqualTo(statement.getString("v1")).isEqualTo("test"); - assertThat(statement.getList(2, Integer.class)) - .isEqualTo(statement.getList("v2", Integer.class)) - .isEqualTo(Lists.newArrayList(1)); + assertThat(statement.getList(2, Integer.class)) + .isEqualTo(statement.getList("v2", Integer.class)) + .isEqualTo(Lists.newArrayList(1)); - try { - statement.getString(0); - fail("Expected codec not found error"); - } catch (CodecNotFoundException e) { /* expected */ } + try { + statement.getString(0); + fail("Expected codec not found error"); + } catch (CodecNotFoundException e) { + /* expected */ + } - try { - statement.getString(3); - fail("Expected index error"); - } catch (IndexOutOfBoundsException e) { /* expected */ } + try { + statement.getString(3); + fail("Expected index error"); + } catch (IndexOutOfBoundsException e) { + /* expected */ } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/BytesToSegmentDecoderTest.java b/driver-core/src/test/java/com/datastax/driver/core/BytesToSegmentDecoderTest.java new file mode 100644 index 00000000000..70608509988 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/BytesToSegmentDecoderTest.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.ProtocolOptions.Compression; +import com.datastax.driver.core.exceptions.CrcMismatchException; +import com.google.common.base.Strings; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.DecoderException; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class BytesToSegmentDecoderTest { + + // Hard-coded test data, the values were generated with our encoding methods. + // We're not really testing the decoding itself here, only that our subclass calls the + // LengthFieldBasedFrameDecoder parent constructor with the right parameters. + private static final ByteBuf REGULAR_HEADER = byteBuf("04000201f9f2"); + private static final ByteBuf REGULAR_PAYLOAD = byteBuf("00000001"); + private static final ByteBuf REGULAR_TRAILER = byteBuf("1fd6022d"); + private static final ByteBuf REGULAR_WRONG_HEADER = byteBuf("04000202f9f2"); + private static final ByteBuf REGULAR_WRONG_TRAILER = byteBuf("1fd6022e"); + + private static final ByteBuf MAX_HEADER = byteBuf("ffff03254047"); + private static final ByteBuf MAX_PAYLOAD = + byteBuf(Strings.repeat("01", Segment.MAX_PAYLOAD_LENGTH)); + private static final ByteBuf MAX_TRAILER = byteBuf("a05c2f13"); + + private static final ByteBuf LZ4_HEADER = byteBuf("120020000491c94f"); + private static final ByteBuf LZ4_PAYLOAD_UNCOMPRESSED = + byteBuf("00000001000000010000000100000001"); + private static final ByteBuf LZ4_PAYLOAD_COMPRESSED = + byteBuf("f00100000001000000010000000100000001"); + private static final ByteBuf LZ4_TRAILER = byteBuf("2bd67f90"); + + private EmbeddedChannel channel; + + @BeforeMethod(groups = "unit") + public void setup() { + channel = new EmbeddedChannel(); + } + + @Test(groups = "unit") + public void should_decode_regular_segment() { + channel.pipeline().addLast(newDecoder(Compression.NONE)); + channel.writeInbound(Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); + Segment segment = (Segment) channel.readInbound(); + assertThat(segment.isSelfContained()).isTrue(); + assertThat(segment.getPayload()).isEqualTo(REGULAR_PAYLOAD); + } + + @Test(groups = "unit") + public void should_decode_max_length_segment() { + channel.pipeline().addLast(newDecoder(Compression.NONE)); + channel.writeInbound(Unpooled.wrappedBuffer(MAX_HEADER, MAX_PAYLOAD, MAX_TRAILER)); + Segment segment = (Segment) channel.readInbound(); + assertThat(segment.isSelfContained()).isTrue(); + assertThat(segment.getPayload()).isEqualTo(MAX_PAYLOAD); + } + + @Test(groups = "unit") + public void 
should_decode_segment_from_multiple_incoming_chunks() { + channel.pipeline().addLast(newDecoder(Compression.NONE)); + // Send the header in two slices, to cover the case where the length can't be read the first + // time: + ByteBuf headerStart = REGULAR_HEADER.slice(0, 3); + ByteBuf headerEnd = REGULAR_HEADER.slice(3, 3); + channel.writeInbound(headerStart); + channel.writeInbound(headerEnd); + channel.writeInbound(REGULAR_PAYLOAD.duplicate()); + channel.writeInbound(REGULAR_TRAILER.duplicate()); + Segment segment = (Segment) channel.readInbound(); + assertThat(segment.isSelfContained()).isTrue(); + assertThat(segment.getPayload()).isEqualTo(REGULAR_PAYLOAD); + } + + @Test(groups = "unit") + public void should_decode_compressed_segment() { + channel.pipeline().addLast(newDecoder(Compression.LZ4)); + // We need a contiguous buffer for this one, because of how our decompressor operates + ByteBuf buffer = Unpooled.wrappedBuffer(LZ4_HEADER, LZ4_PAYLOAD_COMPRESSED, LZ4_TRAILER).copy(); + channel.writeInbound(buffer); + Segment segment = (Segment) channel.readInbound(); + assertThat(segment.isSelfContained()).isTrue(); + assertThat(segment.getPayload()).isEqualTo(LZ4_PAYLOAD_UNCOMPRESSED); + } + + @Test(groups = "unit") + public void should_surface_header_crc_mismatch() { + try { + channel.pipeline().addLast(newDecoder(Compression.NONE)); + channel.writeInbound( + Unpooled.wrappedBuffer(REGULAR_WRONG_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); + } catch (DecoderException exception) { + assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); + } + } + + @Test(groups = "unit") + public void should_surface_trailer_crc_mismatch() { + try { + channel.pipeline().addLast(newDecoder(Compression.NONE)); + channel.writeInbound( + Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_WRONG_TRAILER)); + } catch (DecoderException exception) { + assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); + } + } + + private BytesToSegmentDecoder newDecoder(Compression compression) { + return new BytesToSegmentDecoder(new SegmentCodec(ByteBufAllocator.DEFAULT, compression)); + } + + private static ByteBuf byteBuf(String hex) { + return Unpooled.unreleasableBuffer( + Unpooled.unmodifiableBuffer(Unpooled.wrappedBuffer(ByteBufUtil.decodeHexDump(hex)))); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java b/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java index 04e6648255b..22b7504a1d2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,260 +19,272 @@ import java.io.Closeable; import java.io.File; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.List; import java.util.Map; public interface CCMAccess extends Closeable { - enum Workload {cassandra, solr, hadoop, spark, cfs, graph} - - // Inspection methods - - /** - * @return The CCM cluster name. - */ - String getClusterName(); - - /** - * Returns the Cassandra version of this CCM cluster. If {@link #getDSEVersion()} is non-null it is assumed - * that this value is only used for representing the compatible Cassandra version for that DSE version. - *
<p/>
    - * - * @return The version of this CCM cluster. - */ - VersionNumber getCassandraVersion(); - - /** - * Returns the DSE version of this CCM cluster if this is a DSE cluster, otherwise null. - *
<p/>
    - * - * @return The version of this CCM cluster. - */ - VersionNumber getDSEVersion(); - - /** - * @return The config directory for this CCM cluster. - */ - File getCcmDir(); - - /** - * @return The cluster directory for this CCM cluster. - */ - File getClusterDir(); - - /** - * @param n the node number, starting with 1. - * @return The node directory for this CCM cluster. - */ - File getNodeDir(int n); - - /** - * @param n the node number, starting with 1. - * @return The node config directory for this CCM cluster. - */ - File getNodeConfDir(int n); - - /** - * @return The storage port for this CCM cluster. - */ - int getStoragePort(); - - /** - * @return The thrift port for this CCM cluster. - */ - int getThriftPort(); - - /** - * @return The binary port for this CCM cluster. - */ - int getBinaryPort(); - - /** - * Signals that logs for this CCM cluster should be kept after the cluster is stopped. - */ - void setKeepLogs(boolean keepLogs); - - /** - * Returns the address of the {@code nth} host in the CCM cluster (counting from 1, i.e., - * {@code addressOfNode(1)} returns the address of the first node. - *
<p/>
    - * In multi-DC setups, nodes are numbered in ascending order of their datacenter number. - * E.g. with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. - * - * @return the address of the {@code nth} host in the cluster. - */ - InetSocketAddress addressOfNode(int n); - - - // Methods altering the whole cluster - - /** - * Starts the cluster. - */ - void start(); - - /** - * Stops the cluster. - */ - void stop(); - - /** - * Aggressively stops the cluster. - */ - void forceStop(); - - /** - * Alias for {@link #stop()}. - */ - @Override - void close(); - - /** - * Removes this CCM cluster and deletes all of its files. - */ - void remove(); - - /** - * Updates the config files for all nodes in the CCM cluster. - */ - void updateConfig(Map configs); - - /** - * Updates the DSE config files for all nodes in the CCM cluster. - */ - void updateDSEConfig(Map configs); - - /** - * Checks for errors in the logs of all nodes in the cluster. - */ - String checkForErrors(); - - // Methods altering nodes - - /** - * Starts the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void start(int n); - - /** - * Stops the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void stop(int n); - - /** - * Aggressively stops the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void forceStop(int n); - - /** - * Pauses the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void pause(int n); - - /** - * Resumes the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void resume(int n); - - /** - * Removes the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void remove(int n); - - /** - * Adds the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void add(int n); - - /** - * Adds the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void add(int dc, int n); - - /** - * Decommissions the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void decommission(int n); - - /** - * Updates the {@code nth} host's config file in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void updateNodeConfig(int n, String key, Object value); - - /** - * Updates the {@code nth} host's config file in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void updateNodeConfig(int n, Map configs); - - /** - * Updates the {@code nth} host's dse config file in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void updateDSENodeConfig(int n, String key, Object value); - - /** - * Updates the {@code nth} host's dse config file in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void updateDSENodeConfig(int n, Map configs); - - /** - * Sets the workload(s) for the {@code nth} host in the CCM cluster. - * - * @param n the node number (starting from 1). - */ - void setWorkload(int n, Workload... workload); - - - // Methods blocking until nodes are up or down - - /** - * Waits for a host to be up by pinging the TCP socket directly, without using the Java driver's API. - */ - void waitForUp(int node); - - /** - * Waits for a host to be down by pinging the TCP socket directly, without using the Java driver's API. 
- */ - void waitForDown(int node); - - /** - * @return The target protocolVersion to use when connecting to this CCM cluster. - *
<p/>
    - * This should be based on the highest protocol version that both the cluster and driver support. - *
<p/>
    - * For example, C* 2.0.17 should return {@link ProtocolVersion#V2} since C* supports up to V2 and the driver - * supports that version. - */ - ProtocolVersion getProtocolVersion(); - - /** - * @param maximumAllowed The maximum protocol version to use. - * @return The target protocolVersion or maximumAllowed if {@link #getProtocolVersion} is greater. - */ - ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed); + enum Workload { + cassandra, + solr, + hadoop, + spark, + cfs, + graph + } + + // Inspection methods + + /** @return The CCM cluster name. */ + String getClusterName(); + + /** + * Returns the Cassandra version of this CCM cluster. If {@link #getDSEVersion()} is non-null it + * is assumed that this value is only used for representing the compatible Cassandra version for + * that DSE version. + * + * @return The version of this CCM cluster. + */ + VersionNumber getCassandraVersion(); + + /** + * Returns the DSE version of this CCM cluster if this is a DSE cluster, otherwise null. + * + * @return The version of this CCM cluster. + */ + VersionNumber getDSEVersion(); + + /** @return The config directory for this CCM cluster. */ + File getCcmDir(); + + /** @return The cluster directory for this CCM cluster. */ + File getClusterDir(); + + /** + * @param n the node number, starting with 1. + * @return The node directory for this CCM cluster. + */ + File getNodeDir(int n); + + /** + * @param n the node number, starting with 1. + * @return The node config directory for this CCM cluster. + */ + File getNodeConfDir(int n); + + /** @return The storage port for this CCM cluster. */ + int getStoragePort(); + + /** @return The thrift port for this CCM cluster. */ + int getThriftPort(); + + /** @return The binary port for this CCM cluster. */ + int getBinaryPort(); + + /** Signals that logs for this CCM cluster should be kept after the cluster is stopped. */ + void setKeepLogs(boolean keepLogs); + + /** + * Returns the node count for each datacenter, mapped in the corresponding cell of the returned + * int array. + * + *
<p>
    This is the count that was passed at initialization (that is, the argument to {@link + * CCMBridge.Builder#withNodes(int...)} or {@link CCMConfig#numberOfNodes()}). Note that it will + * NOT be updated dynamically if nodes are added or removed at runtime. + * + * @return the node count for each datacenter. + */ + int[] getNodeCount(); + + /** + * Returns the contact points to use to contact the CCM cluster. + * + *
<p>
    This reflects the initial number of nodes in the cluster, as configured at initialization + * (that is, the argument to {@link CCMBridge.Builder#withNodes(int...)} or {@link + * CCMConfig#numberOfNodes()}). Note that it will NOT be updated dynamically if nodes are + * added or removed at runtime. + * + * @return the contact points to use to contact the CCM cluster. + */ + List getContactPoints(); + + /** + * Returns the address of the {@code nth} host in the CCM cluster (counting from 1, i.e., {@code + * addressOfNode(1)} returns the address of the first node. + * + *

In multi-DC setups, nodes are numbered in ascending order of their datacenter number. E.g. + * with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. + * + * @return the address of the {@code nth} host in the cluster. + */ + InetSocketAddress addressOfNode(int n); + + /** + * Returns the address that the {@code nth} host in the CCM cluster is listening on for JMX + * (counting from 1, i.e., {@code jmxAddressOfNode(1)} returns the JMX address of the first node). + * + *

    In multi-DC setups, nodes are numbered in ascending order of their datacenter number. E.g. + * with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. + * + * @return the address of the JMX listener of the {@code nth} host in the cluster + */ + InetSocketAddress jmxAddressOfNode(int n); + + // Methods altering the whole cluster + + /** Starts the cluster. */ + void start(); + + /** Stops the cluster. */ + void stop(); + + /** Aggressively stops the cluster. */ + void forceStop(); + + /** Alias for {@link #stop()}. */ + @Override + void close(); + + /** Removes this CCM cluster and deletes all of its files. */ + void remove(); + + /** Updates the config files for all nodes in the CCM cluster. */ + void updateConfig(Map configs); + + /** Updates the DSE config files for all nodes in the CCM cluster. */ + void updateDSEConfig(Map configs); + + /** Checks for errors in the logs of all nodes in the cluster. */ + String checkForErrors(); + + // Methods altering nodes + + /** + * Starts the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void start(int n); + + /** + * Stops the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void stop(int n); + + /** + * Aggressively stops the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void forceStop(int n); + + /** + * Pauses the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void pause(int n); + + /** + * Resumes the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void resume(int n); + + /** + * Removes the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void remove(int n); + + /** + * Adds the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void add(int n); + + /** + * Adds the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void add(int dc, int n); + + /** + * Decommissions the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void decommission(int n); + + /** + * Updates the {@code nth} host's config file in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void updateNodeConfig(int n, String key, Object value); + + /** + * Updates the {@code nth} host's config file in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void updateNodeConfig(int n, Map configs); + + /** + * Updates the {@code nth} host's dse config file in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void updateDSENodeConfig(int n, String key, Object value); + + /** + * Updates the {@code nth} host's dse config file in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void updateDSENodeConfig(int n, Map configs); + + /** + * Sets the workload(s) for the {@code nth} host in the CCM cluster. + * + * @param n the node number (starting from 1). + */ + void setWorkload(int n, Workload... workload); + + // Methods blocking until nodes are up or down + + /** + * Waits for a host to be up by pinging the TCP socket directly, without using the Java Driver's + * API. + */ + void waitForUp(int node); + + /** + * Waits for a host to be down by pinging the TCP socket directly, without using the Java Driver's + * API. 
+ */ + void waitForDown(int node); + + /** + * @return The target protocolVersion to use when connecting to this CCM cluster. + *

    This should be based on the highest protocol version that both the cluster and driver + * support. + *

    For example, C* 2.0.17 should return {@link ProtocolVersion#V2} since C* supports up to + * V2 and the driver supports that version. + */ + ProtocolVersion getProtocolVersion(); + + /** + * @param maximumAllowed The maximum protocol version to use. + * @return The target protocolVersion or maximumAllowed if {@link #getProtocolVersion} is greater. + */ + ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index e103f0d4c7e..5070e673070 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.TestUtils.executeNoFail; + import com.google.common.base.Joiner; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; @@ -23,1134 +27,1302 @@ import com.google.common.io.ByteStreams; import com.google.common.io.Closer; import com.google.common.io.Files; -import org.apache.commons.exec.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.*; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.net.InetAddress; import java.net.InetSocketAddress; -import java.util.*; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import static com.datastax.driver.core.TestUtils.*; +import org.apache.commons.exec.CommandLine; +import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.ExecuteStreamHandler; +import org.apache.commons.exec.ExecuteWatchdog; +import org.apache.commons.exec.Executor; +import org.apache.commons.exec.LogOutputStream; +import org.apache.commons.exec.PumpStreamHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class CCMBridge implements CCMAccess { - private static final Logger logger = LoggerFactory.getLogger(CCMBridge.class); - - private static final VersionNumber GLOBAL_CASSANDRA_VERSION_NUMBER; 
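For orientation only (not part of this diff): a minimal sketch of how a test might drive a cluster through the CCMAccess methods introduced above. It assumes an existing CCMAccess instance (for example obtained from the CCMBridge builder shown further down); the helper name `exerciseCluster` is hypothetical, and the element type of the contact-point list is inferred from the CCMBridge implementation.

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;

public class CcmAccessSketch {

  // Hypothetical helper: drives a CCM cluster through the CCMAccess interface.
  static void exerciseCluster(CCMAccess ccm) {
    ccm.start();                                            // start the whole cluster
    List<InetAddress> contactPoints = ccm.getContactPoints(); // initial topology only
    InetSocketAddress node1 = ccm.addressOfNode(1);         // node numbering starts at 1
    ccm.waitForUp(1);                                       // pings the TCP socket directly
    ProtocolVersion version =
        ccm.getProtocolVersion(ProtocolVersion.V4);         // capped at the given maximum
    ccm.stop(1);                                            // stop a single node...
    ccm.waitForDown(1);                                     // ...and wait for its port to close
    ccm.close();                                            // alias for stop()
  }
}
```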
- - private static final VersionNumber GLOBAL_DSE_VERSION_NUMBER; - - private static final Set CASSANDRA_INSTALL_ARGS; - - public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "cassandra1sfun"; - public static final String DEFAULT_CLIENT_TRUSTSTORE_PATH = "/client.truststore"; - - public static final File DEFAULT_CLIENT_TRUSTSTORE_FILE = createTempStore(DEFAULT_CLIENT_TRUSTSTORE_PATH); - - public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "cassandra1sfun"; - public static final String DEFAULT_CLIENT_KEYSTORE_PATH = "/client.keystore"; - - public static final File DEFAULT_CLIENT_KEYSTORE_FILE = createTempStore(DEFAULT_CLIENT_KEYSTORE_PATH); - - // Contain the same keypair as the client keystore, but in format usable by OpenSSL - public static final File DEFAULT_CLIENT_PRIVATE_KEY_FILE = createTempStore("/client.key"); - public static final File DEFAULT_CLIENT_CERT_CHAIN_FILE = createTempStore("/client.crt"); - - public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "cassandra1sfun"; - public static final String DEFAULT_SERVER_TRUSTSTORE_PATH = "/server.truststore"; - - private static final File DEFAULT_SERVER_TRUSTSTORE_FILE = createTempStore(DEFAULT_SERVER_TRUSTSTORE_PATH); - - public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "cassandra1sfun"; - public static final String DEFAULT_SERVER_KEYSTORE_PATH = "/server.keystore"; - - private static final File DEFAULT_SERVER_KEYSTORE_FILE = createTempStore(DEFAULT_SERVER_KEYSTORE_PATH); - - /** - * The environment variables to use when invoking CCM. Inherits the current processes environment, but will also - * prepend to the PATH variable the value of the 'ccm.path' property and set JAVA_HOME variable to the - * 'ccm.java.home' variable. - *

    - * At times it is necessary to use a separate java install for CCM then what is being used for running tests. - * For example, if you want to run tests with JDK 6 but against Cassandra 2.0, which requires JDK 7. - */ - private static final Map ENVIRONMENT_MAP; - - /** - * A mapping of full DSE versions to their C* counterpart. This is not meant to be comprehensive. - * If C* version cannot be derived, the method makes a 'best guess'. - */ - private static final Map dseToCassandraVersions = ImmutableMap.builder() - .put("5.0.4", "3.0.10") - .put("5.0.3", "3.0.9") - .put("5.0.2", "3.0.8") - .put("5.0.1", "3.0.7") - .put("5.0", "3.0.7") - .put("4.8.11", "2.1.17") - .put("4.8.10", "2.1.15") - .put("4.8.9", "2.1.15") - .put("4.8.8", "2.1.14") - .put("4.8.7", "2.1.14") - .put("4.8.6", "2.1.13") - .put("4.8.5", "2.1.13") - .put("4.8.4", "2.1.12") - .put("4.8.3", "2.1.11") - .put("4.8.2", "2.1.11") - .put("4.8.1", "2.1.11") - .put("4.8", "2.1.9") - .put("4.7.9", "2.1.15") - .put("4.7.8", "2.1.13") - .put("4.7.7", "2.1.12") - .put("4.7.6", "2.1.11") - .put("4.7.5", "2.1.11") - .put("4.7.4", "2.1.11") - .put("4.7.3", "2.1.8") - .put("4.7.2", "2.1.8") - .put("4.7.1", "2.1.5") - .put("4.6.11", "2.0.16") - .put("4.6.10", "2.0.16") - .put("4.6.9", "2.0.16") - .put("4.6.8", "2.0.16") - .put("4.6.7", "2.0.14") - .put("4.6.6", "2.0.14") - .put("4.6.5", "2.0.14") - .put("4.6.4", "2.0.14") - .put("4.6.3", "2.0.12") - .put("4.6.2", "2.0.12") - .put("4.6.1", "2.0.12") - .put("4.6", "2.0.11") - .put("4.5.9", "2.0.16") - .put("4.5.8", "2.0.14") - .put("4.5.7", "2.0.12") - .put("4.5.6", "2.0.12") - .put("4.5.5", "2.0.12") - .put("4.5.4", "2.0.11") - .put("4.5.3", "2.0.11") - .put("4.5.2", "2.0.10") - .put("4.5.1", "2.0.8") - .put("4.5", "2.0.8") - .put("4.0", "2.0") - .put("3.2", "1.2") - .put("3.1", "1.2") - .build(); - - /** - * The command to use to launch CCM - */ - private static final String CCM_COMMAND; - - static { - String inputCassandraVersion = System.getProperty("cassandra.version"); - String installDirectory = System.getProperty("cassandra.directory"); - String branch = System.getProperty("cassandra.branch"); - - String dseProperty = System.getProperty("dse"); - // If -Ddse, if the value is empty interpret it as enabled, - // otherwise if there is a value, parse as boolean. - boolean isDse = dseProperty != null && (dseProperty.isEmpty() || Boolean.parseBoolean(dseProperty)); - - ImmutableSet.Builder installArgs = ImmutableSet.builder(); - if (installDirectory != null && !installDirectory.trim().isEmpty()) { - installArgs.add("--install-dir=" + new File(installDirectory).getAbsolutePath()); - } else if (branch != null && !branch.trim().isEmpty()) { - installArgs.add("-v git:" + branch.trim().replaceAll("\"", "")); - } else { - installArgs.add("-v " + inputCassandraVersion); - } - - if (isDse) { - installArgs.add("--dse"); - } - - CASSANDRA_INSTALL_ARGS = installArgs.build(); - - // Inherit the current environment. - Map envMap = Maps.newHashMap(new ProcessBuilder().environment()); - // If ccm.path is set, override the PATH variable with it. - String ccmPath = System.getProperty("ccm.path"); - if (ccmPath != null) { - String existingPath = envMap.get("PATH"); - if (existingPath == null) { - existingPath = ""; - } - envMap.put("PATH", ccmPath + File.pathSeparator + existingPath); - } - - if (isWindows()) { - CCM_COMMAND = "powershell.exe -ExecutionPolicy Unrestricted ccm.py"; - } else { - CCM_COMMAND = "ccm"; - } - - // If ccm.java.home is set, override the JAVA_HOME variable with it. 
- String ccmJavaHome = System.getProperty("ccm.java.home"); - if (ccmJavaHome != null) { - envMap.put("JAVA_HOME", ccmJavaHome); - } - ENVIRONMENT_MAP = ImmutableMap.copyOf(envMap); - - if (isDse) { - GLOBAL_DSE_VERSION_NUMBER = VersionNumber.parse(inputCassandraVersion); - GLOBAL_CASSANDRA_VERSION_NUMBER = CCMBridge.getCassandraVersion(GLOBAL_DSE_VERSION_NUMBER); - logger.info("Tests requiring CCM will by default use DSE version {} (C* {}, install arguments: {})", - GLOBAL_DSE_VERSION_NUMBER, GLOBAL_CASSANDRA_VERSION_NUMBER, CASSANDRA_INSTALL_ARGS); - } else { - GLOBAL_CASSANDRA_VERSION_NUMBER = VersionNumber.parse(inputCassandraVersion); - GLOBAL_DSE_VERSION_NUMBER = null; - logger.info("Tests requiring CCM will by default use Cassandra version {} (install arguments: {})", - GLOBAL_CASSANDRA_VERSION_NUMBER, CASSANDRA_INSTALL_ARGS); - } + private static final Logger logger = LoggerFactory.getLogger(CCMBridge.class); + + private static final VersionNumber GLOBAL_CASSANDRA_VERSION_NUMBER; + + private static final VersionNumber GLOBAL_DSE_VERSION_NUMBER; + + private static final Set CASSANDRA_INSTALL_ARGS; + + public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_TRUSTSTORE_PATH = "/client.truststore"; + + public static final File DEFAULT_CLIENT_TRUSTSTORE_FILE = + createTempStore(DEFAULT_CLIENT_TRUSTSTORE_PATH); + + public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_KEYSTORE_PATH = "/client.keystore"; + + public static final File DEFAULT_CLIENT_KEYSTORE_FILE = + createTempStore(DEFAULT_CLIENT_KEYSTORE_PATH); + + // Contain the same keypair as the client keystore, but in format usable by OpenSSL + public static final File DEFAULT_CLIENT_PRIVATE_KEY_FILE = createTempStore("/client.key"); + public static final File DEFAULT_CLIENT_CERT_CHAIN_FILE = createTempStore("/client.crt"); + + public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_TRUSTSTORE_PATH = "/server.truststore"; + + private static final File DEFAULT_SERVER_TRUSTSTORE_FILE = + createTempStore(DEFAULT_SERVER_TRUSTSTORE_PATH); + + public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_KEYSTORE_PATH = "/server.keystore"; + + private static final File DEFAULT_SERVER_KEYSTORE_FILE = + createTempStore(DEFAULT_SERVER_KEYSTORE_PATH); + + /** + * The environment variables to use when invoking CCM. Inherits the current processes environment, + * but will also prepend to the PATH variable the value of the 'ccm.path' property and set + * JAVA_HOME variable to the 'ccm.java.home' variable. + * + *

    At times it is necessary to use a separate java install for CCM then what is being used for + * running tests. For example, if you want to run tests with JDK 6 but against Cassandra 2.0, + * which requires JDK 7. + */ + private static final Map ENVIRONMENT_MAP; + + /** + * A mapping of full DSE versions to their C* counterpart. This is not meant to be comprehensive. + * If C* version cannot be derived, the method makes a 'best guess'. + */ + private static final Map dseToCassandraVersions = + ImmutableMap.builder() + .put("5.1", "3.11") + .put("5.0.4", "3.0.10") + .put("5.0.3", "3.0.9") + .put("5.0.2", "3.0.8") + .put("5.0.1", "3.0.7") + .put("5.0", "3.0.7") + .put("4.8.11", "2.1.17") + .put("4.8.10", "2.1.15") + .put("4.8.9", "2.1.15") + .put("4.8.8", "2.1.14") + .put("4.8.7", "2.1.14") + .put("4.8.6", "2.1.13") + .put("4.8.5", "2.1.13") + .put("4.8.4", "2.1.12") + .put("4.8.3", "2.1.11") + .put("4.8.2", "2.1.11") + .put("4.8.1", "2.1.11") + .put("4.8", "2.1.9") + .put("4.7.9", "2.1.15") + .put("4.7.8", "2.1.13") + .put("4.7.7", "2.1.12") + .put("4.7.6", "2.1.11") + .put("4.7.5", "2.1.11") + .put("4.7.4", "2.1.11") + .put("4.7.3", "2.1.8") + .put("4.7.2", "2.1.8") + .put("4.7.1", "2.1.5") + .put("4.6.11", "2.0.16") + .put("4.6.10", "2.0.16") + .put("4.6.9", "2.0.16") + .put("4.6.8", "2.0.16") + .put("4.6.7", "2.0.14") + .put("4.6.6", "2.0.14") + .put("4.6.5", "2.0.14") + .put("4.6.4", "2.0.14") + .put("4.6.3", "2.0.12") + .put("4.6.2", "2.0.12") + .put("4.6.1", "2.0.12") + .put("4.6", "2.0.11") + .put("4.5.9", "2.0.16") + .put("4.5.8", "2.0.14") + .put("4.5.7", "2.0.12") + .put("4.5.6", "2.0.12") + .put("4.5.5", "2.0.12") + .put("4.5.4", "2.0.11") + .put("4.5.3", "2.0.11") + .put("4.5.2", "2.0.10") + .put("4.5.1", "2.0.8") + .put("4.5", "2.0.8") + .put("4.0", "2.0") + .put("3.2", "1.2") + .put("3.1", "1.2") + .build(); + + /** The command to use to launch CCM */ + private static final String CCM_COMMAND; + + static { + String inputCassandraVersion = System.getProperty("cassandra.version"); + String installDirectory = System.getProperty("cassandra.directory"); + String branch = System.getProperty("cassandra.branch"); + + ImmutableSet.Builder installArgs = ImmutableSet.builder(); + if (installDirectory != null && !installDirectory.trim().isEmpty()) { + installArgs.add("--install-dir=" + new File(installDirectory).getAbsolutePath()); + } else if (branch != null && !branch.trim().isEmpty()) { + installArgs.add("-v git:" + branch.trim().replaceAll("\"", "")); + } else { + installArgs.add("-v " + inputCassandraVersion); } - /** - * @return {@link VersionNumber} configured for Cassandra based on system properties. - */ - public static VersionNumber getGlobalCassandraVersion() { - return GLOBAL_CASSANDRA_VERSION_NUMBER; + if (isDse()) { + installArgs.add("--dse"); } - /** - * @return {@link VersionNumber} configured for DSE based on system properties. - */ - public static VersionNumber getGlobalDSEVersion() { - return GLOBAL_DSE_VERSION_NUMBER; + CASSANDRA_INSTALL_ARGS = installArgs.build(); + + // Inherit the current environment. + Map envMap = Maps.newHashMap(new ProcessBuilder().environment()); + // If ccm.path is set, override the PATH variable with it. + String ccmPath = System.getProperty("ccm.path"); + if (ccmPath != null) { + String existingPath = envMap.get("PATH"); + if (existingPath == null) { + existingPath = ""; + } + envMap.put("PATH", ccmPath + File.pathSeparator + existingPath); } - /** - * @return The mapped cassandra version to the given dseVersion. 
- * If the DSE version can't be derived the following logic is used: - *

- * 1. If <= 3.X, use C* 1.2
- * 2. If 4.X, use 2.1 for >= 4.7, 2.0 otherwise.
- * 3. Otherwise 3.0
    - */ - public static VersionNumber getCassandraVersion(VersionNumber dseVersion) { - String cassandraVersion = dseToCassandraVersions.get(dseVersion.toString()); - if (cassandraVersion != null) { - return VersionNumber.parse(cassandraVersion); - } else if (dseVersion.getMajor() <= 3) { - return VersionNumber.parse("1.2"); - } else if (dseVersion.getMajor() == 4) { - if (dseVersion.getMinor() >= 7) { - return VersionNumber.parse("2.1"); - } else { - return VersionNumber.parse("2.0"); - } - } else { - // Fallback on 3.0 by default. - return VersionNumber.parse("3.0"); - } + if (isWindows()) { + CCM_COMMAND = "powershell.exe -ExecutionPolicy Unrestricted ccm.py"; + } else { + CCM_COMMAND = "ccm"; } - /** - * Checks if the operating system is a Windows one - * - * @return true if the operating system is a Windows one, false otherwise. - */ - public static boolean isWindows() { - - String osName = System.getProperty("os.name"); - return osName != null && osName.startsWith("Windows"); + // If ccm.java.home is set, override the JAVA_HOME variable with it. + String ccmJavaHome = System.getProperty("ccm.java.home"); + if (ccmJavaHome != null) { + envMap.put("JAVA_HOME", ccmJavaHome); + } + ENVIRONMENT_MAP = ImmutableMap.copyOf(envMap); + + if (isDse()) { + GLOBAL_DSE_VERSION_NUMBER = VersionNumber.parse(inputCassandraVersion); + GLOBAL_CASSANDRA_VERSION_NUMBER = CCMBridge.getCassandraVersion(GLOBAL_DSE_VERSION_NUMBER); + logger.info( + "Tests requiring CCM will by default use DSE version {} (C* {}, install arguments: {})", + GLOBAL_DSE_VERSION_NUMBER, + GLOBAL_CASSANDRA_VERSION_NUMBER, + CASSANDRA_INSTALL_ARGS); + } else { + GLOBAL_CASSANDRA_VERSION_NUMBER = VersionNumber.parse(inputCassandraVersion); + GLOBAL_DSE_VERSION_NUMBER = null; + logger.info( + "Tests requiring CCM will by default use Cassandra version {} (install arguments: {})", + GLOBAL_CASSANDRA_VERSION_NUMBER, + CASSANDRA_INSTALL_ARGS); } + } + + /** @return {@link VersionNumber} configured for Cassandra based on system properties. */ + public static VersionNumber getGlobalCassandraVersion() { + return GLOBAL_CASSANDRA_VERSION_NUMBER; + } + + /** @return {@link VersionNumber} configured for DSE based on system properties. */ + public static VersionNumber getGlobalDSEVersion() { + return GLOBAL_DSE_VERSION_NUMBER; + } + + public static boolean isDse() { + // System property "dse" must be present and evaluate to TRUE to indicate DSE is enabled. + return Boolean.getBoolean("dse"); + } + + /** + * @return The mapped cassandra version to the given dseVersion. If the DSE version can't be + * derived the following logic is used: + *
+ * 1. If <= 3.X, use C* 1.2
+ * 2. If 4.X, use 2.1 for >= 4.7, 2.0 otherwise.
+ * 3. If 5.X, use 3.0 for 5.0, 3.11 otherwise.
+ * 4. Otherwise 4.0
    + */ + public static VersionNumber getCassandraVersion(VersionNumber dseVersion) { + String cassandraVersion = dseToCassandraVersions.get(dseVersion.toString()); + if (cassandraVersion != null) { + return VersionNumber.parse(cassandraVersion); + } else if (dseVersion.getMajor() <= 3) { + return VersionNumber.parse("1.2"); + } else if (dseVersion.getMajor() == 4) { + if (dseVersion.getMinor() >= 7) { + return VersionNumber.parse("2.1"); + } else { + return VersionNumber.parse("2.0"); + } + } else if (dseVersion.getMajor() == 5) { + if (dseVersion.getMinor() == 0) { + return VersionNumber.parse("3.0"); + } else { + return VersionNumber.parse("3.11"); + } + } else if (dseVersion.getMajor() == 6) { + if (dseVersion.getMinor() < 8) { + return VersionNumber.parse("3.11"); + } else { + return VersionNumber.parse("4.0"); + } + } else { + // Fallback on 4.0 by default. + return VersionNumber.parse("4.0"); + } + } - private final String clusterName; + /** + * Checks if the operating system is a Windows one + * + * @return true if the operating system is a Windows one, false + * otherwise. + */ + public static boolean isWindows() { - private final VersionNumber cassandraVersion; + String osName = System.getProperty("os.name"); + return osName != null && osName.startsWith("Windows"); + } - private final VersionNumber dseVersion; + private final String clusterName; - private final int storagePort; + private final VersionNumber cassandraVersion; - private final int thriftPort; + private final VersionNumber dseVersion; - private final int binaryPort; + private final int storagePort; - private final File ccmDir; + private final int thriftPort; - private final boolean isDSE; + private final int binaryPort; - private final String jvmArgs; + private final String ipPrefix; - private boolean keepLogs = false; + private final File ccmDir; - private boolean started = false; + private final boolean isDSE; - private boolean closed = false; + private final String jvmArgs; - private final int[] nodes; + private boolean keepLogs = false; - private CCMBridge(String clusterName, VersionNumber cassandraVersion, VersionNumber dseVersion, - int storagePort, int thriftPort, int binaryPort, String jvmArgs, int[] nodes) { - this.clusterName = clusterName; - this.cassandraVersion = cassandraVersion; - this.dseVersion = dseVersion; - this.storagePort = storagePort; - this.thriftPort = thriftPort; - this.binaryPort = binaryPort; - this.isDSE = dseVersion != null; - this.jvmArgs = jvmArgs; - this.nodes = nodes; - this.ccmDir = Files.createTempDir(); - } + private boolean started = false; - public static Builder builder() { - return new Builder(); - } + private boolean closed = false; - @Override - public String getClusterName() { - return clusterName; - } + private final int[] nodes; - @Override - public InetSocketAddress addressOfNode(int n) { - return new InetSocketAddress(TestUtils.ipOfNode(n), binaryPort); - } + private final int[] jmxPorts; - @Override - public VersionNumber getCassandraVersion() { - return cassandraVersion; - } + protected CCMBridge( + String clusterName, + VersionNumber cassandraVersion, + VersionNumber dseVersion, + String ipPrefix, + int storagePort, + int thriftPort, + int binaryPort, + int[] jmxPorts, + String jvmArgs, + int[] nodes) { - @Override - public VersionNumber getDSEVersion() { - return dseVersion; - } - - @Override - public File getCcmDir() { - return ccmDir; - } - - @Override - public File getClusterDir() { - return new File(ccmDir, clusterName); - } - - @Override - public File 
getNodeDir(int n) { - return new File(getClusterDir(), "node" + n); - } - - @Override - public File getNodeConfDir(int n) { - return new File(getNodeDir(n), "conf"); - } - - @Override - public int getStoragePort() { - return storagePort; - } - - @Override - public int getThriftPort() { - return thriftPort; - } + this.clusterName = clusterName; + this.cassandraVersion = cassandraVersion; + this.dseVersion = dseVersion; + this.ipPrefix = ipPrefix; + this.storagePort = storagePort; + this.thriftPort = thriftPort; + this.binaryPort = binaryPort; + this.isDSE = dseVersion != null; + this.jvmArgs = jvmArgs; + this.nodes = nodes; + this.ccmDir = Files.createTempDir(); + this.jmxPorts = jmxPorts; + } - @Override - public int getBinaryPort() { - return binaryPort; - } + public static Builder builder() { + return new Builder(); + } - @Override - public void setKeepLogs(boolean keepLogs) { - this.keepLogs = keepLogs; - } + @Override + public String getClusterName() { + return clusterName; + } - @Override - public synchronized void close() { - if (closed) - return; - logger.debug("Closing: {}", this); - if (keepLogs) { - executeNoFail(new Runnable() { - @Override - public void run() { - stop(); - } - }, false); - logger.info("Error during tests, kept C* logs in " + getCcmDir()); - } else { - executeNoFail(new Runnable() { - @Override - public void run() { - remove(); - } - }, false); - executeNoFail(new Runnable() { - @Override - public void run() { - org.assertj.core.util.Files.delete(getCcmDir()); - } - }, false); - } - closed = true; - logger.debug("Closed: {}", this); - } + @Override + public int[] getNodeCount() { + return Arrays.copyOf(nodes, nodes.length); + } - /** - * Based on C* version, return the wait arguments. - * - * @return For C* 1.x, --wait-other-notice otherwise --no-wait - */ - private String getStartWaitArguments() { - // make a small exception for C* 1.2 as it has a bug where it starts listening on the binary - // interface slightly before it joins the cluster. 
- if (this.cassandraVersion.getMajor() == 1) { - return " --wait-other-notice"; - } else { - return " --no-wait"; + @Override + public List getContactPoints() { + List contactPoints = new ArrayList(); + int n = 1; + for (int dc = 1; dc <= nodes.length; dc++) { + int nodesInDc = nodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + try { + contactPoints.add(InetAddress.getByName(ipOfNode(n))); + } catch (UnknownHostException e) { + Throwables.propagate(e); } + n++; + } } - - @Override - public synchronized void start() { - if (started) - return; - if (logger.isDebugEnabled()) - logger.debug("Starting: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); - try { - String cmd = CCM_COMMAND + " start " + jvmArgs + getStartWaitArguments(); - if (isWindows() && this.cassandraVersion.compareTo(VersionNumber.parse("2.2.4")) >= 0) { - cmd += " --quiet-windows"; + return contactPoints; + } + + protected String ipOfNode(int n) { + return ipPrefix + n; + } + + @Override + public InetSocketAddress addressOfNode(int n) { + return new InetSocketAddress(ipOfNode(n), binaryPort); + } + + @Override + public InetSocketAddress jmxAddressOfNode(int n) { + return new InetSocketAddress("localhost", jmxPorts[n - 1]); + } + + @Override + public VersionNumber getCassandraVersion() { + return cassandraVersion; + } + + @Override + public VersionNumber getDSEVersion() { + return dseVersion; + } + + @Override + public File getCcmDir() { + return ccmDir; + } + + @Override + public File getClusterDir() { + return new File(ccmDir, clusterName); + } + + @Override + public File getNodeDir(int n) { + return new File(getClusterDir(), "node" + n); + } + + @Override + public File getNodeConfDir(int n) { + return new File(getNodeDir(n), "conf"); + } + + @Override + public int getStoragePort() { + return storagePort; + } + + @Override + public int getThriftPort() { + return thriftPort; + } + + @Override + public int getBinaryPort() { + return binaryPort; + } + + @Override + public void setKeepLogs(boolean keepLogs) { + this.keepLogs = keepLogs; + } + + @Override + public synchronized void close() { + if (closed) return; + logger.debug("Closing: {}", this); + if (keepLogs) { + executeNoFail( + new Runnable() { + @Override + public void run() { + stop(); } - execute(cmd); - - // Wait for binary interface on each node. 
- int n = 1; - for (int dc = 1; dc <= nodes.length; dc++) { - int nodesInDc = nodes[dc - 1]; - for (int i = 0; i < nodesInDc; i++) { - InetSocketAddress addr = new InetSocketAddress(ipOfNode(n), binaryPort); - logger.debug("Waiting for binary protocol to show up for {}", addr); - TestUtils.waitUntilPortIsUp(addr); - n++; - } + }, + false); + logger.info("Error during tests, kept C* logs in " + getCcmDir()); + } else { + executeNoFail( + new Runnable() { + @Override + public void run() { + remove(); } - } catch (CCMException e) { - logger.error("Could not start " + this, e); - logger.error("CCM output:\n{}", e.getOut()); - setKeepLogs(true); - String errors = checkForErrors(); - if (errors != null) - logger.error("CCM check errors:\n{}", errors); - throw e; - } - if (logger.isDebugEnabled()) - logger.debug("Started: {} - Free memory: {} MB", this, TestUtils.getFreeMemoryMB()); - started = true; - } - - @Override - public synchronized void stop() { - if (closed) - return; - if (logger.isDebugEnabled()) - logger.debug("Stopping: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); - execute(CCM_COMMAND + " stop"); - if (logger.isDebugEnabled()) - logger.debug("Stopped: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); - closed = true; - } - - @Override - public synchronized void forceStop() { - if (closed) - return; - logger.debug("Force stopping: {}", this); - execute(CCM_COMMAND + " stop --not-gently"); - closed = true; + }, + false); + executeNoFail( + new Runnable() { + @Override + public void run() { + org.assertj.core.util.Files.delete(getCcmDir()); + } + }, + false); } - - @Override - public synchronized void remove() { - stop(); - logger.debug("Removing: {}", this); - execute(CCM_COMMAND + " remove"); + closed = true; + logger.debug("Closed: {}", this); + } + + /** + * Based on C* version, return the wait arguments. + * + * @return For C* 1.x, --wait-other-notice otherwise --no-wait + */ + private String getStartWaitArguments() { + // make a small exception for C* 1.2 as it has a bug where it starts listening on the binary + // interface slightly before it joins the cluster. + if (this.cassandraVersion.getMajor() == 1) { + return " --wait-other-notice"; } - - @Override - public String checkForErrors() { - logger.debug("Checking for errors in: {}", this); - try { - return execute(CCM_COMMAND + " checklogerror"); - } catch (CCMException e) { - logger.warn("Check for errors failed"); - return null; + return ""; + } + + @Override + public synchronized void start() { + if (started) return; + if (logger.isDebugEnabled()) + logger.debug("Starting: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); + try { + String cmd = CCM_COMMAND + " start " + jvmArgs + getStartWaitArguments(); + if (isWindows() && this.cassandraVersion.compareTo(VersionNumber.parse("2.2.4")) >= 0) { + cmd += " --quiet-windows"; + } + execute(cmd); + + // Wait for binary interface on each node. 
+ int n = 1; + for (int dc = 1; dc <= nodes.length; dc++) { + int nodesInDc = nodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + InetSocketAddress addr = addressOfNode(n); + logger.debug("Waiting for binary protocol to show up for {}", addr); + TestUtils.waitUntilPortIsUp(addr); + n++; } + } + } catch (CCMException e) { + logger.error("Could not start " + this, e); + logger.error("CCM output:\n{}", e.getOut()); + setKeepLogs(true); + String errors = checkForErrors(); + if (errors != null) logger.error("CCM check errors:\n{}", errors); + throw e; } - - @Override - public void start(int n) { - logger.debug(String.format("Starting: node %s (%s%s:%s) in %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - try { - String cmd = CCM_COMMAND + " node%d start " + jvmArgs + getStartWaitArguments(); - if (isWindows() && this.cassandraVersion.compareTo(VersionNumber.parse("2.2.4")) >= 0) { - cmd += " --quiet-windows"; - } - execute(cmd, n); - // Wait for binary interface - InetSocketAddress addr = new InetSocketAddress(ipOfNode(n), binaryPort); - logger.debug("Waiting for binary protocol to show up for {}", addr); - TestUtils.waitUntilPortIsUp(addr); - } catch (CCMException e) { - logger.error(String.format("Could not start node %s in %s", n, this), e); - logger.error("CCM output:\n{}", e.getOut()); - setKeepLogs(true); - String errors = checkForErrors(); - if (errors != null) - logger.error("CCM check errors:\n{}", errors); - throw e; - } + if (logger.isDebugEnabled()) + logger.debug("Started: {} - Free memory: {} MB", this, TestUtils.getFreeMemoryMB()); + started = true; + } + + @Override + public synchronized void stop() { + if (closed) return; + if (logger.isDebugEnabled()) + logger.debug("Stopping: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); + execute(CCM_COMMAND + " stop"); + if (logger.isDebugEnabled()) + logger.debug("Stopped: {} - free memory: {} MB", this, TestUtils.getFreeMemoryMB()); + closed = true; + } + + @Override + public synchronized void forceStop() { + if (closed) return; + logger.debug("Force stopping: {}", this); + execute(CCM_COMMAND + " stop --not-gently"); + closed = true; + } + + @Override + public synchronized void remove() { + stop(); + logger.debug("Removing: {}", this); + execute(CCM_COMMAND + " remove"); + } + + @Override + public String checkForErrors() { + logger.debug("Checking for errors in: {}", this); + try { + return execute(CCM_COMMAND + " checklogerror"); + } catch (CCMException e) { + logger.warn("Check for errors failed"); + return null; } - - @Override - public void stop(int n) { - logger.debug(String.format("Stopping: node %s (%s%s:%s) in %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - execute(CCM_COMMAND + " node%d stop", n); + } + + @Override + public void start(int n) { + logger.debug( + String.format("Starting: node %s (%s%s:%s) in %s", n, ipPrefix, n, binaryPort, this)); + try { + String cmd = CCM_COMMAND + " node%d start " + jvmArgs + getStartWaitArguments(); + if (isWindows() && this.cassandraVersion.compareTo(VersionNumber.parse("2.2.4")) >= 0) { + cmd += " --quiet-windows"; + } + execute(cmd, n); + // Wait for binary interface + InetSocketAddress addr = new InetSocketAddress(ipOfNode(n), binaryPort); + logger.debug("Waiting for binary protocol to show up for {}", addr); + TestUtils.waitUntilPortIsUp(addr); + } catch (CCMException e) { + logger.error(String.format("Could not start node %s in %s", n, this), e); + logger.error("CCM output:\n{}", e.getOut()); + setKeepLogs(true); + String errors = checkForErrors(); + if 
(errors != null) logger.error("CCM check errors:\n{}", errors); + throw e; } - - @Override - public void forceStop(int n) { - logger.debug(String.format("Force stopping: node %s (%s%s:%s) in %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - execute(CCM_COMMAND + " node%d stop --not-gently", n); + } + + @Override + public void stop(int n) { + logger.debug( + String.format("Stopping: node %s (%s%s:%s) in %s", n, ipPrefix, n, binaryPort, this)); + execute(CCM_COMMAND + " node%d stop", n); + } + + @Override + public void forceStop(int n) { + logger.debug( + String.format("Force stopping: node %s (%s%s:%s) in %s", n, ipPrefix, n, binaryPort, this)); + execute(CCM_COMMAND + " node%d stop --not-gently", n); + } + + @Override + public void pause(int n) { + logger.debug( + String.format("Pausing: node %s (%s%s:%s) in %s", n, ipPrefix, n, binaryPort, this)); + execute(CCM_COMMAND + " node%d pause", n); + } + + @Override + public void resume(int n) { + logger.debug( + String.format("Resuming: node %s (%s%s:%s) in %s", n, ipPrefix, n, binaryPort, this)); + execute(CCM_COMMAND + " node%d resume", n); + } + + @Override + public void remove(int n) { + logger.debug( + String.format("Removing: node %s (%s%s:%s) from %s", n, ipPrefix, n, binaryPort, this)); + execute(CCM_COMMAND + " node%d remove", n); + } + + @Override + public void add(int n) { + add(1, n); + } + + @Override + public void add(int dc, int n) { + logger.debug( + String.format("Adding: node %s (%s%s:%s) to %s", n, ipPrefix, n, binaryPort, this)); + String thriftItf = ipOfNode(n) + ":" + thriftPort; + String storageItf = ipOfNode(n) + ":" + storagePort; + String binaryItf = ipOfNode(n) + ":" + binaryPort; + String remoteLogItf = ipOfNode(n) + ":" + TestUtils.findAvailablePort(); + execute( + CCM_COMMAND + + " add node%d -d dc%s -i %s%d -t %s -l %s --binary-itf %s -j %d -r %s -s -b" + + (isDSE ? 
" --dse" : ""), + n, + dc, + ipPrefix, + n, + thriftItf, + storageItf, + binaryItf, + TestUtils.findAvailablePort(), + remoteLogItf); + } + + @Override + public void decommission(int n) { + logger.debug( + String.format( + "Decommissioning: node %s (%s%s:%s) from %s", n, ipPrefix, n, binaryPort, this)); + // Special case for C* 3.12+, DSE 5.1+, force decommission (see CASSANDRA-12510) + String cmd = CCM_COMMAND + " node%d decommission"; + if (this.cassandraVersion.compareTo(VersionNumber.parse("3.12")) >= 0 + || (this.dseVersion != null + && this.dseVersion.compareTo(VersionNumber.parse("5.1.0")) >= 0)) { + cmd += " --force"; } - - @Override - public void pause(int n) { - logger.debug(String.format("Pausing: node %s (%s%s:%s) in %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - execute(CCM_COMMAND + " node%d pause", n); + execute(cmd, n); + } + + @Override + public void updateConfig(Map configs) { + StringBuilder confStr = new StringBuilder(); + for (Map.Entry entry : configs.entrySet()) { + confStr.append(entry.getKey()).append(":").append(entry.getValue()).append(" "); } - - @Override - public void resume(int n) { - logger.debug(String.format("Resuming: node %s (%s%s:%s) in %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - execute(CCM_COMMAND + " node%d resume", n); + execute(CCM_COMMAND + " updateconf " + confStr); + } + + @Override + public void updateDSEConfig(Map configs) { + StringBuilder confStr = new StringBuilder(); + for (Map.Entry entry : configs.entrySet()) { + confStr.append(entry.getKey()).append(":").append(entry.getValue()).append(" "); } - - @Override - public void remove(int n) { - logger.debug(String.format("Removing: node %s (%s%s:%s) from %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - execute(CCM_COMMAND + " node%d remove", n); + execute(CCM_COMMAND + " updatedseconf " + confStr); + } + + @Override + public void updateNodeConfig(int n, String key, Object value) { + updateNodeConfig(n, ImmutableMap.builder().put(key, value).build()); + } + + @Override + public void updateNodeConfig(int n, Map configs) { + StringBuilder confStr = new StringBuilder(); + for (Map.Entry entry : configs.entrySet()) { + confStr.append(entry.getKey()).append(":").append(entry.getValue()).append(" "); } - - @Override - public void add(int n) { - add(1, n); + execute(CCM_COMMAND + " node%s updateconf %s", n, confStr); + } + + @Override + public void updateDSENodeConfig(int n, String key, Object value) { + updateDSENodeConfig(n, ImmutableMap.builder().put(key, value).build()); + } + + @Override + public void updateDSENodeConfig(int n, Map configs) { + StringBuilder confStr = new StringBuilder(); + for (Map.Entry entry : configs.entrySet()) { + confStr.append(entry.getKey()).append(":").append(entry.getValue()).append(" "); } - - @Override - public void add(int dc, int n) { - logger.debug(String.format("Adding: node %s (%s%s:%s) to %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - String thriftItf = TestUtils.ipOfNode(n) + ":" + thriftPort; - String storageItf = TestUtils.ipOfNode(n) + ":" + storagePort; - String binaryItf = TestUtils.ipOfNode(n) + ":" + binaryPort; - String remoteLogItf = TestUtils.ipOfNode(n) + ":" + TestUtils.findAvailablePort(); - execute(CCM_COMMAND + " add node%d -d dc%s -i %s%d -t %s -l %s --binary-itf %s -j %d -r %s -s -b" + (isDSE ? 
" --dse" : ""), - n, dc, TestUtils.IP_PREFIX, n, thriftItf, storageItf, binaryItf, TestUtils.findAvailablePort(), remoteLogItf); + execute(CCM_COMMAND + " node%s updatedseconf %s", n, confStr); + } + + @Override + public void setWorkload(int node, Workload... workload) { + String workloadStr = Joiner.on(",").join(workload); + execute(CCM_COMMAND + " node%d setworkload %s", node, workloadStr); + } + + private String execute(String command, Object... args) { + String fullCommand = String.format(command, args) + " --config-dir=" + ccmDir; + Closer closer = Closer.create(); + // 10 minutes timeout + ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); + StringWriter sw = new StringWriter(); + final PrintWriter pw = new PrintWriter(sw); + closer.register(pw); + try { + logger.trace("Executing: " + fullCommand); + CommandLine cli = CommandLine.parse(fullCommand); + Executor executor = new DefaultExecutor(); + LogOutputStream outStream = + new LogOutputStream() { + @Override + protected void processLine(String line, int logLevel) { + String out = "ccmout> " + line; + logger.debug(out); + pw.println(out); + } + }; + LogOutputStream errStream = + new LogOutputStream() { + @Override + protected void processLine(String line, int logLevel) { + String err = "ccmerr> " + line; + logger.error(err); + pw.println(err); + } + }; + closer.register(outStream); + closer.register(errStream); + ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); + executor.setStreamHandler(streamHandler); + executor.setWatchdog(watchDog); + int retValue = executor.execute(cli, ENVIRONMENT_MAP); + if (retValue != 0) { + logger.error( + "Non-zero exit code ({}) returned from executing ccm command: {}", + retValue, + fullCommand); + pw.flush(); + throw new CCMException( + String.format( + "Non-zero exit code (%s) returned from executing ccm command: %s", + retValue, fullCommand), + sw.toString()); + } + } catch (IOException e) { + if (watchDog.killedProcess()) + logger.error("The command {} was killed after 10 minutes", fullCommand); + pw.flush(); + throw new CCMException( + String.format("The command %s failed to execute", fullCommand), sw.toString(), e); + } finally { + try { + closer.close(); + } catch (IOException e) { + Throwables.propagate(e); + } } - - @Override - public void decommission(int n) { - logger.debug(String.format("Decommissioning: node %s (%s%s:%s) from %s", n, TestUtils.IP_PREFIX, n, binaryPort, this)); - // Special case for C* 3.12+, DSE 5.1+, force decommission (see CASSANDRA-12510) - String cmd = CCM_COMMAND + " node%d decommission"; - if (this.cassandraVersion.compareTo(VersionNumber.parse("3.12")) >= 0) { - cmd += " --force"; - } - execute(cmd, n); + return sw.toString(); + } + + /** + * Waits for a host to be up by pinging the TCP socket directly, without using the Java Driver's + * API. + */ + @Override + public void waitForUp(int node) { + TestUtils.waitUntilPortIsUp(addressOfNode(node)); + } + + /** + * Waits for a host to be down by pinging the TCP socket directly, without using the Java Driver's + * API. 
+ */ + @Override + public void waitForDown(int node) { + TestUtils.waitUntilPortIsDown(addressOfNode(node)); + } + + @Override + public ProtocolVersion getProtocolVersion() { + VersionNumber version = getCassandraVersion(); + if (version.compareTo(VersionNumber.parse("2.0")) < 0) { + return ProtocolVersion.V1; + } else if (version.compareTo(VersionNumber.parse("2.1")) < 0) { + return ProtocolVersion.V2; + } else if (version.compareTo(VersionNumber.parse("2.2")) < 0) { + return ProtocolVersion.V3; + } else if (version.compareTo(VersionNumber.parse("4.0")) < 0) { + return ProtocolVersion.V4; + } else { + return ProtocolVersion.V5; } - - @Override - public void updateConfig(Map configs) { - StringBuilder confStr = new StringBuilder(); - for (Map.Entry entry : configs.entrySet()) { - confStr - .append(entry.getKey()) - .append(":") - .append(entry.getValue()) - .append(" "); - } - execute(CCM_COMMAND + " updateconf " + confStr); + } + + @Override + public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { + ProtocolVersion versionToUse = getProtocolVersion(); + return versionToUse.compareTo(maximumAllowed) > 0 ? maximumAllowed : versionToUse; + } + + /** + * Extracts a keystore from the classpath into a temporary file. + * + *

    + * + *

    This is needed as the keystore could be part of a built test jar used by other projects, and + * they need to be extracted to a file system so cassandra may use them. + * + * @param storePath Path in classpath where the keystore exists. + * @return The generated File. + */ + private static File createTempStore(String storePath) { + File f = null; + Closer closer = Closer.create(); + try { + InputStream trustStoreIs = CCMBridge.class.getResourceAsStream(storePath); + closer.register(trustStoreIs); + f = File.createTempFile("server", ".store"); + logger.debug("Created store file {} for {}.", f, storePath); + OutputStream trustStoreOs = new FileOutputStream(f); + closer.register(trustStoreOs); + ByteStreams.copy(trustStoreIs, trustStoreOs); + } catch (IOException e) { + logger.warn("Failure to write keystore, SSL-enabled servers may fail to start.", e); + } finally { + try { + closer.close(); + } catch (IOException e) { + logger.warn("Failure closing streams.", e); + } } - - @Override - public void updateDSEConfig(Map configs) { - StringBuilder confStr = new StringBuilder(); - for (Map.Entry entry : configs.entrySet()) { - confStr - .append(entry.getKey()) - .append(":") - .append(entry.getValue()) - .append(" "); - } - execute(CCM_COMMAND + " updatedseconf " + confStr); + return f; + } + + @Override + public String toString() { + return "CCM cluster " + clusterName; + } + + @Override + protected void finalize() throws Throwable { + logger.debug("GC'ing {}", this); + close(); + super.finalize(); + } + + /** use {@link #builder()} to get an instance */ + public static class Builder { + + public static final String RANDOM_PORT = "__RANDOM_PORT__"; + private static final Pattern RANDOM_PORT_PATTERN = Pattern.compile(RANDOM_PORT); + + private String ipPrefix = TestUtils.IP_PREFIX; + int[] nodes = {1}; + private int[] jmxPorts = {}; + private boolean start = true; + private boolean dse = isDse(); + private VersionNumber version = null; + private final Set createOptions = new LinkedHashSet(); + private final Set jvmArgs = new LinkedHashSet(); + private final Map cassandraConfiguration = Maps.newLinkedHashMap(); + private final Map dseConfiguration = Maps.newLinkedHashMap(); + private final Map workloads = new HashMap(); + + private Builder() { + cassandraConfiguration.put("start_rpc", false); + cassandraConfiguration.put("storage_port", RANDOM_PORT); + cassandraConfiguration.put("rpc_port", RANDOM_PORT); + cassandraConfiguration.put("native_transport_port", RANDOM_PORT); } - @Override - public void updateNodeConfig(int n, String key, Object value) { - updateNodeConfig(n, ImmutableMap.builder().put(key, value).build()); + /** + * IP Prefix to use for the address of each node. Its format has to be like {@code "127.1.1."}. + */ + public Builder withIpPrefix(String ipPrefix) { + this.ipPrefix = ipPrefix; + return this; } - @Override - public void updateNodeConfig(int n, Map configs) { - StringBuilder confStr = new StringBuilder(); - for (Map.Entry entry : configs.entrySet()) { - confStr - .append(entry.getKey()) - .append(":") - .append(entry.getValue()) - .append(" "); - } - execute(CCM_COMMAND + " node%s updateconf %s", n, confStr); + /** Number of hosts for each DC. Defaults to [1] (1 DC with 1 node) */ + public Builder withNodes(int... 
nodes) { + this.nodes = nodes; + return this; } - @Override - public void updateDSENodeConfig(int n, String key, Object value) { - updateDSENodeConfig(n, ImmutableMap.builder().put(key, value).build()); + public Builder withoutNodes() { + return withNodes(); } - @Override - public void updateDSENodeConfig(int n, Map configs) { - StringBuilder confStr = new StringBuilder(); - for (Map.Entry entry : configs.entrySet()) { - confStr - .append(entry.getKey()) - .append(":") - .append(entry.getValue()) - .append(" "); - } - execute(CCM_COMMAND + " node%s updatedseconf %s", n, confStr); + /** Enables SSL encryption. */ + public Builder withSSL() { + cassandraConfiguration.put("client_encryption_options.enabled", "true"); + cassandraConfiguration.put("client_encryption_options.optional", "false"); + cassandraConfiguration.put( + "client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); + cassandraConfiguration.put( + "client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); + return this; } - @Override - public void setWorkload(int node, Workload... workload) { - String workloadStr = Joiner.on(",").join(workload); - execute(CCM_COMMAND + " node%d setworkload %s", node, workloadStr); + /** Enables client authentication. This also enables encryption ({@link #withSSL()}. */ + public Builder withAuth() { + withSSL(); + cassandraConfiguration.put("client_encryption_options.require_client_auth", "true"); + cassandraConfiguration.put( + "client_encryption_options.truststore", DEFAULT_SERVER_TRUSTSTORE_FILE.getAbsolutePath()); + cassandraConfiguration.put( + "client_encryption_options.truststore_password", DEFAULT_SERVER_TRUSTSTORE_PASSWORD); + return this; } - private String execute(String command, Object... args) { - String fullCommand = String.format(command, args) + " --config-dir=" + ccmDir; - Closer closer = Closer.create(); - // 10 minutes timeout - ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); - StringWriter sw = new StringWriter(); - final PrintWriter pw = new PrintWriter(sw); - closer.register(pw); - try { - logger.trace("Executing: " + fullCommand); - CommandLine cli = CommandLine.parse(fullCommand); - Executor executor = new DefaultExecutor(); - LogOutputStream outStream = new LogOutputStream() { - @Override - protected void processLine(String line, int logLevel) { - String out = "ccmout> " + line; - logger.debug(out); - pw.println(out); - } - }; - LogOutputStream errStream = new LogOutputStream() { - @Override - protected void processLine(String line, int logLevel) { - String err = "ccmerr> " + line; - logger.error(err); - pw.println(err); - } - }; - closer.register(outStream); - closer.register(errStream); - ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); - executor.setStreamHandler(streamHandler); - executor.setWatchdog(watchDog); - int retValue = executor.execute(cli, ENVIRONMENT_MAP); - if (retValue != 0) { - logger.error("Non-zero exit code ({}) returned from executing ccm command: {}", retValue, fullCommand); - pw.flush(); - throw new CCMException(String.format("Non-zero exit code (%s) returned from executing ccm command: %s", retValue, fullCommand), sw.toString()); - } - } catch (IOException e) { - if (watchDog.killedProcess()) - logger.error("The command {} was killed after 10 minutes", fullCommand); - pw.flush(); - throw new CCMException(String.format("The command %s failed to execute", fullCommand), sw.toString(), e); - } finally { - try { - closer.close(); - } 
catch (IOException e) { - Throwables.propagate(e); - } - } - return sw.toString(); + /** Whether to start the cluster immediately (defaults to true if this is never called). */ + public Builder notStarted() { + this.start = false; + return this; } /** - * Waits for a host to be up by pinging the TCP socket directly, without using the Java driver's API. + * The Cassandra or DSE version to use. If not specified the globally configured version is used + * instead. */ - @Override - public void waitForUp(int node) { - TestUtils.waitUntilPortIsUp(addressOfNode(node)); + public Builder withVersion(VersionNumber version) { + this.version = version; + return this; + } + + /** Indicates whether or not this cluster is meant to be a DSE cluster. */ + public Builder withDSE(boolean dse) { + this.dse = dse; + return this; } /** - * Waits for a host to be down by pinging the TCP socket directly, without using the Java driver's API. + * Free-form options that will be added at the end of the {@code ccm create} command (defaults + * to {@link #CASSANDRA_INSTALL_ARGS} if this is never called). */ - @Override - public void waitForDown(int node) { - TestUtils.waitUntilPortIsDown(addressOfNode(node)); + public Builder withCreateOptions(String... createOptions) { + Collections.addAll(this.createOptions, createOptions); + return this; } - @Override - public ProtocolVersion getProtocolVersion() { - VersionNumber version = getCassandraVersion(); - if (version.compareTo(VersionNumber.parse("2.0")) < 0) { - return ProtocolVersion.V1; - } else if (version.compareTo(VersionNumber.parse("2.1")) < 0) { - return ProtocolVersion.V2; - } else if (version.compareTo(VersionNumber.parse("2.2")) < 0) { - return ProtocolVersion.V3; - } else { - return ProtocolVersion.V4; - } + /** Customizes entries in cassandra.yaml (can be called multiple times) */ + public Builder withCassandraConfiguration(String key, Object value) { + this.cassandraConfiguration.put(key, value); + return this; } - @Override - public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { - ProtocolVersion versionToUse = getProtocolVersion(); - return versionToUse.compareTo(maximumAllowed) > 0 ? maximumAllowed : versionToUse; + /** Customizes entries in dse.yaml (can be called multiple times) */ + public Builder withDSEConfiguration(String key, Object value) { + this.dseConfiguration.put(key, value); + return this; } /** - *

- * Extracts a keystore from the classpath into a temporary file.
- *
- * This is needed as the keystore could be part of a built test jar used by other
- * projects, and they need to be extracted to a file system so cassandra may use them.
- *

    - * - * @param storePath Path in classpath where the keystore exists. - * @return The generated File. + * JVM args to use when starting hosts. System properties should be provided one by one, as a + * string in the form: {@code -Dname=value}. */ - private static File createTempStore(String storePath) { - File f = null; - Closer closer = Closer.create(); - try { - InputStream trustStoreIs = CCMBridge.class.getResourceAsStream(storePath); - closer.register(trustStoreIs); - f = File.createTempFile("server", ".store"); - logger.debug("Created store file {} for {}.", f, storePath); - OutputStream trustStoreOs = new FileOutputStream(f); - closer.register(trustStoreOs); - ByteStreams.copy(trustStoreIs, trustStoreOs); - } catch (IOException e) { - logger.warn("Failure to write keystore, SSL-enabled servers may fail to start.", e); - } finally { - try { - closer.close(); - } catch (IOException e) { - logger.warn("Failure closing streams.", e); - } - } - return f; + public Builder withJvmArgs(String... jvmArgs) { + Collections.addAll(this.jvmArgs, jvmArgs); + return this; } - @Override - public String toString() { - return "CCM cluster " + clusterName; + public Builder withStoragePort(int port) { + cassandraConfiguration.put("storage_port", port); + return this; } - @Override - protected void finalize() throws Throwable { - logger.debug("GC'ing {}", this); - close(); - super.finalize(); + public Builder withThriftPort(int port) { + cassandraConfiguration.put("rpc_port", port); + return this; } - /** - * use {@link #builder()} to get an instance - */ - public static class Builder { - - public static final String RANDOM_PORT = "__RANDOM_PORT__"; - private static final Pattern RANDOM_PORT_PATTERN = Pattern.compile(RANDOM_PORT); - - int[] nodes = {1}; - private boolean start = true; - private boolean dse = false; - private VersionNumber version = null; - private Set createOptions = new LinkedHashSet(); - private Set jvmArgs = new LinkedHashSet(); - private final Map cassandraConfiguration = Maps.newLinkedHashMap(); - private final Map dseConfiguration = Maps.newLinkedHashMap(); - private Map workloads = new HashMap(); - - private Builder() { - cassandraConfiguration.put("start_rpc", false); - cassandraConfiguration.put("storage_port", RANDOM_PORT); - cassandraConfiguration.put("rpc_port", RANDOM_PORT); - cassandraConfiguration.put("native_transport_port", RANDOM_PORT); - } - - /** - * Number of hosts for each DC. Defaults to [1] (1 DC with 1 node) - */ - public Builder withNodes(int... nodes) { - this.nodes = nodes; - return this; - } - - public Builder withoutNodes() { - return withNodes(); - } - - /** - * Enables SSL encryption. - */ - public Builder withSSL() { - cassandraConfiguration.put("client_encryption_options.enabled", "true"); - cassandraConfiguration.put("client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put("client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); - return this; - } - - /** - * Enables client authentication. - * This also enables encryption ({@link #withSSL()}. 
- */ - public Builder withAuth() { - withSSL(); - cassandraConfiguration.put("client_encryption_options.require_client_auth", "true"); - cassandraConfiguration.put("client_encryption_options.truststore", DEFAULT_SERVER_TRUSTSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put("client_encryption_options.truststore_password", DEFAULT_SERVER_TRUSTSTORE_PASSWORD); - return this; - } - - /** - * Whether to start the cluster immediately (defaults to true if this is never called). - */ - public Builder notStarted() { - this.start = false; - return this; - } - - /** - * The Cassandra or DSE version to use. If not specified the globally configured version is used instead. - */ - public Builder withVersion(VersionNumber version) { - this.version = version; - return this; - } - - /** - * Indicates whether or not this cluster is meant to be a DSE cluster. - */ - public Builder withDSE(boolean dse) { - this.dse = dse; - return this; - } - - /** - * Free-form options that will be added at the end of the {@code ccm create} command - * (defaults to {@link #CASSANDRA_INSTALL_ARGS} if this is never called). - */ - public Builder withCreateOptions(String... createOptions) { - Collections.addAll(this.createOptions, createOptions); - return this; - } - - /** - * Customizes entries in cassandra.yaml (can be called multiple times) - */ - public Builder withCassandraConfiguration(String key, Object value) { - this.cassandraConfiguration.put(key, value); - return this; - } - - /** - * Customizes entries in dse.yaml (can be called multiple times) - */ - public Builder withDSEConfiguration(String key, Object value) { - this.dseConfiguration.put(key, value); - return this; - } + public Builder withBinaryPort(int port) { + cassandraConfiguration.put("native_transport_port", port); + return this; + } - /** - * JVM args to use when starting hosts. - * System properties should be provided one by one, as a string in the form: - * {@code -Dname=value}. - */ - public Builder withJvmArgs(String... jvmArgs) { - Collections.addAll(this.jvmArgs, jvmArgs); - return this; - } + public Builder withJmxPorts(int... ports) { + this.jmxPorts = ports; + return this; + } - public Builder withStoragePort(int port) { - cassandraConfiguration.put("storage_port", port); - return this; - } + /** + * Sets the DSE workload for a given node. + * + * @param node The node to set the workload for (starting with 1). + * @param workload The workload(s) (e.g. solr, spark, hadoop) + * @return This builder + */ + public Builder withWorkload(int node, Workload... workload) { + this.workloads.put(node, workload); + return this; + } - public Builder withThriftPort(int port) { - cassandraConfiguration.put("rpc_port", port); - return this; + public CCMBridge build() { + // be careful NOT to alter internal state (hashCode/equals) during build! + String clusterName = TestUtils.generateIdentifier("ccm_"); + + VersionNumber dseVersion; + VersionNumber cassandraVersion; + boolean versionConfigured = this.version != null; + // No version was explicitly provided, fallback on global config. + if (!versionConfigured) { + dseVersion = GLOBAL_DSE_VERSION_NUMBER; + cassandraVersion = GLOBAL_CASSANDRA_VERSION_NUMBER; + } else if (dse) { + // given version is the DSE version, base cassandra version on DSE version. + dseVersion = this.version; + cassandraVersion = getCassandraVersion(dseVersion); + } else { + // given version is cassandra version. 
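For orientation, here is a sketch of how test code drives the fluent Builder configured above. It only uses methods that appear in this patch (and mirrors the usage exercised by CCMBridgeTest further down); the node count, JMX port and JVM argument are illustrative assumptions, not values taken from the patch.

```
// (inside a test method in the com.datastax.driver.core test package)
// Sketch: configure a 3-node CCM cluster without starting it, pin node 1's JMX port,
// pass one JVM system property in the documented -Dname=value form, then start and
// dispose of the cluster explicitly.
CCMBridge.Builder builder =
    CCMBridge.builder()
        .withNodes(3)        // one data center with three nodes
        .withJmxPorts(12345) // node 1 uses 12345, remaining nodes get free random ports
        .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") // one property per string (illustrative)
        .notStarted();       // build() will not start the cluster automatically
CCMAccess ccm = builder.build();
try {
  ccm.start();
  // exercise the cluster, e.g. through ccm.addressOfNode(1) .. ccm.addressOfNode(3)
} finally {
  ccm.close();
}
```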
+ dseVersion = null; + cassandraVersion = this.version; + } + + Map cassandraConfiguration = randomizePorts(this.cassandraConfiguration); + int storagePort = Integer.parseInt(cassandraConfiguration.get("storage_port").toString()); + int thriftPort = Integer.parseInt(cassandraConfiguration.get("rpc_port").toString()); + int binaryPort = + Integer.parseInt(cassandraConfiguration.get("native_transport_port").toString()); + + // Copy any supplied jmx ports over, and find available ports for the rest + int numNodes = 0; + for (int i : nodes) { + numNodes += i; + } + + int[] generatedJmxPorts = new int[numNodes]; + for (int i = 0; i < numNodes; i++) { + if (i >= jmxPorts.length) { + generatedJmxPorts[i] = TestUtils.findAvailablePort(); + } else { + generatedJmxPorts[i] = jmxPorts[i]; } - - public Builder withBinaryPort(int port) { - cassandraConfiguration.put("native_transport_port", port); - return this; + } + + if (!isThriftSupported(cassandraVersion, dseVersion)) { + // remove thrift configuration + cassandraConfiguration.remove("start_rpc"); + cassandraConfiguration.remove("rpc_port"); + cassandraConfiguration.remove("thrift_prepared_statements_cache_size_mb"); + } + if (!dse) { + if (isMaterializedViewsDisabledByDefault(cassandraVersion)) { + // enable materialized views + cassandraConfiguration.put("enable_materialized_views", true); } - - /** - * Sets the DSE workload for a given node. - * - * @param node The node to set the workload for (starting with 1). - * @param workload The workload(s) (e.g. solr, spark, hadoop) - * @return This builder - */ - public Builder withWorkload(int node, Workload... workload) { - this.workloads.put(node, workload); - return this; + if (isSasiConfigEnablementRequired(cassandraVersion)) { + // enable SASI indexing in config (disabled by default in C* 4.0) + cassandraConfiguration.put("enable_sasi_indexes", true); } - - public CCMBridge build() { - // be careful NOT to alter internal state (hashCode/equals) during build! - String clusterName = TestUtils.generateIdentifier("ccm_"); - - - VersionNumber dseVersion; - VersionNumber cassandraVersion; - boolean versionConfigured = this.version != null; - // No version was explicitly provided, fallback on global config. - if (!versionConfigured) { - dseVersion = GLOBAL_DSE_VERSION_NUMBER; - cassandraVersion = GLOBAL_CASSANDRA_VERSION_NUMBER; - } else if (dse) { - // given version is the DSE version, base cassandra version on DSE version. - dseVersion = this.version; - cassandraVersion = getCassandraVersion(dseVersion); - } else { - // given version is cassandra version. 
- dseVersion = null; - cassandraVersion = this.version; - } - - Map cassandraConfiguration = randomizePorts(this.cassandraConfiguration); - int storagePort = Integer.parseInt(cassandraConfiguration.get("storage_port").toString()); - int thriftPort = Integer.parseInt(cassandraConfiguration.get("rpc_port").toString()); - int binaryPort = Integer.parseInt(cassandraConfiguration.get("native_transport_port").toString()); - if (!isThriftSupported(cassandraVersion)) { - // remove thrift configuration - cassandraConfiguration.remove("start_rpc"); - cassandraConfiguration.remove("rpc_port"); - cassandraConfiguration.remove("thrift_prepared_statements_cache_size_mb"); - } - final CCMBridge ccm = new CCMBridge(clusterName, cassandraVersion, dseVersion, storagePort, thriftPort, binaryPort, joinJvmArgs(), nodes); - - Runtime.getRuntime().addShutdownHook(new Thread() { + } + final CCMBridge ccm = + new CCMBridge( + clusterName, + cassandraVersion, + dseVersion, + ipPrefix, + storagePort, + thriftPort, + binaryPort, + generatedJmxPorts, + joinJvmArgs(), + nodes); + + Runtime.getRuntime() + .addShutdownHook( + new Thread() { @Override public void run() { - ccm.close(); + ccm.close(); } - }); - ccm.execute(buildCreateCommand(clusterName, versionConfigured, cassandraVersion, dseVersion)); - updateNodeConf(ccm); - ccm.updateConfig(cassandraConfiguration); - if (dseVersion != null) { - Map dseConfiguration = Maps.newLinkedHashMap(this.dseConfiguration); - if (dseVersion.getMajor() >= 5) { - // randomize DSE specific ports if dse present and greater than 5.0 - dseConfiguration.put("lease_netty_server_port", RANDOM_PORT); - dseConfiguration.put("internode_messaging_options.port", RANDOM_PORT); - } - dseConfiguration = randomizePorts(dseConfiguration); - if (!dseConfiguration.isEmpty()) - ccm.updateDSEConfig(dseConfiguration); - } - for (Map.Entry entry : workloads.entrySet()) { - ccm.setWorkload(entry.getKey(), entry.getValue()); - } - if (start) - ccm.start(); - return ccm; + }); + ccm.execute(buildCreateCommand(clusterName, versionConfigured, cassandraVersion, dseVersion)); + updateNodeConf(ccm); + ccm.updateConfig(cassandraConfiguration); + if (dseVersion != null) { + Map dseConfiguration = Maps.newLinkedHashMap(this.dseConfiguration); + if (dseVersion.getMajor() >= 5) { + // randomize DSE specific ports if dse present and greater than 5.0 + dseConfiguration.put("lease_netty_server_port", RANDOM_PORT); + dseConfiguration.put("internode_messaging_options.port", RANDOM_PORT); } + dseConfiguration = randomizePorts(dseConfiguration); + if (!dseConfiguration.isEmpty()) ccm.updateDSEConfig(dseConfiguration); + } + for (Map.Entry entry : workloads.entrySet()) { + ccm.setWorkload(entry.getKey(), entry.getValue()); + } + if (start) ccm.start(); + return ccm; + } - private static boolean isThriftSupported(VersionNumber cassandraVersion) { - return cassandraVersion.compareTo(VersionNumber.parse("4.0")) < 0; - } + private static boolean isThriftSupported( + VersionNumber cassandraVersion, VersionNumber dseVersion) { + if (dseVersion == null) { + // Thrift is removed from some pre-release 4.x versions, make the comparison work for those + return cassandraVersion.nextStable().compareTo(VersionNumber.parse("4.0")) < 0; + } else { + return dseVersion.nextStable().compareTo(VersionNumber.parse("6.0")) < 0; + } + } - public int weight() { - // the weight is simply function of the number of nodes - int totalNodes = 0; - for (int nodesPerDc : this.nodes) { - totalNodes += nodesPerDc; - } - return totalNodes; - } + private 
static boolean isMaterializedViewsDisabledByDefault(VersionNumber cassandraVersion) { + return cassandraVersion.nextStable().compareTo(VersionNumber.parse("4.0")) >= 0; + } - private String joinJvmArgs() { - StringBuilder allJvmArgs = new StringBuilder(""); - String quote = isWindows() ? "\"" : ""; - for (String jvmArg : jvmArgs) { - // Windows requires jvm arguments to be quoted, while *nix requires unquoted. - allJvmArgs.append(" "); - allJvmArgs.append(quote); - allJvmArgs.append("--jvm_arg="); - allJvmArgs.append(randomizePorts(jvmArg)); - allJvmArgs.append(quote); - } - return allJvmArgs.toString(); - } + private static boolean isSasiConfigEnablementRequired(VersionNumber cassandraVersion) { + return cassandraVersion.nextStable().compareTo(VersionNumber.parse("4.0")) >= 0; + } - private String buildCreateCommand(String clusterName, boolean versionConfigured, VersionNumber - cassandraVersion, VersionNumber dseVersion) { - StringBuilder result = new StringBuilder(CCM_COMMAND + " create"); - result.append(" ").append(clusterName); - result.append(" -i ").append(TestUtils.IP_PREFIX); - result.append(" "); - if (nodes.length > 0) { - result.append(" -n "); - for (int i = 0; i < nodes.length; i++) { - int node = nodes[i]; - if (i > 0) - result.append(':'); - result.append(node); - } - } + public int weight() { + // the weight is simply function of the number of nodes + int totalNodes = 0; + for (int nodesPerDc : this.nodes) { + totalNodes += nodesPerDc; + } + return totalNodes; + } - Set lCreateOptions = new LinkedHashSet(createOptions); - if (!versionConfigured) { - // If no version was provided, use the default install ags. - lCreateOptions.addAll(CASSANDRA_INSTALL_ARGS); - } else { - if (dseVersion != null) { - lCreateOptions.add("--dse"); - lCreateOptions.add("-v"); - lCreateOptions.add(dseVersion.toString()); - } else { - lCreateOptions.add("-v"); - lCreateOptions.add(cassandraVersion.toString()); - } - } - result.append(" ").append(Joiner.on(" ").join(randomizePorts(lCreateOptions))); - return result.toString(); - } + private String joinJvmArgs() { + StringBuilder allJvmArgs = new StringBuilder(""); + String quote = isWindows() ? "\"" : ""; + for (String jvmArg : jvmArgs) { + // Windows requires jvm arguments to be quoted, while *nix requires unquoted. + allJvmArgs.append(" "); + allJvmArgs.append(quote); + allJvmArgs.append("--jvm_arg="); + allJvmArgs.append(randomizePorts(jvmArg)); + allJvmArgs.append(quote); + } + return allJvmArgs.toString(); + } - /** - * This is a workaround for an oddity in CCM: - * when we create a cluster with -n option and - * non-standard ports, the node.conf files are not updated accordingly. 
- */ - private void updateNodeConf(CCMBridge ccm) { - int n = 1; - Closer closer = Closer.create(); - try { - for (int dc = 1; dc <= nodes.length; dc++) { - int nodesInDc = nodes[dc - 1]; - for (int i = 0; i < nodesInDc; i++) { - int jmxPort = findAvailablePort(); - int debugPort = findAvailablePort(); - logger.trace("Node {} in cluster {} using JMX port {} and debug port {}", n, ccm.getClusterName(), jmxPort, debugPort); - File nodeConf = new File(ccm.getNodeDir(n), "node.conf"); - File nodeConf2 = new File(ccm.getNodeDir(n), "node.conf.tmp"); - BufferedReader br = closer.register(new BufferedReader(new FileReader(nodeConf))); - PrintWriter pw = closer.register(new PrintWriter(new FileWriter(nodeConf2))); - String line; - while ((line = br.readLine()) != null) { - line = line - .replace("9042", Integer.toString(ccm.binaryPort)) - .replace("9160", Integer.toString(ccm.thriftPort)) - .replace("7000", Integer.toString(ccm.storagePort)); - if (line.startsWith("jmx_port")) { - line = String.format("jmx_port: '%s'", jmxPort); - } else if (line.startsWith("remote_debug_port")) { - line = String.format("remote_debug_port: %s:%s", TestUtils.ipOfNode(n), debugPort); - } - pw.println(line); - } - pw.flush(); - pw.close(); - Files.move(nodeConf2, nodeConf); - n++; - } - } - } catch (IOException e) { - Throwables.propagate(e); - } finally { - try { - closer.close(); - } catch (IOException e) { - Throwables.propagate(e); - } - } + private String buildCreateCommand( + String clusterName, + boolean versionConfigured, + VersionNumber cassandraVersion, + VersionNumber dseVersion) { + StringBuilder result = new StringBuilder(CCM_COMMAND + " create"); + result.append(" ").append(clusterName); + result.append(" -i ").append(ipPrefix); + result.append(" "); + if (nodes.length > 0) { + result.append(" -n "); + for (int i = 0; i < nodes.length; i++) { + int node = nodes[i]; + if (i > 0) result.append(':'); + result.append(node); } - - private Set randomizePorts(Set set) { - Set randomized = new LinkedHashSet(); - for (String value : set) { - randomized.add(randomizePorts(value)); - } - return randomized; + } + + Set lCreateOptions = new LinkedHashSet(createOptions); + if (!versionConfigured) { + // If no version was provided, use the default install ags. + lCreateOptions.addAll(CASSANDRA_INSTALL_ARGS); + } else { + if (dseVersion != null) { + lCreateOptions.add("--dse"); + lCreateOptions.add("-v"); + lCreateOptions.add(dseVersion.toString()); + } else { + lCreateOptions.add("-v"); + lCreateOptions.add(cassandraVersion.toString()); } + } + result.append(" ").append(Joiner.on(" ").join(randomizePorts(lCreateOptions))); + return result.toString(); + } - private Map randomizePorts(Map map) { - Map randomized = new HashMap(); - for (Map.Entry entry : map.entrySet()) { - Object value = entry.getValue(); - if (value instanceof CharSequence) { - value = randomizePorts((CharSequence) value); - } - randomized.put(entry.getKey(), value); + /** + * This is a workaround for an oddity in CCM: when we create a cluster with -n option and + * non-standard ports, the node.conf files are not updated accordingly. 
+ */ + private void updateNodeConf(CCMBridge ccm) { + int n = 1; + Closer closer = Closer.create(); + try { + for (int dc = 1; dc <= nodes.length; dc++) { + int nodesInDc = nodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + int jmxPort = ccm.jmxAddressOfNode(n).getPort(); + int debugPort = TestUtils.findAvailablePort(); + logger.trace( + "Node {} in cluster {} using JMX port {} and debug port {}", + n, + ccm.getClusterName(), + jmxPort, + debugPort); + File nodeConf = new File(ccm.getNodeDir(n), "node.conf"); + File nodeConf2 = new File(ccm.getNodeDir(n), "node.conf.tmp"); + BufferedReader br = closer.register(new BufferedReader(new FileReader(nodeConf))); + PrintWriter pw = closer.register(new PrintWriter(new FileWriter(nodeConf2))); + String line; + while ((line = br.readLine()) != null) { + line = + line.replace("9042", Integer.toString(ccm.binaryPort)) + .replace("9160", Integer.toString(ccm.thriftPort)) + .replace("7000", Integer.toString(ccm.storagePort)); + if (line.startsWith("jmx_port")) { + line = String.format("jmx_port: '%s'", jmxPort); + } else if (line.startsWith("remote_debug_port")) { + line = String.format("remote_debug_port: %s:%s", ipPrefix + n, debugPort); + } + pw.println(line); } - return randomized; + pw.flush(); + pw.close(); + Files.move(nodeConf2, nodeConf); + n++; + } } - - private String randomizePorts(CharSequence str) { - Matcher matcher = RANDOM_PORT_PATTERN.matcher(str); - StringBuffer sb = new StringBuffer(); - while (matcher.find()) { - matcher.appendReplacement(sb, Integer.toString(TestUtils.findAvailablePort())); - } - matcher.appendTail(sb); - return sb.toString(); + } catch (IOException e) { + Throwables.propagate(e); + } finally { + try { + closer.close(); + } catch (IOException e) { + Throwables.propagate(e); } + } + } - @Override - @SuppressWarnings("SimplifiableIfStatement") - public boolean equals(Object o) { - // do not include start as it is not relevant to the settings of the cluster. - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Builder builder = (Builder) o; - - if (dse != builder.dse) return false; - if (!Arrays.equals(nodes, builder.nodes)) return false; - if (version != null ? !version.equals(builder.version) : builder.version != null) return false; - if (!createOptions.equals(builder.createOptions)) return false; - if (!jvmArgs.equals(builder.jvmArgs)) return false; - if (!cassandraConfiguration.equals(builder.cassandraConfiguration)) return false; - if (!dseConfiguration.equals(builder.dseConfiguration)) return false; - return workloads.equals(builder.workloads); - } + private Set randomizePorts(Set set) { + Set randomized = new LinkedHashSet(); + for (String value : set) { + randomized.add(randomizePorts(value)); + } + return randomized; + } - @Override - public int hashCode() { - // do not include start as it is not relevant to the settings of the cluster. - int result = Arrays.hashCode(nodes); - result = 31 * result + (dse ? 1 : 0); - result = 31 * result + (version != null ? 
version.hashCode() : 0); - result = 31 * result + createOptions.hashCode(); - result = 31 * result + jvmArgs.hashCode(); - result = 31 * result + cassandraConfiguration.hashCode(); - result = 31 * result + dseConfiguration.hashCode(); - result = 31 * result + workloads.hashCode(); - return result; + private Map randomizePorts(Map map) { + Map randomized = new HashMap(); + for (Map.Entry entry : map.entrySet()) { + Object value = entry.getValue(); + if (value instanceof CharSequence) { + value = randomizePorts((CharSequence) value); } + randomized.put(entry.getKey(), value); + } + return randomized; + } + + private String randomizePorts(CharSequence str) { + Matcher matcher = RANDOM_PORT_PATTERN.matcher(str); + StringBuffer sb = new StringBuffer(); + while (matcher.find()) { + matcher.appendReplacement(sb, Integer.toString(TestUtils.findAvailablePort())); + } + matcher.appendTail(sb); + return sb.toString(); + } + + @Override + @SuppressWarnings("SimplifiableIfStatement") + public boolean equals(Object o) { + // do not include start as it is not relevant to the settings of the cluster. + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Builder builder = (Builder) o; + + if (ipPrefix != builder.ipPrefix) return false; + if (dse != builder.dse) return false; + if (!Arrays.equals(nodes, builder.nodes)) return false; + if (version != null ? !version.equals(builder.version) : builder.version != null) + return false; + if (!createOptions.equals(builder.createOptions)) return false; + if (!jvmArgs.equals(builder.jvmArgs)) return false; + if (!cassandraConfiguration.equals(builder.cassandraConfiguration)) return false; + if (!dseConfiguration.equals(builder.dseConfiguration)) return false; + return workloads.equals(builder.workloads); } + @Override + public int hashCode() { + // do not include start as it is not relevant to the settings of the cluster. + int result = Arrays.hashCode(nodes); + result = 31 * result + (dse ? 1 : 0); + result = 31 * result + ipPrefix.hashCode(); + result = 31 * result + (version != null ? version.hashCode() : 0); + result = 31 * result + createOptions.hashCode(); + result = 31 * result + jvmArgs.hashCode(); + result = 31 * result + cassandraConfiguration.hashCode(); + result = 31 * result + dseConfiguration.hashCode(); + result = 31 * result + workloads.hashCode(); + return result; + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridgeTest.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridgeTest.java new file mode 100644 index 00000000000..e82db86ea88 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridgeTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.InetSocketAddress; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; +import org.testng.annotations.Test; + +/** A simple test to validate jmx ports work */ +@Test +@CCMConfig(numberOfNodes = 3) +public class CCMBridgeTest extends CCMTestsSupport { + + @Test(groups = "short") + public void should_make_JMX_connection() throws Exception { + InetSocketAddress addr1 = ccm().jmxAddressOfNode(1); + InetSocketAddress addr2 = ccm().jmxAddressOfNode(2); + InetSocketAddress addr3 = ccm().jmxAddressOfNode(3); + + assertThat(addr1.getPort()).isNotEqualTo(addr2.getPort()); + assertThat(addr1.getPort()).isNotEqualTo(addr3.getPort()); + assertThat(addr2.getPort()).isNotEqualTo(addr3.getPort()); + + JMXServiceURL url = + new JMXServiceURL( + String.format( + "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", + addr2.getAddress().getHostAddress(), addr2.getPort())); + JMXConnector jmxc = JMXConnectorFactory.connect(url, null); + assertThat(jmxc.getConnectionId().isEmpty()).isFalse(); + } + + @Test(groups = "short") + public void should_configure_JMX_ports_through_builder() throws Exception { + CCMBridge.Builder ccmBuilder = + CCMBridge.builder().withNodes(3).notStarted().withJmxPorts(12345); + CCMAccess ccm = ccmBuilder.build(); + assertThat(ccm.jmxAddressOfNode(1).getPort()).isEqualTo(12345); + + int port2 = ccm.jmxAddressOfNode(2).getPort(); + int port3 = ccm.jmxAddressOfNode(3).getPort(); + assertThat(port2).isBetween(0, 65535); + assertThat(port3).isBetween(0, 65535); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java b/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java index 78eb8435202..caa2a1ed218 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,350 +18,367 @@ package com.datastax.driver.core; import com.google.common.base.Throwables; -import com.google.common.cache.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; import java.io.File; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class CCMCache { - private static final Logger LOGGER = LoggerFactory.getLogger(CCMCache.class); - - private static class CachedCCMAccess implements CCMAccess { - - private final CCMAccess ccm; - - private final AtomicInteger refCount = new AtomicInteger(1); - - private volatile boolean evicted = false; - - private CachedCCMAccess(CCMAccess ccm) { - this.ccm = ccm; - } - - @Override - public String getClusterName() { - return ccm.getClusterName(); - } - - @Override - public VersionNumber getCassandraVersion() { - return ccm.getCassandraVersion(); - } - - @Override - public VersionNumber getDSEVersion() { - return ccm.getDSEVersion(); - } - - @Override - public File getCcmDir() { - return ccm.getCcmDir(); - } - - @Override - public File getClusterDir() { - return ccm.getClusterDir(); - } - - @Override - public File getNodeDir(int n) { - return ccm.getNodeDir(n); - } - - @Override - public File getNodeConfDir(int n) { - return ccm.getNodeConfDir(n); - } - - @Override - public int getStoragePort() { - return ccm.getStoragePort(); - } - - @Override - public int getThriftPort() { - return ccm.getThriftPort(); - } - - @Override - public int getBinaryPort() { - return ccm.getBinaryPort(); - } - - @Override - public void setKeepLogs(boolean keepLogs) { - ccm.setKeepLogs(keepLogs); - } - - @Override - public InetSocketAddress addressOfNode(int n) { - return ccm.addressOfNode(n); - } - - @Override - public void start() { - ccm.start(); - } - - @Override - public void stop() { - ccm.stop(); - } - - @Override - public void forceStop() { - ccm.forceStop(); - } - - @Override - public void close() { - refCount.decrementAndGet(); - maybeClose(); - } - - private void maybeClose() { - if (refCount.get() <= 0 && evicted) { - ccm.close(); - } - } - - @Override - public void remove() { - ccm.remove(); - } - - @Override - public void updateConfig(Map configs) { - ccm.updateConfig(configs); - } - - @Override - public void updateDSEConfig(Map configs) { - ccm.updateDSEConfig(configs); - } - - @Override - public String checkForErrors() { - return ccm.checkForErrors(); - } - - @Override - public void start(int n) { - ccm.start(n); - } - - @Override - public void stop(int n) { - ccm.stop(n); - } - - @Override - public void forceStop(int n) { - ccm.forceStop(n); - } - - @Override - public void pause(int n) { - ccm.pause(n); - } - - @Override - public void resume(int n) { - ccm.resume(n); - } - - @Override - public void remove(int n) { - ccm.remove(n); - } - - 
@Override - public void add(int n) { - ccm.add(n); - } - - @Override - public void add(int dc, int n) { - ccm.add(dc, n); - } - - @Override - public void decommission(int n) { - ccm.decommission(n); - } - - @Override - public void updateNodeConfig(int n, String key, Object value) { - ccm.updateNodeConfig(n, key, value); - } - - @Override - public void updateNodeConfig(int n, Map configs) { - ccm.updateNodeConfig(n, configs); - } - - @Override - public void updateDSENodeConfig(int n, String key, Object value) { - ccm.updateDSENodeConfig(n, key, value); - } - - @Override - public void updateDSENodeConfig(int n, Map configs) { - ccm.updateDSENodeConfig(n, configs); - } - - @Override - public void setWorkload(int n, Workload... workload) { - ccm.setWorkload(n, workload); - } - - @Override - public void waitForUp(int node) { - ccm.waitForUp(node); - } - - @Override - public void waitForDown(int node) { - ccm.waitForDown(node); - } - - @Override - public ProtocolVersion getProtocolVersion() { - return ccm.getProtocolVersion(); - } - - @Override - public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { - return ccm.getProtocolVersion(maximumAllowed); - } - - @Override - public String toString() { - return ccm.toString(); - } - } - - private static class CCMAccessLoader extends CacheLoader { - - @Override - public CachedCCMAccess load(CCMBridge.Builder key) { - return new CachedCCMAccess(key.build()); - } - - } - - private static class CCMAccessWeigher implements Weigher { - - @Override - public int weigh(CCMBridge.Builder key, CachedCCMAccess value) { - return key.weight(); - } - - } - - private static class CCMAccessRemovalListener implements RemovalListener { - - @Override - public void onRemoval(RemovalNotification notification) { - CachedCCMAccess cached = notification.getValue(); - if (cached != null && cached.ccm != null) { - LOGGER.debug("Evicting: {}, reason: {}", cached.ccm, notification.getCause()); - cached.evicted = true; - cached.maybeClose(); - } - } - - } - - /** - * A LoadingCache that stores running CCM clusters. - */ - private static final LoadingCache CACHE; - - // The amount of memory one CCM node takes in MB. - private static final int ONE_CCM_NODE_MB = 800; - - static { - long maximumWeight; - String numberOfNodes = System.getProperty("ccm.maxNumberOfNodes"); - if (numberOfNodes == null) { - long freeMemoryMB = TestUtils.getFreeMemoryMB(); - if (freeMemoryMB < ONE_CCM_NODE_MB) - LOGGER.warn("Not enough available memory: {} MB, CCM clusters might fail to start", freeMemoryMB); - // CCM nodes are started with -Xms500M -Xmx500M - // and allocate up to 100MB non-heap memory in the general case, - // to be conservative we treat 1 "slot" as 800Mb. - // We leave 3 slots out to avoid starving system memory, - // and we pick a value with a minimum of 1 slot and a maximum of 8 slots. - // For example, an 8GB VM with ~6.5GB currently available heap will yield 5 slots ((6500/800) - 3 = 5). - long slotsAvailable = (freeMemoryMB / ONE_CCM_NODE_MB) - 3; - maximumWeight = Math.min(8, Math.max(1, slotsAvailable)); - } else { - maximumWeight = Integer.parseInt(numberOfNodes); - } - LOGGER.info("Maximum number of running CCM nodes: {}", maximumWeight); - CACHE = CacheBuilder.newBuilder() - .initialCapacity(3) - .softValues() - .maximumWeight(maximumWeight) - .weigher(new CCMAccessWeigher()) - .removalListener(new CCMAccessRemovalListener()) - .recordStats() - .build(new CCMAccessLoader()); - } - - /** - * Creates or recycles a {@link CCMAccess} instance and returns it. - *

    - * Caller MUST call {@link CCMAccess#close()} when done with the cluster, - * to ensure that resources will be properly freed. - */ - public static CCMAccess get(CCMBridge.Builder key) { - CachedCCMAccess ccm = CACHE.getIfPresent(key); - if (ccm != null) { - ccm.refCount.incrementAndGet(); - } else { - try { - ccm = CACHE.get(key); - } catch (ExecutionException e) { - throw Throwables.propagate(e); - } - } - logCache(); - return ccm; - } - - /** - * Removes the given key from the cache. - */ - public static void remove(CCMBridge.Builder key) { - CACHE.invalidate(key); - } - - private static void logCache() { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Free memory: {} MB", TestUtils.getFreeMemoryMB()); - StringBuilder sb = new StringBuilder(); - Iterator> iterator = CACHE.asMap().entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - sb.append(entry.getValue().getClusterName()) - .append(" (") - .append(entry.getKey().weight()) - .append(")"); - if (iterator.hasNext()) - sb.append(", "); - } - LOGGER.debug("Cache contents: {{}}", sb.toString()); - LOGGER.debug("Cache stats: {}", CACHE.stats()); - } + private static final Logger LOGGER = LoggerFactory.getLogger(CCMCache.class); + + private static class CachedCCMAccess implements CCMAccess { + + private final CCMAccess ccm; + + private final AtomicInteger refCount = new AtomicInteger(1); + + private volatile boolean evicted = false; + + private CachedCCMAccess(CCMAccess ccm) { + this.ccm = ccm; + } + + @Override + public String getClusterName() { + return ccm.getClusterName(); + } + + @Override + public VersionNumber getCassandraVersion() { + return ccm.getCassandraVersion(); + } + + @Override + public VersionNumber getDSEVersion() { + return ccm.getDSEVersion(); + } + + @Override + public File getCcmDir() { + return ccm.getCcmDir(); + } + + @Override + public File getClusterDir() { + return ccm.getClusterDir(); + } + + @Override + public File getNodeDir(int n) { + return ccm.getNodeDir(n); + } + + @Override + public File getNodeConfDir(int n) { + return ccm.getNodeConfDir(n); + } + + @Override + public int getStoragePort() { + return ccm.getStoragePort(); + } + + @Override + public int getThriftPort() { + return ccm.getThriftPort(); + } + + @Override + public int getBinaryPort() { + return ccm.getBinaryPort(); + } + + @Override + public void setKeepLogs(boolean keepLogs) { + ccm.setKeepLogs(keepLogs); + } + + @Override + public int[] getNodeCount() { + return ccm.getNodeCount(); + } + + @Override + public List getContactPoints() { + return ccm.getContactPoints(); + } + + @Override + public InetSocketAddress addressOfNode(int n) { + return ccm.addressOfNode(n); + } + + @Override + public InetSocketAddress jmxAddressOfNode(int n) { + return ccm.jmxAddressOfNode(n); + } + + @Override + public void start() { + ccm.start(); + } + + @Override + public void stop() { + ccm.stop(); + } + + @Override + public void forceStop() { + ccm.forceStop(); + } + + @Override + public void close() { + refCount.decrementAndGet(); + maybeClose(); + } + + private void maybeClose() { + if (refCount.get() <= 0 && evicted) { + ccm.close(); + } + } + + @Override + public void remove() { + ccm.remove(); } + @Override + public void updateConfig(Map configs) { + ccm.updateConfig(configs); + } + + @Override + public void updateDSEConfig(Map configs) { + ccm.updateDSEConfig(configs); + } + + @Override + public String checkForErrors() { + return ccm.checkForErrors(); + } + + @Override + public void start(int n) { + ccm.start(n); 
+ } + + @Override + public void stop(int n) { + ccm.stop(n); + } + + @Override + public void forceStop(int n) { + ccm.forceStop(n); + } + + @Override + public void pause(int n) { + ccm.pause(n); + } + + @Override + public void resume(int n) { + ccm.resume(n); + } + + @Override + public void remove(int n) { + ccm.remove(n); + } + + @Override + public void add(int n) { + ccm.add(n); + } + + @Override + public void add(int dc, int n) { + ccm.add(dc, n); + } + + @Override + public void decommission(int n) { + ccm.decommission(n); + } + + @Override + public void updateNodeConfig(int n, String key, Object value) { + ccm.updateNodeConfig(n, key, value); + } + + @Override + public void updateNodeConfig(int n, Map configs) { + ccm.updateNodeConfig(n, configs); + } + + @Override + public void updateDSENodeConfig(int n, String key, Object value) { + ccm.updateDSENodeConfig(n, key, value); + } + + @Override + public void updateDSENodeConfig(int n, Map configs) { + ccm.updateDSENodeConfig(n, configs); + } + + @Override + public void setWorkload(int n, Workload... workload) { + ccm.setWorkload(n, workload); + } + + @Override + public void waitForUp(int node) { + ccm.waitForUp(node); + } + + @Override + public void waitForDown(int node) { + ccm.waitForDown(node); + } + + @Override + public ProtocolVersion getProtocolVersion() { + return ccm.getProtocolVersion(); + } + + @Override + public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { + return ccm.getProtocolVersion(maximumAllowed); + } + + @Override + public String toString() { + return ccm.toString(); + } + } + + private static class CCMAccessLoader extends CacheLoader { + + @Override + public CachedCCMAccess load(CCMBridge.Builder key) { + return new CachedCCMAccess(key.build()); + } + } + + private static class CCMAccessWeigher implements Weigher { + + @Override + public int weigh(CCMBridge.Builder key, CachedCCMAccess value) { + return key.weight(); + } + } + + private static class CCMAccessRemovalListener + implements RemovalListener { + + @Override + public void onRemoval(RemovalNotification notification) { + CachedCCMAccess cached = notification.getValue(); + if (cached != null && cached.ccm != null) { + LOGGER.debug("Evicting: {}, reason: {}", cached.ccm, notification.getCause()); + cached.evicted = true; + cached.maybeClose(); + } + } + } + + /** A LoadingCache that stores running CCM clusters. */ + private static final LoadingCache CACHE; + + // The amount of memory one CCM node takes in MB. + private static final int ONE_CCM_NODE_MB = 800; + + static { + long maximumWeight; + String numberOfNodes = System.getProperty("ccm.maxNumberOfNodes"); + if (numberOfNodes == null) { + long freeMemoryMB = TestUtils.getFreeMemoryMB(); + if (freeMemoryMB < ONE_CCM_NODE_MB) + LOGGER.warn( + "Not enough available memory: {} MB, CCM clusters might fail to start", freeMemoryMB); + // CCM nodes are started with -Xms500M -Xmx500M + // and allocate up to 100MB non-heap memory in the general case, + // to be conservative we treat 1 "slot" as 800Mb. + // We leave 3 slots out to avoid starving system memory, + // and we pick a value with a minimum of 1 slot and a maximum of 8 slots. + // For example, an 8GB VM with ~6.5GB currently available heap will yield 5 slots ((6500/800) + // - 3 = 5). 
+ long slotsAvailable = (freeMemoryMB / ONE_CCM_NODE_MB) - 3; + maximumWeight = Math.min(8, Math.max(1, slotsAvailable)); + } else { + maximumWeight = Integer.parseInt(numberOfNodes); + } + LOGGER.info("Maximum number of running CCM nodes: {}", maximumWeight); + CACHE = + CacheBuilder.newBuilder() + .initialCapacity(3) + .softValues() + .maximumWeight(maximumWeight) + .weigher(new CCMAccessWeigher()) + .removalListener(new CCMAccessRemovalListener()) + .recordStats() + .build(new CCMAccessLoader()); + } + + /** + * Creates or recycles a {@link CCMAccess} instance and returns it. + * + *
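To make the get()/close() contract documented here concrete, the following sketch shows how a caller is expected to interact with the cache; the builder settings are arbitrary, and all types and methods used are taken from this patch.

```
// (inside a test method in the com.datastax.driver.core test package)
// Sketch of the caching contract: equal Builder settings share one running cluster.
CCMBridge.Builder builder = CCMBridge.builder().withNodes(2);
CCMAccess first = CCMCache.get(builder);   // cache miss: the cluster is built (and, by default, started)
CCMAccess second = CCMCache.get(builder);  // same settings: the already-running cluster is recycled
try {
  // ... run tests against the shared cluster ...
} finally {
  first.close();   // only decrements the reference count
  second.close();  // the cluster is actually shut down once it is also evicted from the cache
}
```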

    Caller MUST call {@link CCMAccess#close()} when done with the cluster, to ensure that + * resources will be properly freed. + */ + public static CCMAccess get(CCMBridge.Builder key) { + CachedCCMAccess ccm = CACHE.getIfPresent(key); + if (ccm != null) { + ccm.refCount.incrementAndGet(); + } else { + try { + ccm = CACHE.get(key); + } catch (ExecutionException e) { + throw Throwables.propagate(e); + } + } + logCache(); + return ccm; + } + + /** Removes the given key from the cache. */ + public static void remove(CCMBridge.Builder key) { + CACHE.invalidate(key); + } + + private static void logCache() { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Free memory: {} MB", TestUtils.getFreeMemoryMB()); + StringBuilder sb = new StringBuilder(); + Iterator> iterator = + CACHE.asMap().entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + sb.append(entry.getValue().getClusterName()) + .append(" (") + .append(entry.getKey().weight()) + .append(")"); + if (iterator.hasNext()) sb.append(", "); + } + LOGGER.debug("Cache contents: {{}}", sb.toString()); + LOGGER.debug("Cache stats: {}", CACHE.stats()); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMConfig.java b/driver-core/src/test/java/com/datastax/driver/core/CCMConfig.java index e8350f1faa8..dc51f963bc2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMConfig.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMConfig.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,298 +17,292 @@ */ package com.datastax.driver.core; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.ElementType.TYPE; import static java.lang.annotation.RetentionPolicy.RUNTIME; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + /** * The configuration to use when running tests with {@link CCMTestsSupport}. - *

    - * This annotation can be used at the class level, or at the method level if you annotate the class with - * {@code @CreateCCM(PER_METHOD)}. + * + *

    This annotation can be used at the class level, or at the method level if you annotate the + * class with {@code @CreateCCM(PER_METHOD)}. */ @Retention(RUNTIME) @Target({TYPE, METHOD}) public @interface CCMConfig { - final class Undefined { - } - - /** - * The number of nodes to create, per data center. - * If not set, this defaults to {@code {1}}, i.e., one data center with one node. - *
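As a concrete illustration of the class-level usage described here, a test class can be wired up roughly as follows (a sketch: the class name and assertion are illustrative; `CCMTestsSupport`, `ccm()` and the annotation attributes come from this patch).

```
package com.datastax.driver.core;

import static org.assertj.core.api.Assertions.assertThat;

import org.testng.annotations.Test;

// Sketch: one CCM cluster with two data centers (2 + 1 nodes), created once for the whole class.
@CCMConfig(numberOfNodes = {2, 1}, dirtiesContext = true)
public class ExampleCcmTest extends CCMTestsSupport {

  @Test(groups = "short")
  public void should_see_configured_topology() {
    // ccm() exposes the CCMAccess created from the annotation above;
    // assuming getNodeCount() reports the number of nodes per data center.
    assertThat(ccm().getNodeCount()).containsExactly(2, 1);
  }
}
```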

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return The number of nodes to create, per data center. - */ - int[] numberOfNodes() default {}; + final class Undefined {} - /** - * The C* or DSE version to use; defaults to the version defined by - * the System property {@code cassandra.version}. - *

    - * Note that setting this attribute completely - * overrides the System properties {@code cassandra.version} - * and {@code cassandra.directory}. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return The C* or DSE version to use - * @see CCMBridge#getCassandraVersion() - */ - String version() default ""; + /** + * The number of nodes to create, per data center. If not set, this defaults to {@code {1}}, i.e., + * one data center with one node. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return The number of nodes to create, per data center. + */ + int[] numberOfNodes() default {}; - /** - * Whether to launch a DSE instance rather than an OSS C*. - *

    - * Note that setting this attribute completely - * overrides the System property {@code dse}. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return {@code true} to launch a DSE instance, {@code false} to launch an OSS C* instance (default). - */ - boolean[] dse() default {}; + /** + * The C* or DSE version to use; defaults to the version defined by the System property {@code + * cassandra.version}. + * + *

    Note that setting this attribute completely overrides the System properties {@code + * cassandra.version} and {@code cassandra.directory}. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return The C* or DSE version to use + * @see CCMBridge#getCassandraVersion() + */ + String version() default ""; - /** - * Configuration items to add to cassandra.yaml configuration file. - * Each configuration item must be in the form {@code key:value}. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return Configuration items to add to cassandra.yaml configuration file. - */ - String[] config() default {}; + /** + * Whether to launch a DSE instance rather than an OSS C*. + * + *

    Note that setting this attribute completely overrides the System property {@code dse}. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return {@code true} to launch a DSE instance, {@code false} to launch an OSS C* instance + * (default). + */ + boolean[] dse() default {}; - /** - * Configuration items to add to dse.yaml configuration file. - * Each configuration item must be in the form {@code key:value}. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return Configuration items to add to dse.yaml configuration file. - */ - String[] dseConfig() default {}; + /** + * Configuration items to add to cassandra.yaml configuration file. Each configuration item must + * be in the form {@code key:value}. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return Configuration items to add to cassandra.yaml configuration file. + */ + String[] config() default {}; - /** - * JVM args to use when starting hosts. - * System properties should be provided one by one, in the form - * {@code -Dname=value}. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return JVM args to use when starting hosts. - */ - String[] jvmArgs() default {}; + /** + * Configuration items to add to dse.yaml configuration file. Each configuration item must be in + * the form {@code key:value}. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return Configuration items to add to dse.yaml configuration file. + */ + String[] dseConfig() default {}; - /** - * Free-form options that will be added at the end of the {@code ccm create} command. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return Free-form options that will be added at the end of the {@code ccm create} command. - */ - String[] options() default {}; + /** + * JVM args to use when starting hosts. System properties should be provided one by one, in the + * form {@code -Dname=value}. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return JVM args to use when starting hosts. + */ + String[] jvmArgs() default {}; - /** - * Whether to use SSL encryption. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return {@code true} to use encryption, {@code false} to use unencrypted communication (default). - */ - boolean[] ssl() default {}; + /** + * Free-form options that will be added at the end of the {@code ccm create} command. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return Free-form options that will be added at the end of the {@code ccm create} command. + */ + String[] options() default {}; - /** - * Whether to use authentication. Implies the use of SSL encryption. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return {@code true} to use authentication, {@code false} to use unauthenticated communication (default). - */ - boolean[] auth() default {}; + /** + * Whether to use SSL encryption. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return {@code true} to use encryption, {@code false} to use unencrypted communication + * (default). + */ + boolean[] ssl() default {}; - /** - * The workloads to assign to each node. If this attribute is defined, - * the number of its elements must be lesser than or equal to the number of nodes in the cluster. - *

    - * This attribute is ignored if {@link #ccmProvider()} is defined. - * - * @return The workloads to assign to each node. - */ - CCMWorkload[] workloads() default {}; + /** + * Whether to use authentication. Implies the use of SSL encryption. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return {@code true} to use authentication, {@code false} to use unauthenticated communication + * (default). + */ + boolean[] auth() default {}; - /** - * Returns {@code true} if a {@link CCMBridge} instance should be automatically created, {@code false} otherwise. - *

    - * If {@code true}, a CCM cluster builder will be created or retrieved from the cache. - * If {@code false}, no CCM cluster will be activated for this test; this can be useful - * when some tests in a test class require CCM, while others don't but are in one of the allowed - * CCM test groups ("short" for example). - *

    - * The cluster will be created once for the whole class if CCM test mode is {@link CreateCCM.TestMode#PER_CLASS}, - * or once per test method, if the CCM test mode is {@link CreateCCM.TestMode#PER_METHOD}, - * - * @return {@code true} if a {@link Cluster} instance should be automatically created, {@code false} otherwise. - */ - boolean[] createCcm() default {}; + /** + * The workloads to assign to each node. If this attribute is defined, the number of its elements + * must be lesser than or equal to the number of nodes in the cluster. + * + *

    This attribute is ignored if {@link #ccmProvider()} is defined. + * + * @return The workloads to assign to each node. + */ + CCMWorkload[] workloads() default {}; - /** - * Returns {@code true} if a {@link Cluster} instance should be automatically created, {@code false} otherwise. - *

    - * If {@code true}, a cluster builder will be obtained by invoking the method - * specified by {@link #clusterProvider()} and {@link #clusterProviderClass()}. - *

    - * The CCM cluster will be created once for the whole class if CCM test mode is {@link CreateCCM.TestMode#PER_CLASS}, - * or once per test method, if the CCM test mode is {@link CreateCCM.TestMode#PER_METHOD}, - * - * @return {@code true} if a {@link Cluster} instance should be automatically created, {@code false} otherwise. - */ - boolean[] createCluster() default {}; + /** + * Returns {@code true} if a {@link CCMBridge} instance should be automatically created, {@code + * false} otherwise. + * + *

    If {@code true}, a CCM cluster builder will be created or retrieved from the cache. If + * {@code false}, no CCM cluster will be activated for this test; this can be useful when some + * tests in a test class require CCM, while others don't but are in one of the allowed CCM test + * groups ("short" for example). + * + *

    The cluster will be created once for the whole class if CCM test mode is {@link + * CreateCCM.TestMode#PER_CLASS}, or once per test method, if the CCM test mode is {@link + * CreateCCM.TestMode#PER_METHOD}, + * + * @return {@code true} if a {@link Cluster} instance should be automatically created, {@code + * false} otherwise. + */ + boolean[] createCcm() default {}; - /** - * Returns {@code true} if a {@link Session} instance should be automatically created, {@code false} otherwise. - *
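A sketch of the per-method mode mentioned above, combining one method that gets its own cluster with one that opts out of CCM entirely; class and method names are illustrative, while the annotations and attributes are taken from this patch.

```
package com.datastax.driver.core;

import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD;

import org.testng.annotations.Test;

// Sketch: CCM configuration resolved per test method instead of per class.
@CreateCCM(PER_METHOD)
public class MixedModeExampleTest extends CCMTestsSupport {

  @Test(groups = "short")
  @CCMConfig(numberOfNodes = 2)
  public void should_run_against_a_dedicated_two_node_cluster() {
    // this method gets its own CCM cluster (possibly recycled through CCMCache)
  }

  @Test(groups = "short")
  @CCMConfig(createCcm = false)
  public void should_not_activate_ccm_at_all() {
    // stays in the "short" group, but no CCM cluster is created for this method
  }
}
```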

    - * If {@code true}, a cluster builder will be obtained by invoking the method - * specified by {@link #clusterProvider()} and {@link #clusterProviderClass()}, - * and then the session will be created through {@link Cluster#connect()}. - *

    - * The session will be created once for the whole class if CCM test mode is {@link CreateCCM.TestMode#PER_CLASS}, - * or once per test method, if the CCM test mode is {@link CreateCCM.TestMode#PER_METHOD}, - * - * @return {@code true} if a {@link Session} instance should be automatically created, {@code false} otherwise. - */ - boolean[] createSession() default {}; + /** + * Returns {@code true} if a {@link Cluster} instance should be automatically created, {@code + * false} otherwise. + * + *

    If {@code true}, a cluster builder will be obtained by invoking the method specified by + * {@link #clusterProvider()} and {@link #clusterProviderClass()}. + * + *

    The CCM cluster will be created once for the whole class if CCM test mode is {@link + * CreateCCM.TestMode#PER_CLASS}, or once per test method, if the CCM test mode is {@link + * CreateCCM.TestMode#PER_METHOD}, + * + * @return {@code true} if a {@link Cluster} instance should be automatically created, {@code + * false} otherwise. + */ + boolean[] createCluster() default {}; - /** - * Returns {@code true} if a test keyspace should be automatically created, {@code false} otherwise. - *

    - * If {@code true}, a cluster builder will be obtained by invoking the method - * specified by {@link #clusterProvider()} and {@link #clusterProviderClass()}, - * then a session will be created through {@link Cluster#connect()}, - * and this session will be used to create the test keyspace. - *

    - * The keyspace will be created once for the whole class if CCM test mode is {@link CreateCCM.TestMode#PER_CLASS}, - * or once per test method, if the CCM test mode is {@link CreateCCM.TestMode#PER_METHOD}. - *

    - * The test keyspace will be automatically populated upon creation with fixtures provided by - * {@link #testInitializer()} and {@link #testInitializerClass()}. - * - * @return {@code true} if a test keyspace should be automatically created, {@code false} otherwise. - */ - boolean[] createKeyspace() default {}; + /** + * Returns {@code true} if a {@link Session} instance should be automatically created, {@code + * false} otherwise. + * + *

    If {@code true}, a cluster builder will be obtained by invoking the method specified by + * {@link #clusterProvider()} and {@link #clusterProviderClass()}, and then the session will be + * created through {@link Cluster#connect()}. + * + *

    The session will be created once for the whole class if CCM test mode is {@link + * CreateCCM.TestMode#PER_CLASS}, or once per test method, if the CCM test mode is {@link + * CreateCCM.TestMode#PER_METHOD}, + * + * @return {@code true} if a {@link Session} instance should be automatically created, {@code + * false} otherwise. + */ + boolean[] createSession() default {}; - /** - * Returns {@code true} if the test class or the test method alters the CCM cluster, - * e.g. by adding or removing nodes, - * in which case, it should not be reused after the test is finished. - *

    - * If {@code true}, CCM cluster will be destroyed after the test method if test mode is - * {@link CreateCCM.TestMode#PER_METHOD}, or after the test class, if test mode - * is {@link CreateCCM.TestMode#PER_CLASS}. - * - * @return {@code true} if the test class or the test method alters the CCM cluster used, - * {@code false} otherwise. - */ - boolean[] dirtiesContext() default {}; + /** + * Returns {@code true} if a test keyspace should be automatically created, {@code false} + * otherwise. + * + *

    If {@code true}, a cluster builder will be obtained by invoking the method specified by + * {@link #clusterProvider()} and {@link #clusterProviderClass()}, then a session will be created + * through {@link Cluster#connect()}, and this session will be used to create the test keyspace. + * + *

    The keyspace will be created once for the whole class if CCM test mode is {@link + * CreateCCM.TestMode#PER_CLASS}, or once per test method, if the CCM test mode is {@link + * CreateCCM.TestMode#PER_METHOD}. + * + *

    The test keyspace will be automatically populated upon creation with fixtures provided by + * {@link #testInitializer()} and {@link #testInitializerClass()}. + * + * @return {@code true} if a test keyspace should be automatically created, {@code false} + * otherwise. + */ + boolean[] createKeyspace() default {}; - /** - * Returns the name of the method that should be invoked to obtain - * a {@link com.datastax.driver.core.CCMBridge.Builder} instance. - *

    - * This method should be declared in {@link #ccmProviderClass()}, - * or if that attribute is not set, - * it will be looked up on the test class itself. - *

    - * The method should not have parameters. It can be static or not, - * and have any visibility. - *

    - * By default, a {@link com.datastax.driver.core.CCMBridge.Builder} instance - * is obtained by parsing other attributes of this annotation. - * - * @return The name of the method that should be invoked to obtain a - * {@link com.datastax.driver.core.CCMBridge.Builder} instance. - */ - String ccmProvider() default ""; + /** + * Returns {@code true} if the test class or the test method alters the CCM cluster, e.g. by + * adding or removing nodes, in which case, it should not be reused after the test is finished. + * + *

    If {@code true}, CCM cluster will be destroyed after the test method if test mode is {@link + * CreateCCM.TestMode#PER_METHOD}, or after the test class, if test mode is {@link + * CreateCCM.TestMode#PER_CLASS}. + * + * @return {@code true} if the test class or the test method alters the CCM cluster used, {@code + * false} otherwise. + */ + boolean[] dirtiesContext() default {}; - /** - * Returns the name of the class that should be invoked to obtain - * a {@link com.datastax.driver.core.CCMBridge.Builder} instance. - *
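As an illustrative aside, a minimal sketch of how the `numberOfNodes`, `dirtiesContext` and `createKeyspace` attributes described above might be combined with PER_METHOD mode on a test class; the class, method and group names below are hypothetical, and only APIs appearing in this patch are used.

```java
package com.datastax.driver.core;

import org.testng.annotations.Test;

// Hypothetical test class: a fresh CCM context is created for every test method,
// and the cluster is never reused because the test alters its topology.
@CreateCCM(CreateCCM.TestMode.PER_METHOD)
@CCMConfig(
    numberOfNodes = 2,
    dirtiesContext = true, // the cluster is modified below, so it must not be reused
    createKeyspace = false) // no default test keyspace is needed for this test
public class NodeRemovalExampleTest extends CCMTestsSupport {

  @Test(groups = "long")
  public void should_survive_node_removal() {
    ccm().decommission(2); // alters the CCM cluster, hence dirtiesContext = true
    cluster().getMetadata().getAllHosts(); // driver-side assertions would go here
  }
}
```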

    - * This class should contain a method named after {@link #ccmProvider()}; - * if this attribute is not set, - * it will default to the test class itself. - * - * @return The name of the class that should be invoked to obtain a - * {@link com.datastax.driver.core.CCMBridge.Builder} instance. - */ - Class ccmProviderClass() default Undefined.class; + /** + * Returns the name of the method that should be invoked to obtain a {@link + * com.datastax.driver.core.CCMBridge.Builder} instance. + * + *

    This method should be declared in {@link #ccmProviderClass()}, or if that attribute is not + * set, it will be looked up on the test class itself. + * + *

    The method should not have parameters. It can be static or not, and have any visibility. + * + *

    By default, a {@link com.datastax.driver.core.CCMBridge.Builder} instance is obtained by + * parsing other attributes of this annotation. + * + * @return The name of the method that should be invoked to obtain a {@link + * com.datastax.driver.core.CCMBridge.Builder} instance. + */ + String ccmProvider() default ""; - /** - * Returns the name of the method that should be invoked to obtain - * a {@link com.datastax.driver.core.Cluster.Builder} instance. - *
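As an illustrative aside, a sketch (hypothetical class and method names) of the `ccmProvider` mechanism described above; the builder calls mirror those appearing elsewhere in this patch.

```java
package com.datastax.driver.core;

// The provider method is looked up by name on the test class itself because
// ccmProviderClass is left unset; it must return a CCMBridge.Builder.
@CCMConfig(ccmProvider = "configureCCM")
public class CustomCcmExampleTest extends CCMTestsSupport {

  static CCMBridge.Builder configureCCM() {
    return CCMBridge.builder()
        .withNodes(3) // one data center with three nodes
        .withCassandraConfiguration("enable_user_defined_functions", "true")
        .notStarted(); // the test framework starts the cluster itself
  }
}
```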

    - * This method should be declared in {@link #clusterProviderClass()}, - * or if that attribute is not set, - * it will be looked up on the test class itself. - *

    - * The method should not have parameters. It can be static or not, - * and have any visibility. - *

    - * By default, the test will look for a method named after {@code createClusterBuilder}. - * - * @return The name of the method that should be invoked to obtain a - * {@link com.datastax.driver.core.Cluster.Builder} instance. - */ - String clusterProvider() default ""; + /** + * Returns the name of the class that should be invoked to obtain a {@link + * com.datastax.driver.core.CCMBridge.Builder} instance. + * + *

    This class should contain a method named after {@link #ccmProvider()}; if this attribute is + * not set, it will default to the test class itself. + * + * @return The name of the class that should be invoked to obtain a {@link + * com.datastax.driver.core.CCMBridge.Builder} instance. + */ + Class ccmProviderClass() default Undefined.class; - /** - * Returns the name of the class that should be invoked to obtain - * a {@link com.datastax.driver.core.Cluster.Builder} instance. - *

    - * This class should contain a method named after {@link #clusterProvider()}; - * if this attribute is not set, - * it will default to the test class itself. - * - * @return The name of the class that should be invoked to obtain a - * {@link com.datastax.driver.core.Cluster.Builder} instance. - */ - Class clusterProviderClass() default Undefined.class; + /** + * Returns the name of the method that should be invoked to obtain a {@link + * com.datastax.driver.core.Cluster.Builder} instance. + * + *

    This method should be declared in {@link #clusterProviderClass()}, or if that attribute is + * not set, it will be looked up on the test class itself. + * + *

    The method should not have parameters. It can be static or not, and have any visibility. + * + *

    By default, the test will look for a method named after {@code createClusterBuilder}. + * + * @return The name of the method that should be invoked to obtain a {@link + * com.datastax.driver.core.Cluster.Builder} instance. + */ + String clusterProvider() default ""; - /** - * Returns the name of the method that should be invoked once the test context - * is initialized. - *
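The `clusterProvider` attribute described above can be used in the same way; a hypothetical sketch follows, reusing a builder call that appears elsewhere in this patch.

```java
package com.datastax.driver.core;

@CCMConfig(clusterProvider = "customClusterBuilder")
public class CustomClusterExampleTest extends CCMTestsSupport {

  // Contact points and port are added automatically by the framework,
  // so the provider only needs to customize the builder.
  Cluster.Builder customClusterBuilder() {
    return Cluster.builder().withCodecRegistry(new CodecRegistry());
  }
}
```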

    - * This method should be declared in {@link #testInitializerClass()}, - * or if that attribute is not set, - * it will be looked up on the test class itself. - *

    - * The method should not have parameters. It can be static or not, - * and have any visibility. - *

    - * By default, the framework will look for a method named - * {@code onTestContextInitialized}. - * - * @return The name of the method that should be invoked once the test context - * is initialized. - */ - String testInitializer() default ""; + /** + * Returns the name of the class that should be invoked to obtain a {@link + * com.datastax.driver.core.Cluster.Builder} instance. + * + *

    This class should contain a method named after {@link #clusterProvider()}; if this attribute + * is not set, it will default to the test class itself. + * + * @return The name of the class that should be invoked to obtain a {@link + * com.datastax.driver.core.Cluster.Builder} instance. + */ + Class clusterProviderClass() default Undefined.class; - /** - * Returns the name of the class that should be invoked once the test context - * is initialized. - *

    - * This class should contain a method named after {@link #testInitializer()}; - * if this attribute is not set, - * it will default to the test class itself. - * - * @return The name of the class that should be invoked once the test context - * is initialized. - */ - Class testInitializerClass() default Undefined.class; + /** + * Returns the name of the method that should be invoked once the test context is initialized. + * + *

    This method should be declared in {@link #testInitializerClass()}, or if that attribute is + * not set, it will be looked up on the test class itself. + * + *

    The method should not have parameters. It can be static or not, and have any visibility. + * + *

    By default, the framework will look for a method named {@code onTestContextInitialized}. + * + * @return The name of the method that should be invoked once the test context is initialized. + */ + String testInitializer() default ""; + /** + * Returns the name of the class that should be invoked once the test context is initialized. + * + *

    This class should contain a method named after {@link #testInitializer()}; if this attribute + * is not set, it will default to the test class itself. + * + * @return The name of the class that should be invoked once the test context is initialized. + */ + Class testInitializerClass() default Undefined.class; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMException.java b/driver-core/src/test/java/com/datastax/driver/core/CCMException.java index 2c4981c903a..ad1cc7068b5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMException.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMException.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,20 +19,19 @@ public class CCMException extends RuntimeException { - private final String out; - - public CCMException(String message, String out) { - super(message); - this.out = out; - } + private final String out; - public CCMException(String message, String out, Throwable cause) { - super(message, cause); - this.out = out; - } + public CCMException(String message, String out) { + super(message); + this.out = out; + } - public String getOut() { - return out; - } + public CCMException(String message, String out, Throwable cause) { + super(message, cause); + this.out = out; + } + public String getOut() { + return out; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java b/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java index 45ac66b91b7..8e8add392e6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,15 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_CLASS; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; +import static com.datastax.driver.core.TestUtils.executeNoFail; +import static com.datastax.driver.core.TestUtils.ipOfNode; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.CCMAccess.Workload; +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.CreateCCM.TestMode; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.google.common.base.Throwables; @@ -23,11 +33,6 @@ import com.google.common.collect.Lists; import com.google.common.io.Closer; import com.google.common.util.concurrent.Uninterruptibles; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.ITestResult; -import org.testng.annotations.*; - import java.io.Closeable; import java.io.File; import java.io.IOException; @@ -38,1058 +43,1114 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_CLASS; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.*; -import static org.assertj.core.api.Assertions.fail; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.ITestResult; +import org.testng.SkipException; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; @SuppressWarnings("unused") public class CCMTestsSupport { - private static final Logger LOGGER = LoggerFactory.getLogger(CCMTestsSupport.class); - - private static final AtomicInteger CCM_COUNTER = new AtomicInteger(1); - - private static final List TEST_GROUPS = Lists.newArrayList("isolated", "short", "long", "stress", "duration"); - - // A mapping of cassandra.yaml config options to their version requirements. - // If a config is passed containing one of these options and the version requirement cannot be met - // the option is simply filtered. 
- private static final Map configVersionRequirements = ImmutableMap.builder() - .put("enable_user_defined_functions", VersionNumber.parse("2.2.0")) - .build(); - - private static class ReadOnlyCCMAccess implements CCMAccess { - - private final CCMAccess delegate; + private static final Logger LOGGER = LoggerFactory.getLogger(CCMTestsSupport.class); - private ReadOnlyCCMAccess(CCMAccess delegate) { - this.delegate = delegate; - } + private static final AtomicInteger CCM_COUNTER = new AtomicInteger(1); - @Override - public String getClusterName() { - return delegate.getClusterName(); - } + private static final List TEST_GROUPS = + Lists.newArrayList("isolated", "short", "long", "stress", "duration"); - @Override - public VersionNumber getCassandraVersion() { - return delegate.getCassandraVersion(); - } + // A mapping of cassandra.yaml config options to their version requirements. + // If a config is passed containing one of these options and the version requirement cannot be met + // the option is simply filtered. + private static final Map configVersionRequirements = + ImmutableMap.builder() + .put("enable_user_defined_functions", VersionNumber.parse("2.2.0")) + .build(); - @Override - public VersionNumber getDSEVersion() { - return delegate.getDSEVersion(); - } + private static class ReadOnlyCCMAccess implements CCMAccess { - @Override - public InetSocketAddress addressOfNode(int n) { - return delegate.addressOfNode(n); - } - - @Override - public File getCcmDir() { - return delegate.getCcmDir(); - } + private final CCMAccess delegate; - @Override - public File getClusterDir() { - return delegate.getClusterDir(); - } - - @Override - public File getNodeDir(int n) { - return delegate.getNodeDir(n); - } - - @Override - public File getNodeConfDir(int n) { - return delegate.getNodeConfDir(n); - } - - @Override - public int getStoragePort() { - return delegate.getStoragePort(); - } - - @Override - public int getThriftPort() { - return delegate.getThriftPort(); - } - - @Override - public int getBinaryPort() { - return delegate.getBinaryPort(); - } + private ReadOnlyCCMAccess(CCMAccess delegate) { + this.delegate = delegate; + } - @Override - public void setKeepLogs(boolean keepLogs) { - delegate.setKeepLogs(keepLogs); - } + @Override + public String getClusterName() { + return delegate.getClusterName(); + } - @Override - public String checkForErrors() { - return delegate.checkForErrors(); - } + @Override + public VersionNumber getCassandraVersion() { + return delegate.getCassandraVersion(); + } - @Override - public void close() { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public VersionNumber getDSEVersion() { + return delegate.getDSEVersion(); + } - @Override - public void start() { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public InetSocketAddress addressOfNode(int n) { + return delegate.addressOfNode(n); + } - @Override - public void stop() { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public InetSocketAddress jmxAddressOfNode(int n) { + return delegate.jmxAddressOfNode(n); + } - @Override - public void forceStop() { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public File getCcmDir() { + return delegate.getCcmDir(); + } - @Override - public void remove() { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public File getClusterDir() { + return 
delegate.getClusterDir(); + } - @Override - public void start(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public File getNodeDir(int n) { + return delegate.getNodeDir(n); + } - @Override - public void stop(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public File getNodeConfDir(int n) { + return delegate.getNodeConfDir(n); + } - @Override - public void forceStop(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public int getStoragePort() { + return delegate.getStoragePort(); + } - @Override - public void pause(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public int getThriftPort() { + return delegate.getThriftPort(); + } - @Override - public void resume(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public int getBinaryPort() { + return delegate.getBinaryPort(); + } - @Override - public void remove(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void setKeepLogs(boolean keepLogs) { + delegate.setKeepLogs(keepLogs); + } - @Override - public void add(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public int[] getNodeCount() { + return delegate.getNodeCount(); + } - @Override - public void add(int dc, int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public List getContactPoints() { + return delegate.getContactPoints(); + } - @Override - public void decommission(int n) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public String checkForErrors() { + return delegate.checkForErrors(); + } - @Override - public void updateConfig(Map configs) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void close() { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void updateDSEConfig(Map configs) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void start() { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void updateNodeConfig(int n, String key, Object value) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void stop() { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void updateNodeConfig(int n, Map configs) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void forceStop() { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void updateDSENodeConfig(int n, String key, Object value) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void remove() { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void updateDSENodeConfig(int n, Map configs) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void start(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void setWorkload(int node, 
Workload... workload) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void stop(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void waitForUp(int node) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void forceStop(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public void waitForDown(int node) { - throw new UnsupportedOperationException("This CCM cluster is read-only"); - } + @Override + public void pause(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public ProtocolVersion getProtocolVersion() { - return delegate.getProtocolVersion(); - } + @Override + public void resume(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { - return delegate.getProtocolVersion(maximumAllowed); - } + @Override + public void remove(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @Override - public String toString() { - return delegate.toString(); - } + @Override + public void add(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); } - private static class CCMTestConfig { + @Override + public void add(int dc, int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - private final List annotations; + @Override + public void decommission(int n) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - private CCMBridge.Builder ccmBuilder; + @Override + public void updateConfig(Map configs) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - public CCMTestConfig(List annotations) { - this.annotations = annotations; - } + @Override + public void updateDSEConfig(Map configs) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - private int[] numberOfNodes() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.numberOfNodes().length > 0) - return ann.numberOfNodes(); - } - return new int[]{1}; - } + @Override + public void updateNodeConfig(int n, String key, Object value) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @SuppressWarnings("SimplifiableIfStatement") - private String version() { - for (CCMConfig ann : annotations) { - if (ann != null && !ann.version().isEmpty()) - return ann.version(); - } - return null; - } + @Override + public void updateNodeConfig(int n, Map configs) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @SuppressWarnings("SimplifiableIfStatement") - private Boolean dse() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.dse().length > 0) - return ann.dse()[0]; - } - return null; - } + @Override + public void updateDSENodeConfig(int n, String key, Object value) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @SuppressWarnings("SimplifiableIfStatement") - private boolean ssl() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.ssl().length > 0) - return ann.ssl()[0]; - } - return false; - } + @Override + public void updateDSENodeConfig(int n, Map configs) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - 
@SuppressWarnings("SimplifiableIfStatement") - private boolean auth() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.auth().length > 0) - return ann.auth()[0]; - } - return false; - } + @Override + public void setWorkload(int node, Workload... workload) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @SuppressWarnings("SimplifiableIfStatement") - private Map config() { - Map config = new HashMap(); - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - addConfigOptions(ann.config(), config); - } - return config; - } + @Override + public void waitForUp(int node) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - @SuppressWarnings("SimplifiableIfStatement") - private Map dseConfig() { - Map config = new HashMap(); - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - addConfigOptions(ann.dseConfig(), config); - } - return config; - } + @Override + public void waitForDown(int node) { + throw new UnsupportedOperationException("This CCM cluster is read-only"); + } - private void addConfigOptions(String[] conf, Map config) { - VersionNumber version = VersionNumber.parse(version()); - if (version == null) { - version = CCMBridge.getGlobalCassandraVersion(); - } else { - Boolean dse = dse(); - if (dse != null && dse) { - version = CCMBridge.getCassandraVersion(version); - } - } - for (String aConf : conf) { - String[] tokens = aConf.split(":"); - if (tokens.length != 2) - fail("Wrong configuration option: " + aConf); - String key = tokens[0]; - String value = tokens[1]; - // If we've detected a property with a version requirement, skip it if the version requirement - // cannot be met. - if (configVersionRequirements.containsKey(key)) { - VersionNumber requirement = configVersionRequirements.get(key); - if (version != null && version.compareTo(requirement) < 0) { - LOGGER.debug("Skipping inclusion of '{}' in cassandra.yaml since it requires >= C* {} and {} " + - "was detected.", aConf, requirement, version); - continue; - } - } - config.put(key, value); - } - } + @Override + public ProtocolVersion getProtocolVersion() { + return delegate.getProtocolVersion(); + } - @SuppressWarnings("SimplifiableIfStatement") - private Set jvmArgs() { - Set args = new LinkedHashSet(); - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - Collections.addAll(args, ann.jvmArgs()); - } - return args; - } + @Override + public ProtocolVersion getProtocolVersion(ProtocolVersion maximumAllowed) { + return delegate.getProtocolVersion(maximumAllowed); + } - @SuppressWarnings("SimplifiableIfStatement") - private Set startOptions() { - Set args = new LinkedHashSet(); - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - Collections.addAll(args, ann.options()); - } - return args; - } + @Override + public String toString() { + return delegate.toString(); + } + } - private List workloads() { - int total = 0; - for (int perDc : numberOfNodes()) - total += perDc; - List workloads = new ArrayList(Collections.nCopies(total, null)); - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - CCMWorkload[] annWorkloads = ann.workloads(); - for (int j = 0; j < annWorkloads.length; j++) { - CCMWorkload nodeWorkloads = annWorkloads[j]; - workloads.set(j, nodeWorkloads.value()); - } - } - return workloads; - } + private static class CCMTestConfig { - 
@SuppressWarnings("SimplifiableIfStatement") - private boolean createCcm() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.createCcm().length > 0) - return ann.createCcm()[0]; - } - return true; - } + private final List annotations; - @SuppressWarnings("SimplifiableIfStatement") - private boolean createCluster() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.createCluster().length > 0) - return ann.createCluster()[0]; - } - return true; - } + private CCMBridge.Builder ccmBuilder; - @SuppressWarnings("SimplifiableIfStatement") - private boolean createSession() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.createSession().length > 0) - return ann.createSession()[0]; - } - return true; - } + public CCMTestConfig(List annotations) { + this.annotations = annotations; + } - @SuppressWarnings("SimplifiableIfStatement") - private boolean createKeyspace() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.createKeyspace().length > 0) - return ann.createKeyspace()[0]; - } - return true; - } + private int[] numberOfNodes() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.numberOfNodes().length > 0) return ann.numberOfNodes(); + } + return new int[] {1}; + } - @SuppressWarnings("SimplifiableIfStatement") - private boolean dirtiesContext() { - for (CCMConfig ann : annotations) { - if (ann != null && ann.dirtiesContext().length > 0) - return ann.dirtiesContext()[0]; - } - return false; - } + @SuppressWarnings("SimplifiableIfStatement") + private String version() { + for (CCMConfig ann : annotations) { + if (ann != null && !ann.version().isEmpty()) return ann.version(); + } + return null; + } - private CCMBridge.Builder ccmBuilder(Object testInstance) throws Exception { - if (ccmBuilder == null) { - ccmBuilder = ccmProvider(testInstance); - if (ccmBuilder == null) { - ccmBuilder = CCMBridge.builder().withNodes(numberOfNodes()).notStarted(); - } - - String versionStr = version(); - if (versionStr != null) { - VersionNumber version = VersionNumber.parse(versionStr); - ccmBuilder.withVersion(version); - } - - Boolean dse = dse(); - if (dse != null) - ccmBuilder.withDSE(dse); - if (ssl()) - ccmBuilder.withSSL(); - if (auth()) - ccmBuilder.withAuth(); - for (Map.Entry entry : config().entrySet()) { - ccmBuilder.withCassandraConfiguration(entry.getKey(), entry.getValue()); - } - for (Map.Entry entry : dseConfig().entrySet()) { - ccmBuilder.withDSEConfiguration(entry.getKey(), entry.getValue()); - } - for (String option : startOptions()) { - ccmBuilder.withCreateOptions(option); - } - for (String arg : jvmArgs()) { - ccmBuilder.withJvmArgs(arg); - } - List workloads = workloads(); - for (int i = 0; i < workloads.size(); i++) { - Workload[] workload = workloads.get(i); - if (workload != null) - ccmBuilder.withWorkload(i + 1, workload); - } - } - return ccmBuilder; - } + @SuppressWarnings("SimplifiableIfStatement") + private Boolean dse() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.dse().length > 0) return ann.dse()[0]; + } + return null; + } - private CCMBridge.Builder ccmProvider(Object testInstance) throws Exception { - String methodName = null; - Class clazz = null; - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - if (!ann.ccmProvider().isEmpty()) { - methodName = ann.ccmProvider(); - } - if (!ann.ccmProviderClass().equals(CCMConfig.Undefined.class)) { - clazz = ann.ccmProviderClass(); - } - } - if (methodName == null) - return null; - if (clazz == null) - clazz 
= testInstance.getClass(); - Method method = locateMethod(methodName, clazz); - assert CCMBridge.Builder.class.isAssignableFrom(method.getReturnType()); - if (Modifier.isStatic(method.getModifiers())) { - return (CCMBridge.Builder) method.invoke(null); - } else { - Object receiver = testInstance.getClass().equals(clazz) ? testInstance : instantiate(clazz); - return (CCMBridge.Builder) method.invoke(receiver); - } - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean ssl() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.ssl().length > 0) return ann.ssl()[0]; + } + return false; + } - private Cluster.Builder clusterProvider(Object testInstance) throws Exception { - String methodName = null; - Class clazz = null; - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - if (!ann.clusterProvider().isEmpty()) { - methodName = ann.clusterProvider(); - } - if (!ann.clusterProviderClass().equals(CCMConfig.Undefined.class)) { - clazz = ann.clusterProviderClass(); - } - } - if (methodName == null) - methodName = "createClusterBuilder"; - if (clazz == null) - clazz = testInstance.getClass(); - Method method = locateMethod(methodName, clazz); - assert Cluster.Builder.class.isAssignableFrom(method.getReturnType()); - if (Modifier.isStatic(method.getModifiers())) { - return (Cluster.Builder) method.invoke(null); - } else { - Object receiver = testInstance.getClass().equals(clazz) ? testInstance : instantiate(clazz); - return (Cluster.Builder) method.invoke(receiver); - } - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean auth() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.auth().length > 0) return ann.auth()[0]; + } + return false; + } - @SuppressWarnings("unchecked") - private void invokeInitTest(Object testInstance) throws Exception { - String methodName = null; - Class clazz = null; - for (int i = annotations.size() - 1; i >= 0; i--) { - CCMConfig ann = annotations.get(i); - if (!ann.testInitializer().isEmpty()) { - methodName = ann.testInitializer(); - } - if (!ann.testInitializerClass().equals(CCMConfig.Undefined.class)) { - clazz = ann.testInitializerClass(); - } - } - if (methodName == null) - methodName = "onTestContextInitialized"; - if (clazz == null) - clazz = testInstance.getClass(); - Method method = locateMethod(methodName, clazz); - if (Modifier.isStatic(method.getModifiers())) { - method.invoke(null); - } else { - Object receiver = testInstance.getClass().equals(clazz) ? 
testInstance : instantiate(clazz); - method.invoke(receiver); - } - } + @SuppressWarnings("SimplifiableIfStatement") + private Map config() { + Map config = new HashMap(); + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + addConfigOptions(ann.config(), config); + } + return config; + } + @SuppressWarnings("SimplifiableIfStatement") + private Map dseConfig() { + Map config = new HashMap(); + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + addConfigOptions(ann.dseConfig(), config); + } + return config; } - private TestMode testMode; + private void addConfigOptions(String[] conf, Map config) { + VersionNumber version = VersionNumber.parse(version()); + if (version == null) { + version = CCMBridge.getGlobalCassandraVersion(); + } else { + Boolean dse = dse(); + if (dse != null && dse) { + version = CCMBridge.getCassandraVersion(version); + } + } + for (String aConf : conf) { + String[] tokens = aConf.split(":"); + if (tokens.length != 2) fail("Wrong configuration option: " + aConf); + String key = tokens[0]; + String value = tokens[1]; + // If we've detected a property with a version requirement, skip it if the version + // requirement + // cannot be met. + if (configVersionRequirements.containsKey(key)) { + VersionNumber requirement = configVersionRequirements.get(key); + if (version != null && version.compareTo(requirement) < 0) { + LOGGER.debug( + "Skipping inclusion of '{}' in cassandra.yaml since it requires >= C* {} and {} " + + "was detected.", + aConf, + requirement, + version); + continue; + } + } + config.put(key, value); + } + } - protected CCMTestConfig ccmTestConfig; + @SuppressWarnings("SimplifiableIfStatement") + private Set jvmArgs() { + Set args = new LinkedHashSet(); + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + Collections.addAll(args, ann.jvmArgs()); + } + return args; + } - private CCMAccess ccm; + @SuppressWarnings("SimplifiableIfStatement") + private Set startOptions() { + Set args = new LinkedHashSet(); + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + Collections.addAll(args, ann.options()); + } + return args; + } - private CCMBridge.Builder ccmBuilder; + private List workloads() { + int total = 0; + for (int perDc : numberOfNodes()) total += perDc; + List workloads = + new ArrayList(Collections.nCopies(total, null)); + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + CCMWorkload[] annWorkloads = ann.workloads(); + for (int j = 0; j < annWorkloads.length; j++) { + CCMWorkload nodeWorkloads = annWorkloads[j]; + workloads.set(j, nodeWorkloads.value()); + } + } + return workloads; + } - private Cluster cluster; - - private Session session; - - protected String keyspace; - - private boolean erroredOut = false; - - private Closer closer; - - /** - * Hook invoked at the beginning of a test class to initialize CCM test context. - * - * @throws Exception - */ - @BeforeClass(groups = {"isolated", "short", "long", "stress", "duration"}) - public void beforeTestClass() throws Exception { - beforeTestClass(this); - } - - /** - * Hook invoked at the beginning of a test class to initialize CCM test context. - *

    - * Useful when this class is not a superclass of the test being run. - * - * @throws Exception - */ - public void beforeTestClass(Object testInstance) throws Exception { - testMode = determineTestMode(testInstance.getClass()); - if (testMode == PER_CLASS) { - closer = Closer.create(); - try { - initTestContext(testInstance, null); - initTestCluster(testInstance); - initTestSession(); - initTestKeyspace(); - initTest(testInstance); - } catch (Exception e) { - LOGGER.error(e.getMessage(), e); - errorOut(); - fail(e.getMessage()); + @SuppressWarnings("SimplifiableIfStatement") + private boolean createCcm() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.createCcm().length > 0) return ann.createCcm()[0]; + } + return true; + } - } - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean createCluster() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.createCluster().length > 0) return ann.createCluster()[0]; + } + return true; } - /** - * Hook executed before each test method. - * - * @throws Exception - */ - @BeforeMethod(groups = {"isolated", "short", "long", "stress", "duration"}) - public void beforeTestMethod(Method testMethod) throws Exception { - beforeTestMethod(this, testMethod); - } - - /** - * Hook executed before each test method. - *

    - * Useful when this class is not a superclass of the test being run. - * - * @throws Exception - */ - public void beforeTestMethod(Object testInstance, Method testMethod) throws Exception { - if (isCcmEnabled(testMethod)) { - if (closer == null) - closer = Closer.create(); - if (testMode == PER_METHOD || erroredOut) { - try { - initTestContext(testInstance, testMethod); - initTestCluster(testInstance); - initTestSession(); - initTestKeyspace(); - initTest(testInstance); - } catch (Exception e) { - LOGGER.error(e.getMessage(), e); - errorOut(); - fail(e.getMessage()); - } - } - assert ccmTestConfig != null; - assert !ccmTestConfig.createCcm() || ccm != null; - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean createSession() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.createSession().length > 0) return ann.createSession()[0]; + } + return true; } - /** - * Hook executed after each test method. - */ - @AfterMethod(groups = {"isolated", "short", "long", "stress", "duration"}, alwaysRun = true) - public void afterTestMethod(ITestResult tr) throws Exception { - if (isCcmEnabled(tr.getMethod().getConstructorOrMethod().getMethod())) { - if (tr.getStatus() == ITestResult.FAILURE) { - errorOut(); - } - if (erroredOut || testMode == PER_METHOD) { - closeCloseables(); - closeTestCluster(); - } - if (testMode == PER_METHOD) - closeTestContext(); - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean createKeyspace() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.createKeyspace().length > 0) return ann.createKeyspace()[0]; + } + return true; } - /** - * Hook executed after each test class. - */ - @AfterClass(groups = {"isolated", "short", "long", "stress", "duration"}, alwaysRun = true) - public void afterTestClass() throws Exception { - if (testMode == PER_CLASS) { - closeCloseables(); - closeTestCluster(); - closeTestContext(); - } + @SuppressWarnings("SimplifiableIfStatement") + private boolean dirtiesContext() { + for (CCMConfig ann : annotations) { + if (ann != null && ann.dirtiesContext().length > 0) return ann.dirtiesContext()[0]; + } + return false; } - /** - * Returns the cluster builder to use for this test. - *

    - * The default implementation returns a vanilla builder. - *

    - * It's not required to call {@link com.datastax.driver.core.Cluster.Builder#addContactPointsWithPorts}, - * it will be done automatically. - * - * @return The cluster builder to use for the tests. - */ - public Cluster.Builder createClusterBuilder() { - return Cluster.builder() - // use a different codec registry for each cluster instance - .withCodecRegistry(new CodecRegistry()); - } - - /** - * Returns an alternate cluster builder to use for this test, - * with no event debouncing. - *

    - * It's not required to call {@link com.datastax.driver.core.Cluster.Builder#addContactPointsWithPorts}, - * it will be done automatically. - * - * @return The cluster builder to use for the tests. - */ - public Cluster.Builder createClusterBuilderNoDebouncing() { - return Cluster.builder().withQueryOptions(TestUtils.nonDebouncingQueryOptions()); - } - - /** - * Hook invoked when the test context is ready, but before the test itself. - * Useful to create fixtures or to insert test data. - *

    - * This method is invoked once per class is the {@link TestMode} is {@link TestMode#PER_CLASS} - * or before each test method, if the {@link TestMode} is {@link TestMode#PER_METHOD}. - *

    - * When this method is called, the cluster and the session are ready - * to be used (unless the configuration specifies that such objects - * should not be created). - *

    - * Statements executed inside this method do not need to be qualified with a keyspace name, - * in which case they are executed with the default keyspace for the test - * (unless the configuration specifies that no keyspace should be creaed for the test). - *

    - * The default implementation does nothing (no fixtures required). - */ - public void onTestContextInitialized() { - // nothing to do by default - } - - /** - * @return The {@link CCMAccess} instance to use with this test. - */ - public CCMAccess ccm() { - return ccm; - } - - /** - * @return The {@link Cluster} instance to use with this test. - * Can be null if CCM configuration specifies {@code createCluster = false}. - */ - public Cluster cluster() { - return cluster; - } - - /** - * @return The {@link Session} instance to use with this test. - * Can be null if CCM configuration specifies {@code createSession = false}. - */ - public Session session() { - return session; - } - - /** - * Executes the given statements with the test's session object. - *

    - * Useful to create test fixtures and/or load data before tests. - *

    - * This method should not be called if a session object hasn't been created - * (if CCM configuration specifies {@code createSession = false}) - * - * @param statements The statements to execute. - */ - public void execute(String... statements) { - execute(Arrays.asList(statements)); - } - - /** - * Executes the given statements with the test's session object. - *

    - * Useful to create test fixtures and/or load data before tests. - *

    - * This method should not be called if a session object hasn't been created - * (if CCM configuration specifies {@code createSession = false}) - * - * @param statements The statements to execute. - */ - public void execute(Collection statements) { - assert session != null; - for (String stmt : statements) { - try { - session.execute(stmt); - } catch (Exception e) { - errorOut(); - LOGGER.error("Could not execute statement: " + stmt, e); - Throwables.propagate(e); - } + private CCMBridge.Builder ccmBuilder(Object testInstance) throws Exception { + if (ccmBuilder == null) { + ccmBuilder = ccmProvider(testInstance); + if (ccmBuilder == null) { + ccmBuilder = CCMBridge.builder().withNodes(numberOfNodes()).notStarted(); } - } - /** - * Signals that the test has encountered an unexpected error. - *

    - * This method is automatically called when a test finishes with an unexpected exception - * being thrown, but it is also possible to manually invoke it. - *

    - * Calling this method will close the current cluster and session. - * The CCM data directory will be kept after the test session is finished, - * for debugging purposes - *

    - * This method should not be called before the test has started, nor after the test is finished. - */ - public void errorOut() { - erroredOut = true; - if (ccm != null) { - ccm.setKeepLogs(true); + String versionStr = version(); + if (versionStr != null) { + VersionNumber version = VersionNumber.parse(versionStr); + ccmBuilder.withVersion(version); } - } - /** - * Returns the contact points to use to contact the CCM cluster. - *

    - * This method returns as many contact points as the number of nodes initially present in the CCM cluster, - * according to {@link CCMConfig} annotations. - *

    - * On a multi-DC setup, this will include nodes in all data centers. - *

    - * This method should not be called before the test has started, nor after the test is finished. - * - * @return the contact points to use to contact the CCM cluster. - */ - public List getContactPoints() { - assert ccmTestConfig != null; - List contactPoints = new ArrayList(); - int n = 1; - int[] numberOfNodes = ccmTestConfig.numberOfNodes(); - for (int dc = 1; dc <= numberOfNodes.length; dc++) { - int nodesInDc = numberOfNodes[dc - 1]; - for (int i = 0; i < nodesInDc; i++) { - try { - contactPoints.add(InetAddress.getByName(ipOfNode(n))); - } catch (UnknownHostException e) { - Throwables.propagate(e); - } - n++; - } + Boolean dse = dse(); + if (dse != null) ccmBuilder.withDSE(dse); + if (ssl()) ccmBuilder.withSSL(); + if (auth()) ccmBuilder.withAuth(); + for (Map.Entry entry : config().entrySet()) { + ccmBuilder.withCassandraConfiguration(entry.getKey(), entry.getValue()); } - return contactPoints; - } - - /** - * Returns the contact points to use to contact the CCM cluster. - *

    - * This method returns as many contact points as the number of nodes initially present in the CCM cluster, - * according to {@link CCMConfig} annotations. - *

    - * On a multi-DC setup, this will include nodes in all data centers. - *

    - * This method should not be called before the test has started, nor after the test is finished. - * - * @return the contact points to use to contact the CCM cluster. - */ - public List getContactPointsWithPorts() { - assert ccmTestConfig != null; - List contactPoints = new ArrayList(); - int n = 1; - int[] numberOfNodes = ccmTestConfig.numberOfNodes(); - for (int dc = 1; dc <= numberOfNodes.length; dc++) { - int nodesInDc = numberOfNodes[dc - 1]; - for (int i = 0; i < nodesInDc; i++) { - contactPoints.add(new InetSocketAddress(ipOfNode(n), ccm.getBinaryPort())); - n++; - } + for (Map.Entry entry : dseConfig().entrySet()) { + ccmBuilder.withDSEConfiguration(entry.getKey(), entry.getValue()); } - return contactPoints; - } - - /** - * Registers the given {@link Closeable} to be closed at the end of the current test method. - *

    - * This method should not be called before the test has started, nor after the test is finished. - * - * @param closeable The closeable to close - * @return The closeable to close - */ - public T register(T closeable) { - closer.register(closeable); - return closeable; - } - - /** - * Tests fail randomly with InvalidQueryException: Keyspace 'xxx' does not exist; - * this method tries at most 3 times to issue a successful USE statement. - * - * @param ks The keyspace to USE - */ - public void useKeyspace(String ks) { - useKeyspace(session(), ks); - } - - /** - * Tests fail randomly with InvalidQueryException: Keyspace 'xxx' does not exist; - * this method tries at most 3 times to issue a successful USE statement. - * - * @param session The session to use - * @param ks The keyspace to USE - */ - public void useKeyspace(Session session, String ks) { - final int maxTries = 3; - for (int i = 1; i <= maxTries; i++) { - try { - session.execute("USE " + ks); - } catch (InvalidQueryException e) { - if (i == maxTries) - throw e; - LOGGER.error("Could not USE keyspace, retrying"); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MINUTES); - } + for (String option : startOptions()) { + ccmBuilder.withCreateOptions(option); } - } - - protected void initTestContext(Object testInstance, Method testMethod) throws Exception { - erroredOut = false; - ccmTestConfig = createCCMTestConfig(testInstance, testMethod); - assert ccmTestConfig != null; - if (ccmTestConfig.createCcm()) { - ccmBuilder = ccmTestConfig.ccmBuilder(testInstance); - CCMAccess ccm = CCMCache.get(ccmBuilder); - assert ccm != null; - if (ccmTestConfig.dirtiesContext()) { - this.ccm = ccm; - } else { - this.ccm = new ReadOnlyCCMAccess(ccm); - } - try { - ccm.start(); - } catch (CCMException e) { - errorOut(); - fail(e.getMessage()); - } - LOGGER.debug("Using {}", ccm); + for (String arg : jvmArgs()) { + ccmBuilder.withJvmArgs(arg); } - } - - protected void initTestCluster(Object testInstance) throws Exception { - if (ccmTestConfig.createCcm() && ccmTestConfig.createCluster()) { - Cluster.Builder builder = ccmTestConfig.clusterProvider(testInstance); - // add contact points only if the provided builder didn't do so - if (builder.getContactPoints().isEmpty()) - builder.addContactPoints(getContactPoints()); - builder.withPort(ccm.getBinaryPort()); - cluster = register(builder.build()); - cluster.init(); + List workloads = workloads(); + for (int i = 0; i < workloads.size(); i++) { + Workload[] workload = workloads.get(i); + if (workload != null) ccmBuilder.withWorkload(i + 1, workload); } + } + return ccmBuilder; } - protected void initTestSession() throws Exception { - if (ccmTestConfig.createCcm() && ccmTestConfig.createCluster() && ccmTestConfig.createSession()) - session = register(cluster.connect()); + private CCMBridge.Builder ccmProvider(Object testInstance) throws Exception { + String methodName = null; + Class clazz = null; + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + if (!ann.ccmProvider().isEmpty()) { + methodName = ann.ccmProvider(); + } + if (!ann.ccmProviderClass().equals(CCMConfig.Undefined.class)) { + clazz = ann.ccmProviderClass(); + } + } + if (methodName == null) return null; + if (clazz == null) clazz = testInstance.getClass(); + Method method = locateMethod(methodName, clazz); + assert CCMBridge.Builder.class.isAssignableFrom(method.getReturnType()); + if (Modifier.isStatic(method.getModifiers())) { + return (CCMBridge.Builder) method.invoke(null); + } else { + Object 
receiver = testInstance.getClass().equals(clazz) ? testInstance : instantiate(clazz); + return (CCMBridge.Builder) method.invoke(receiver); + } } - protected void initTestKeyspace() { - if (ccmTestConfig.createCcm() && ccmTestConfig.createCluster() && ccmTestConfig.createSession() && ccmTestConfig.createKeyspace()) { - try { - keyspace = TestUtils.generateIdentifier("ks_"); - LOGGER.debug("Using keyspace " + keyspace); - session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); - useKeyspace(keyspace); - } catch (Exception e) { - errorOut(); - LOGGER.error("Could not create test keyspace", e); - Throwables.propagate(e); - } - } + private Cluster.Builder clusterProvider(Object testInstance) throws Exception { + String methodName = null; + Class clazz = null; + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + if (!ann.clusterProvider().isEmpty()) { + methodName = ann.clusterProvider(); + } + if (!ann.clusterProviderClass().equals(CCMConfig.Undefined.class)) { + clazz = ann.clusterProviderClass(); + } + } + if (methodName == null) methodName = "createClusterBuilder"; + if (clazz == null) clazz = testInstance.getClass(); + Method method = locateMethod(methodName, clazz); + assert Cluster.Builder.class.isAssignableFrom(method.getReturnType()); + if (Modifier.isStatic(method.getModifiers())) { + return (Cluster.Builder) method.invoke(null); + } else { + Object receiver = testInstance.getClass().equals(clazz) ? testInstance : instantiate(clazz); + return (Cluster.Builder) method.invoke(receiver); + } } - protected void initTest(Object testInstance) throws Exception { - ccmTestConfig.invokeInitTest(testInstance); + @SuppressWarnings("unchecked") + private void invokeInitTest(Object testInstance) throws Exception { + String methodName = null; + Class clazz = null; + for (int i = annotations.size() - 1; i >= 0; i--) { + CCMConfig ann = annotations.get(i); + if (!ann.testInitializer().isEmpty()) { + methodName = ann.testInitializer(); + } + if (!ann.testInitializerClass().equals(CCMConfig.Undefined.class)) { + clazz = ann.testInitializerClass(); + } + } + if (methodName == null) methodName = "onTestContextInitialized"; + if (clazz == null) clazz = testInstance.getClass(); + Method method = locateMethod(methodName, clazz); + if (Modifier.isStatic(method.getModifiers())) { + method.invoke(null); + } else { + Object receiver = testInstance.getClass().equals(clazz) ? 
testInstance : instantiate(clazz); + method.invoke(receiver); + } } - - protected void closeTestContext() throws Exception { - if (ccmTestConfig != null && ccmBuilder != null && ccm != null) { - if (ccmTestConfig.dirtiesContext()) { - CCMCache.remove(ccmBuilder); - ccm.close(); - } else { - ((ReadOnlyCCMAccess) ccm).delegate.close(); - } - } - ccmTestConfig = null; - ccmBuilder = null; - ccm = null; - } - - protected void closeTestCluster() { - if (cluster != null && !cluster.isClosed()) - executeNoFail(new Runnable() { - @Override - public void run() { - cluster.close(); - } - }, false); - cluster = null; - session = null; - keyspace = null; - } - - protected void closeCloseables() { - if (closer != null) - executeNoFail(new Callable() { - @Override - public Void call() throws IOException { - closer.close(); - return null; - } - }, false); - } - - private static boolean isCcmEnabled(Method testMethod) { - Test ann = locateAnnotation(testMethod, Test.class); - return !Collections.disjoint(Arrays.asList(ann.groups()), TEST_GROUPS); - } - - private static CCMTestConfig createCCMTestConfig(Object testInstance, Method testMethod) throws Exception { - ArrayList annotations = new ArrayList(); - CCMConfig ann = locateAnnotation(testMethod, CCMConfig.class); - if (ann != null) - annotations.add(ann); - locateClassAnnotations(testInstance.getClass(), CCMConfig.class, annotations); - return new CCMTestConfig(annotations); - } - - private static TestMode determineTestMode(Class testClass) { - List annotations = locateClassAnnotations(testClass, CreateCCM.class, new ArrayList()); - if (!annotations.isEmpty()) - return annotations.get(0).value(); - return PER_CLASS; - } - - private static A locateAnnotation(Method testMethod, Class clazz) { - if (testMethod == null) - return null; - testMethod.setAccessible(true); - return testMethod.getAnnotation(clazz); - } - - private static List locateClassAnnotations(Class clazz, Class annotationClass, List annotations) { - A ann = clazz.getAnnotation(annotationClass); - if (ann != null) - annotations.add(ann); - clazz = clazz.getSuperclass(); - if (clazz == null) - return annotations; - return locateClassAnnotations(clazz, annotationClass, annotations); - } - - private static Method locateMethod(String methodName, Class clazz) throws NoSuchMethodException { + } + + private TestMode testMode; + + protected CCMTestConfig ccmTestConfig; + + private CCMAccess ccm; + + private CCMBridge.Builder ccmBuilder; + + private Cluster cluster; + + private Session session; + + protected String keyspace; + + private boolean erroredOut = false; + + private Closer closer; + + /** + * Hook invoked at the beginning of a test class to initialize CCM test context. + * + * @throws Exception + */ + @BeforeClass(groups = {"isolated", "short", "long", "stress", "duration"}) + public void beforeTestClass() throws Exception { + beforeTestClass(this); + } + + /** + * Hook invoked at the beginning of a test class to initialize CCM test context. + * + *

    Useful when this class is not a superclass of the test being run. + * + * @throws Exception + */ + public void beforeTestClass(Object testInstance) throws Exception { + testMode = determineTestMode(testInstance.getClass()); + if (testMode == PER_CLASS) { + closer = Closer.create(); + try { + initTestContext(testInstance, null); + initTestCluster(testInstance); + initTestSession(); + initTestKeyspace(); + initTest(testInstance); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + errorOut(); + throw e; + } + } + } + + /** + * Hook executed before each test method. + * + * @throws Exception + */ + @BeforeMethod(groups = {"isolated", "short", "long", "stress", "duration"}) + public void beforeTestMethod(Method testMethod) throws Exception { + beforeTestMethod(this, testMethod); + } + + /** + * Hook executed before each test method. + * + *

    Useful when this class is not a superclass of the test being run. + * + * @throws Exception + */ + public void beforeTestMethod(Object testInstance, Method testMethod) throws Exception { + if (isCcmEnabled(testMethod)) { + if (closer == null) closer = Closer.create(); + if (testMode == PER_METHOD || erroredOut) { try { - Method method = clazz.getDeclaredMethod(methodName); - method.setAccessible(true); - return method; - } catch (NoSuchMethodException e) { - clazz = clazz.getSuperclass(); - if (clazz == null) - throw e; - return locateMethod(methodName, clazz); - } + initTestContext(testInstance, testMethod); + initTestCluster(testInstance); + initTestSession(); + initTestKeyspace(); + initTest(testInstance); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + errorOut(); + fail(e.getMessage()); + } + } + assert ccmTestConfig != null; + assert !ccmTestConfig.createCcm() || ccm != null; } - - @SuppressWarnings("unchecked") - private static T instantiate(Class clazz) throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException { - if (clazz.getEnclosingClass() == null || Modifier.isStatic(clazz.getModifiers())) { - Constructor constructor = clazz.getDeclaredConstructor(); - constructor.setAccessible(true); - return (T) constructor.newInstance(); - } else { - Class enclosingClass = clazz.getEnclosingClass(); - Object enclosingInstance = enclosingClass.newInstance(); - Constructor constructor = clazz.getDeclaredConstructor(enclosingClass); - constructor.setAccessible(true); - return (T) constructor.newInstance(enclosingInstance); + } + + /** Hook executed after each test method. */ + @AfterMethod( + groups = {"isolated", "short", "long", "stress", "duration"}, + alwaysRun = true) + public void afterTestMethod(ITestResult tr) throws Exception { + if (isCcmEnabled(tr.getMethod().getConstructorOrMethod().getMethod())) { + if (tr.getStatus() == ITestResult.FAILURE) { + errorOut(); + } + if (erroredOut || testMode == PER_METHOD) { + closeCloseables(); + closeTestCluster(); + } + if (testMode == PER_METHOD) closeTestContext(); + } + } + + /** Hook executed after each test class. */ + @AfterClass( + groups = {"isolated", "short", "long", "stress", "duration"}, + alwaysRun = true) + public void afterTestClass() throws Exception { + if (testMode == PER_CLASS) { + closeCloseables(); + closeTestCluster(); + closeTestContext(); + } + } + + /** + * Returns the cluster builder to use for this test. + * + *

    The default implementation returns a vanilla builder with contact points and port that match + * the running CCM cluster. Therefore it's not required to call {@link + * Cluster.Builder#addContactPointsWithPorts}, it will be done automatically. + * + * @return The cluster builder to use for the tests. + */ + public Cluster.Builder createClusterBuilder() { + Cluster.Builder builder = Cluster.builder(); + return configureClusterBuilder(builder); + } + + /** + * Returns an alternate cluster builder to use for this test, with no event debouncing. + * + *
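Either builder can be further customized per test class by overriding `createClusterBuilder()`; a minimal sketch, where the extra socket option is an arbitrary illustration:

```java
// Hypothetical override: contact points and port are still filled in automatically
// (via configureClusterBuilder / initTestCluster), so only extra options are set here.
@Override
public Cluster.Builder createClusterBuilder() {
  return super.createClusterBuilder()
      .withSocketOptions(new SocketOptions().setReadTimeoutMillis(30000));
}
```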

    It's not required to call {@link + * com.datastax.driver.core.Cluster.Builder#addContactPointsWithPorts}, it will be done + * automatically. + * + * @return The cluster builder to use for the tests. + */ + public Cluster.Builder createClusterBuilderNoDebouncing() { + return createClusterBuilder().withQueryOptions(TestUtils.nonDebouncingQueryOptions()); + } + + /** + * Configures the builder with contact points and port that match the running CCM cluster. + * Therefore it's not required to call {@link Cluster.Builder#addContactPointsWithPorts}, it will + * be done automatically. + * + * @return The cluster builder (for method chaining). + */ + protected Builder configureClusterBuilder(Builder builder) { + return TestUtils.configureClusterBuilder(builder, ccm()); + } + + /** + * Hook invoked when the test context is ready, but before the test itself. Useful to create + * fixtures or to insert test data. + * + *

    This method is invoked once per class if the {@link TestMode} is {@link TestMode#PER_CLASS} + * or before each test method, if the {@link TestMode} is {@link TestMode#PER_METHOD}. + * +

    When this method is called, the cluster and the session are ready to be used (unless the + * configuration specifies that such objects should not be created). + * + *

    Statements executed inside this method do not need to be qualified with a keyspace name, in + * which case they are executed with the default keyspace for the test (unless the configuration + * specifies that no keyspace should be created for the test). + * +
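For example, a test needing a small fixture could override the hook along these lines (table name and rows are hypothetical):

```java
// Hypothetical fixture: the statements are deliberately left unqualified so that they
// run against the per-test keyspace created by initTestKeyspace().
@Override
public void onTestContextInitialized() {
  execute(
      "CREATE TABLE users (id int PRIMARY KEY, name text)",
      "INSERT INTO users (id, name) VALUES (1, 'alice')");
}
```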

    The default implementation does nothing (no fixtures required). + */ + public void onTestContextInitialized() { + // nothing to do by default + } + + /** @return The {@link CCMAccess} instance to use with this test. */ + public CCMAccess ccm() { + return ccm; + } + + /** + * @return The {@link Cluster} instance to use with this test. Can be null if CCM configuration + * specifies {@code createCluster = false}. + */ + public Cluster cluster() { + return cluster; + } + + /** + * @return The {@link Session} instance to use with this test. Can be null if CCM configuration + * specifies {@code createSession = false}. + */ + public Session session() { + return session; + } + + /** + * Executes the given statements with the test's session object. + * + *

    Useful to create test fixtures and/or load data before tests. + * + *

    This method should not be called if a session object hasn't been created (if CCM + * configuration specifies {@code createSession = false}). + * + * @param statements The statements to execute. + */ + public void execute(String... statements) { + execute(Arrays.asList(statements)); + } + + /** + * Executes the given statements with the test's session object. + * +

    Useful to create test fixtures and/or load data before tests. + * + *

    This method should not be called if a session object hasn't been created (if CCM + * configuration specifies {@code createSession = false}) + * + * @param statements The statements to execute. + */ + public void execute(Collection statements) { + assert session != null; + for (String stmt : statements) { + try { + session.execute(stmt); + } catch (Exception e) { + errorOut(); + LOGGER.error("Could not execute statement: " + stmt, e); + Throwables.propagate(e); + } + } + } + + /** + * Signals that the test has encountered an unexpected error. + * + *

    This method is automatically called when a test finishes with an unexpected exception being + * thrown, but it is also possible to manually invoke it. + * + *
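A manual invocation would typically look like the sketch below, for instance when a test detects an unrecoverable condition on its own (the statement is hypothetical):

```java
// Hypothetical: mark the test as errored so that the CCM logs and data directory are kept
// for debugging; the cluster and session are then closed by the after-test hooks.
try {
  session().execute("UPDATE users SET name = 'bob' WHERE id = 1");
} catch (Exception e) {
  errorOut();
  throw e;
}
```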

    Calling this method will close the current cluster and session. The CCM data directory will + * be kept after the test session is finished, for debugging purposes. + * +

    This method should not be called before the test has started, nor after the test is + * finished. + */ + public void errorOut() { + erroredOut = true; + if (ccm != null) { + ccm.setKeepLogs(true); + } + } + + /** + * Returns the contact points to use to contact the CCM cluster. + * + *

    This method returns as many contact points as the number of nodes initially present in the + * CCM cluster, according to {@link CCMConfig} annotations. + * + *

    On a multi-DC setup, this will include nodes in all data centers. + * + *

    This method should not be called before the test has started, nor after the test is + * finished. + * + * @return the contact points to use to contact the CCM cluster. + */ + public List getContactPoints() { + assert ccmTestConfig != null; + List contactPoints = new ArrayList(); + int n = 1; + int[] numberOfNodes = ccmTestConfig.numberOfNodes(); + for (int dc = 1; dc <= numberOfNodes.length; dc++) { + int nodesInDc = numberOfNodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + try { + contactPoints.add(InetAddress.getByName(ipOfNode(n))); + } catch (UnknownHostException e) { + Throwables.propagate(e); } + n++; + } } - + return contactPoints; + } + + /** + * Returns the contact points to use to contact the CCM cluster. + * + *

    This method returns as many contact points as the number of nodes initially present in the + * CCM cluster, according to {@link CCMConfig} annotations. + * + *

    On a multi-DC setup, this will include nodes in all data centers. + * + *
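As an illustration, the returned addresses can be passed straight to a driver builder when a test needs to build an extra `Cluster` against the same CCM nodes (a sketch; the variable name is arbitrary):

```java
// Hypothetical: a second Cluster pointing at the same CCM nodes; the ports are already
// included in the addresses, so no separate withPort(...) call is needed.
Cluster extra =
    Cluster.builder()
        .addContactPointsWithPorts(getContactPointsWithPorts())
        .build();
```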

    This method should not be called before the test has started, nor after the test is + * finished. + * + * @return the contact points to use to contact the CCM cluster. + */ + public List getContactPointsWithPorts() { + assert ccmTestConfig != null; + List contactPoints = new ArrayList(); + int n = 1; + int[] numberOfNodes = ccmTestConfig.numberOfNodes(); + for (int dc = 1; dc <= numberOfNodes.length; dc++) { + int nodesInDc = numberOfNodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + contactPoints.add(new InetSocketAddress(ipOfNode(n), ccm.getBinaryPort())); + n++; + } + } + return contactPoints; + } + + /** + * Registers the given {@link Closeable} to be closed at the end of the current test method. + * + *
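Typical usage is to wrap any extra driver objects a test creates, mirroring what `initTestCluster()` and `initTestSession()` do internally; a sketch:

```java
// Hypothetical: both objects are closed automatically by closeCloseables()
// when the current test (method or class, depending on TestMode) finishes.
Cluster extraCluster = register(createClusterBuilder().build());
Session extraSession = register(extraCluster.connect());
```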

    This method should not be called before the test has started, nor after the test is + * finished. + * + * @param closeable The closeable to close + * @return The closeable to close + */ + public T register(T closeable) { + closer.register(closeable); + return closeable; + } + + /** + * Tests fail randomly with InvalidQueryException: Keyspace 'xxx' does not exist; this method + * tries at most 3 times to issue a successful USE statement. + * + * @param ks The keyspace to USE + */ + public void useKeyspace(String ks) { + useKeyspace(session(), ks); + } + + /** + * Tests fail randomly with InvalidQueryException: Keyspace 'xxx' does not exist; this method + * tries at most 3 times to issue a successful USE statement. + * + * @param session The session to use + * @param ks The keyspace to USE + */ + public void useKeyspace(Session session, String ks) { + final int maxTries = 3; + for (int i = 1; i <= maxTries; i++) { + try { + session.execute("USE " + ks); + } catch (InvalidQueryException e) { + if (i == maxTries) throw e; + LOGGER.error("Could not USE keyspace, retrying"); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MINUTES); + } + } + } + + protected void initTestContext(Object testInstance, Method testMethod) throws Exception { + erroredOut = false; + ccmTestConfig = createCCMTestConfig(testInstance, testMethod); + assert ccmTestConfig != null; + if (ccmTestConfig.createCcm()) { + ccmBuilder = ccmTestConfig.ccmBuilder(testInstance); + CCMAccess ccm = CCMCache.get(ccmBuilder); + assert ccm != null; + if (ccmTestConfig.dirtiesContext()) { + this.ccm = ccm; + } else { + this.ccm = new ReadOnlyCCMAccess(ccm); + } + try { + ccm.start(); + } catch (CCMException e) { + errorOut(); + fail(e.getMessage()); + } + LOGGER.debug("Using {}", ccm); + } + } + + protected void initTestCluster(Object testInstance) throws Exception { + if (ccmTestConfig.createCcm() && ccmTestConfig.createCluster()) { + Cluster.Builder builder = ccmTestConfig.clusterProvider(testInstance); + // add contact points only if the provided builder didn't do so + if (builder.getContactPoints().isEmpty()) builder.addContactPoints(getContactPoints()); + builder.withPort(ccm.getBinaryPort()); + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0 + && ccm().getCassandraVersion().compareTo(VersionNumber.parse("4.0-beta5")) < 0) { + // prevent usage of protocol v5 for 3.10 and 3.11 since these versions have the beta + // version of it + builder.withProtocolVersion(ProtocolVersion.V4); + } + cluster = register(builder.build()); + cluster.init(); + } + } + + protected void initTestSession() throws Exception { + if (ccmTestConfig.createCcm() && ccmTestConfig.createCluster() && ccmTestConfig.createSession()) + session = register(cluster.connect()); + } + + protected void initTestKeyspace() { + if (ccmTestConfig.createCcm() + && ccmTestConfig.createCluster() + && ccmTestConfig.createSession() + && ccmTestConfig.createKeyspace()) { + try { + keyspace = TestUtils.generateIdentifier("ks_"); + LOGGER.debug("Using keyspace " + keyspace); + session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); + useKeyspace(keyspace); + } catch (Exception e) { + errorOut(); + LOGGER.error("Could not create test keyspace", e); + Throwables.propagate(e); + } + } + } + + protected void initTest(Object testInstance) throws Exception { + ccmTestConfig.invokeInitTest(testInstance); + } + + protected void closeTestContext() throws Exception { + if (ccmTestConfig != null && ccmBuilder != null && ccm != null) { + if 
(ccmTestConfig.dirtiesContext()) { + CCMCache.remove(ccmBuilder); + ccm.close(); + } else { + ((ReadOnlyCCMAccess) ccm).delegate.close(); + } + } + ccmTestConfig = null; + ccmBuilder = null; + ccm = null; + } + + protected void closeTestCluster() { + if (cluster != null && !cluster.isClosed()) + executeNoFail( + new Runnable() { + @Override + public void run() { + cluster.close(); + } + }, + false); + cluster = null; + session = null; + keyspace = null; + } + + protected void resetTestSession() throws Exception { + session.close(); + Cluster.Builder builder = ccmTestConfig.clusterProvider(this); + // add contact points only if the provided builder didn't do so + if (builder.getContactPoints().isEmpty()) builder.addContactPoints(getContactPoints()); + builder.withPort(ccm.getBinaryPort()); + cluster = register(builder.build()); + cluster.init(); + + session.close(); + session = register(cluster.connect()); + useKeyspace(session, keyspace); + } + + protected void closeCloseables() { + if (closer != null) + executeNoFail( + new Callable() { + @Override + public Void call() throws IOException { + closer.close(); + return null; + } + }, + false); + } + + private static boolean isCcmEnabled(Method testMethod) { + Test ann = locateAnnotation(testMethod, Test.class); + return !Collections.disjoint(Arrays.asList(ann.groups()), TEST_GROUPS); + } + + private static CCMTestConfig createCCMTestConfig(Object testInstance, Method testMethod) + throws Exception { + ArrayList annotations = new ArrayList(); + CCMConfig ann = locateAnnotation(testMethod, CCMConfig.class); + if (ann != null) annotations.add(ann); + locateClassAnnotations(testInstance.getClass(), CCMConfig.class, annotations); + return new CCMTestConfig(annotations); + } + + private static TestMode determineTestMode(Class testClass) { + List annotations = + locateClassAnnotations(testClass, CreateCCM.class, new ArrayList()); + if (!annotations.isEmpty()) return annotations.get(0).value(); + return PER_CLASS; + } + + private static A locateAnnotation( + Method testMethod, Class clazz) { + if (testMethod == null) return null; + testMethod.setAccessible(true); + return testMethod.getAnnotation(clazz); + } + + private static List locateClassAnnotations( + Class clazz, Class annotationClass, List annotations) { + A ann = clazz.getAnnotation(annotationClass); + if (ann != null) annotations.add(ann); + clazz = clazz.getSuperclass(); + if (clazz == null) return annotations; + return locateClassAnnotations(clazz, annotationClass, annotations); + } + + private static Method locateMethod(String methodName, Class clazz) + throws NoSuchMethodException { + try { + Method method = clazz.getDeclaredMethod(methodName); + method.setAccessible(true); + return method; + } catch (NoSuchMethodException e) { + clazz = clazz.getSuperclass(); + if (clazz == null) throw e; + return locateMethod(methodName, clazz); + } + } + + @SuppressWarnings("unchecked") + private static T instantiate(Class clazz) + throws NoSuchMethodException, InstantiationException, IllegalAccessException, + java.lang.reflect.InvocationTargetException { + if (clazz.getEnclosingClass() == null || Modifier.isStatic(clazz.getModifiers())) { + Constructor constructor = clazz.getDeclaredConstructor(); + constructor.setAccessible(true); + return (T) constructor.newInstance(); + } else { + Class enclosingClass = clazz.getEnclosingClass(); + Object enclosingInstance = enclosingClass.newInstance(); + Constructor constructor = clazz.getDeclaredConstructor(enclosingClass); + constructor.setAccessible(true); 
+ return (T) constructor.newInstance(enclosingInstance); + } + } + + protected boolean isCassandraVersionOrHigher(String version) { + return CCMBridge.getGlobalCassandraVersion().compareTo(VersionNumber.parse(version)) >= 0; + } + + protected void skipTestWithCassandraVersionOrHigher(String version, String testKind) { + if (isCassandraVersionOrHigher(version)) { + throw new SkipException( + String.format( + "%s tests not applicable to cassandra version >= %s (configured: %s)", + testKind, version, CCMBridge.getGlobalCassandraVersion())); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMWorkload.java b/driver-core/src/test/java/com/datastax/driver/core/CCMWorkload.java index 8dd96b65a04..ed18c9a3a47 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMWorkload.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMWorkload.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,18 +22,15 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -/** - * A set of workloads to assign to a specific node. - */ +/** A set of workloads to assign to a specific node. */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.ANNOTATION_TYPE) public @interface CCMWorkload { - /** - * The workloads to assign to a specific node. - * - * @return The workloads to assign to a specifc node. - */ - CCMAccess.Workload[] value() default {}; - + /** + * The workloads to assign to a specific node. + * + * @return The workloads to assign to a specifc node. + */ + CCMAccess.Workload[] value() default {}; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CaseSensitivityTest.java b/driver-core/src/test/java/com/datastax/driver/core/CaseSensitivityTest.java index 098c41674ce..89e0f6c398e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CaseSensitivityTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CaseSensitivityTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,43 +17,43 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; -import static org.testng.Assert.*; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; + +import org.testng.annotations.Test; @CCMConfig(clusterProvider = "createClusterBuilderNoDebouncing") public class CaseSensitivityTest extends CCMTestsSupport { - @Test(groups = "short") - public void testCaseInsensitiveKeyspace() throws Throwable { - String ksName = "MyKeyspace1"; - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ksName, 1)); - assertExists(ksName, "mykeyspace1"); - assertExists("mykeyspace1", "mykeyspace1"); - assertExists("MYKEYSPACE1", "mykeyspace1"); - } - - @Test(groups = "short") - public void testCaseSensitiveKeyspace() throws Throwable { - String ksName = "\"MyKeyspace2\""; - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ksName, 1)); - assertExists(ksName, "MyKeyspace2"); - assertExists(Metadata.quote("MyKeyspace2"), "MyKeyspace2"); - assertNotExists("mykeyspace2"); - assertNotExists("MyKeyspace2"); - assertNotExists("MYKEYSPACE2"); - } - - private void assertExists(String fetchName, String realName) { - KeyspaceMetadata km = cluster().getMetadata().getKeyspace(fetchName); - assertNotNull(km); - assertEquals(realName, km.getName()); - } - - private void assertNotExists(String name) { - assertNull(cluster().getMetadata().getKeyspace(name)); - } - - + @Test(groups = "short") + public void testCaseInsensitiveKeyspace() throws Throwable { + String ksName = "MyKeyspace1"; + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ksName, 1)); + assertExists(ksName, "mykeyspace1"); + assertExists("mykeyspace1", "mykeyspace1"); + assertExists("MYKEYSPACE1", "mykeyspace1"); + } + + @Test(groups = "short") + public void testCaseSensitiveKeyspace() throws Throwable { + String ksName = "\"MyKeyspace2\""; + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ksName, 1)); + assertExists(ksName, "MyKeyspace2"); + assertExists(Metadata.quote("MyKeyspace2"), "MyKeyspace2"); + assertNotExists("mykeyspace2"); + assertNotExists("MyKeyspace2"); + assertNotExists("MYKEYSPACE2"); + } + + private void assertExists(String fetchName, String realName) { + KeyspaceMetadata km = cluster().getMetadata().getKeyspace(fetchName); + assertNotNull(km); + assertEquals(realName, km.getName()); + } + + private void assertNotExists(String name) { + assertNull(cluster().getMetadata().getKeyspace(name)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClockFactoryTest.java b/driver-core/src/test/java/com/datastax/driver/core/ClockFactoryTest.java index 603912debbc..8847dc43304 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClockFactoryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClockFactoryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,34 +17,35 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import org.testng.SkipException; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class ClockFactoryTest { - final String osName = System.getProperty("os.name"); + final String osName = System.getProperty("os.name"); - @Test(groups = "unit") - public void should_use_native_clock_on_unix_platforms() { - // Cowardly assume any non-windows platform will support gettimeofday. If we find one that doesn't, - // we will have learned something. - if (osName.startsWith("Windows")) { - throw new SkipException("Skipping test for Windows platforms."); - } - Clock clock = ClockFactory.newInstance(); - assertThat(clock).isInstanceOf(NativeClock.class); - assertThat(clock.currentTimeMicros()).isGreaterThan(0); + @Test(groups = "unit") + public void should_use_native_clock_on_unix_platforms() { + // Cowardly assume any non-windows platform will support gettimeofday. If we find one that + // doesn't, + // we will have learned something. + if (osName.startsWith("Windows")) { + throw new SkipException("Skipping test for Windows platforms."); } + Clock clock = ClockFactory.newInstance(); + assertThat(clock).isInstanceOf(NativeClock.class); + assertThat(clock.currentTimeMicros()).isGreaterThan(0); + } - @Test(groups = "unit") - public void should_fallback_on_system_clock_on_windows_platforms() { - if (!osName.startsWith("Windows")) { - throw new SkipException("Skipping test for non-Windows platforms."); - } - Clock clock = ClockFactory.newInstance(); - assertThat(clock).isInstanceOf(SystemClock.class); - assertThat(clock.currentTimeMicros()).isGreaterThan(0); + @Test(groups = "unit") + public void should_fallback_on_system_clock_on_windows_platforms() { + if (!osName.startsWith("Windows")) { + throw new SkipException("Skipping test for non-Windows platforms."); } + Clock clock = ClockFactory.newInstance(); + assertThat(clock).isInstanceOf(SystemClock.class); + assertThat(clock.currentTimeMicros()).isGreaterThan(0); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CloudConfigFactoryTest.java b/driver-core/src/test/java/com/datastax/driver/core/CloudConfigFactoryTest.java new file mode 100644 index 00000000000..96d97faa45c --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/CloudConfigFactoryTest.java @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.apache.commons.codec.CharEncoding.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonParseException; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.common.JettySettings; +import com.github.tomakehurst.wiremock.core.Options; +import com.github.tomakehurst.wiremock.http.AdminRequestHandler; +import com.github.tomakehurst.wiremock.http.HttpServer; +import com.github.tomakehurst.wiremock.http.HttpServerFactory; +import com.github.tomakehurst.wiremock.http.StubRequestHandler; +import com.github.tomakehurst.wiremock.jetty9.JettyHttpServer; +import com.google.common.base.Joiner; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLSession; +import org.apache.commons.io.FileUtils; +import org.eclipse.jetty.io.NetworkTrafficListener; +import org.eclipse.jetty.server.ConnectionFactory; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class CloudConfigFactoryTest { + + private static final String BUNDLE_PATH = "/cloud/creds.zip"; + + private WireMockServer wireMockServer; + + @BeforeMethod + public void startWireMock() { + wireMockServer = + new WireMockServer( + wireMockConfig() + .httpsPort(30443) + .dynamicPort() + .httpServerFactory(new HttpsServerFactory()) + .needClientAuth(true) + .keystorePath(path("/cloud/identity.jks")) + .keystorePassword("XS78x3GuBWas1OoA5") + .trustStorePath(path("/cloud/trustStore.jks")) + .trustStorePassword("48ZY5r06BmpVLKxPg")); + wireMockServer.start(); + } + + @AfterMethod + public void stopWireMock() { + wireMockServer.stop(); + } + + @Test + public void should_load_config_from_local_filesystem() throws Exception { + // given + URL configFile = getClass().getResource(BUNDLE_PATH); + mockProxyMetadataService(jsonMetadata()); + // when + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile.openStream()); + // then + assertCloudConfig(cloudConfig); + } + + @Test + public void should_load_config_from_external_location() throws Exception { + // given + 
mockHttpSecureBundle(secureBundle()); + mockProxyMetadataService(jsonMetadata()); + // when + URL configFile = new URL("http", "localhost", wireMockServer.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile.openStream()); + // then + assertCloudConfig(cloudConfig); + } + + @Test + public void should_throw_when_bundle_not_found() throws Exception { + // given + wireMockServer.stubFor(any(urlEqualTo(BUNDLE_PATH)).willReturn(aResponse().withStatus(404))); + // when + URL configFile = new URL("http", "localhost", wireMockServer.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + try { + cloudConfigFactory.createCloudConfig(configFile.openStream()); + } catch (FileNotFoundException ex) { + assertThat(ex).hasMessageContaining(configFile.toExternalForm()); + } + } + + @Test + public void should_throw_when_bundle_not_readable() throws Exception { + // given + mockHttpSecureBundle("not a zip file".getBytes(UTF_8)); + // when + URL configFile = new URL("http", "localhost", wireMockServer.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + try { + cloudConfigFactory.createCloudConfig(configFile.openStream()); + } catch (IllegalStateException ex) { + assertThat(ex).hasMessageContaining("Invalid bundle: missing file config.json"); + } + } + + @Test + public void should_throw_when_metadata_not_found() throws Exception { + // given + mockHttpSecureBundle(secureBundle()); + wireMockServer.stubFor( + any(urlPathEqualTo("/metadata")).willReturn(aResponse().withStatus(404))); + // when + URL configFile = new URL("http", "localhost", wireMockServer.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + try { + cloudConfigFactory.createCloudConfig(configFile.openStream()); + } catch (FileNotFoundException ex) { + assertThat(ex).hasMessageContaining("metadata"); + } + } + + @Test + public void should_throw_when_metadata_not_readable() throws Exception { + // given + mockHttpSecureBundle(secureBundle()); + mockProxyMetadataService("not a valid json payload"); + // when + URL configFile = new URL("http", "localhost", wireMockServer.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + try { + cloudConfigFactory.createCloudConfig(configFile.openStream()); + } catch (JsonParseException ex) { + assertThat(ex).hasMessageContaining("Unrecognized token"); + } + } + + private void mockHttpSecureBundle(byte[] body) { + wireMockServer.stubFor( + any(urlEqualTo(BUNDLE_PATH)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(body))); + } + + private void mockProxyMetadataService(String jsonMetadata) { + wireMockServer.stubFor( + any(urlPathEqualTo("/metadata")) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/json") + .withBody(jsonMetadata))); + } + + private byte[] secureBundle() throws IOException { + return FileUtils.readFileToByteArray(new File(path(BUNDLE_PATH))); + } + + private String jsonMetadata() throws IOException { + return Joiner.on('\n').join(FileUtils.readLines(new File(path("/cloud/metadata.json")))); + } + + private String path(String resource) { + return getClass().getResource(resource).getFile(); + } + + private void assertCloudConfig(CloudConfig config) { + InetSocketAddress expectedProxyAddress = 
InetSocketAddress.createUnresolved("localhost", 30002); + assertThat(config.getLocalDatacenter()).isEqualTo("dc1"); + assertThat(config.getProxyAddress()).isEqualTo(expectedProxyAddress); + assertThat(config.getEndPoints()).extracting("proxyAddress").containsOnly(expectedProxyAddress); + assertThat(config.getEndPoints()) + .extracting("serverName") + .containsExactly( + "4ac06655-f861-49f9-881e-3fee22e69b94", + "2af7c253-3394-4a0d-bfac-f1ad81b5154d", + "b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"); + assertThat(config.getSslOptions()).isNotNull().isInstanceOf(SSLOptions.class); + } + + static { + javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier( + new HostnameVerifier() { + @Override + public boolean verify(String hostname, SSLSession sslSession) { + return hostname.equals("localhost"); + } + }); + } + + // see https://github.com/tomakehurst/wiremock/issues/874 + private static class HttpsServerFactory implements HttpServerFactory { + @Override + public HttpServer buildHttpServer( + final Options options, + AdminRequestHandler adminRequestHandler, + StubRequestHandler stubRequestHandler) { + return new JettyHttpServer(options, adminRequestHandler, stubRequestHandler) { + @Override + protected ServerConnector createServerConnector( + String bindAddress, + JettySettings jettySettings, + int port, + NetworkTrafficListener listener, + ConnectionFactory... connectionFactories) { + if (port == options.httpsSettings().port()) { + SslConnectionFactory sslConnectionFactory = + (SslConnectionFactory) connectionFactories[0]; + SslContextFactory sslContextFactory = sslConnectionFactory.getSslContextFactory(); + sslContextFactory.setKeyStorePassword(options.httpsSettings().keyStorePassword()); + connectionFactories = + new ConnectionFactory[] {sslConnectionFactory, connectionFactories[1]}; + } + return super.createServerConnector( + bindAddress, jettySettings, port, listener, connectionFactories); + } + }; + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClusterAssert.java b/driver-core/src/test/java/com/datastax/driver/core/ClusterAssert.java index c4dd0fa98c1..941254a183c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClusterAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClusterAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,115 +17,116 @@ */ package com.datastax.driver.core; -import com.google.common.collect.Iterators; -import org.assertj.core.api.AbstractAssert; +import static org.assertj.core.api.Assertions.assertThat; +import com.google.common.collect.Iterators; import java.net.InetAddress; import java.util.Iterator; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; +import org.assertj.core.api.AbstractAssert; public class ClusterAssert extends AbstractAssert { - protected ClusterAssert(Cluster actual) { - super(actual, ClusterAssert.class); - } - - public ClusterAssert usesControlHost(int node) { - String expectedAddress = TestUtils.ipOfNode(node); - Host controlHost = actual.manager.controlConnection.connectedHost(); - assertThat(controlHost.getAddress().getHostAddress()).isEqualTo(expectedAddress); - return this; - } - - public ClusterAssert hasClosedControlConnection() { - assertThat(actual.manager.controlConnection.isOpen()).isFalse(); - return this; - } - - public ClusterAssert hasOpenControlConnection() { - assertThat(actual.manager.controlConnection.isOpen()).isTrue(); - return this; - } - - public HostAssert controlHost() { - Host host = TestUtils.findOrWaitForControlConnection(actual, 10, TimeUnit.SECONDS); - return new HostAssert(host, actual); + protected ClusterAssert(Cluster actual) { + super(actual, ClusterAssert.class); + } + + public ClusterAssert usesControlHost(int node) { + String expectedAddress = TestUtils.ipOfNode(node); + Host controlHost = actual.manager.controlConnection.connectedHost(); + assertThat(controlHost.getEndPoint().resolve().getAddress().getHostAddress()) + .isEqualTo(expectedAddress); + return this; + } + + public ClusterAssert hasClosedControlConnection() { + assertThat(actual.manager.controlConnection.isOpen()).isFalse(); + return this; + } + + public ClusterAssert hasOpenControlConnection() { + assertThat(actual.manager.controlConnection.isOpen()).isTrue(); + return this; + } + + public HostAssert controlHost() { + Host host = TestUtils.findOrWaitForControlConnection(actual, 10, TimeUnit.SECONDS); + return new HostAssert(host, actual); + } + + public HostAssert host(int hostNumber) { + // Wait for the node to be added if it's not already known. + // In 2.2+ C* does not send an added event until the node is ready so we wait a long time. + Host host = + TestUtils.findOrWaitForHost( + actual, hostNumber, 60 + Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); + return new HostAssert(host, actual); + } + + public HostAssert host(String hostAddress) { + Host host = + TestUtils.findOrWaitForHost( + actual, hostAddress, 60 + Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); + return new HostAssert(host, actual); + } + + public HostAssert host(InetAddress hostAddress) { + return host(hostAddress.getHostAddress()); + } + + /** + * Asserts that {@link Cluster}'s {@link Host}s have valid {@link TokenRange}s with the given + * keyspace. + * + *

    Ensures that no ranges intersect and that they cover the entire ring. + * + * @param keyspace Keyspace to grab {@link TokenRange}s from. + */ + public ClusterAssert hasValidTokenRanges(String keyspace) { + // Sort the token ranges so they are in order (needed for vnodes). + Set ranges = new TreeSet(); + for (Host host : actual.getMetadata().getAllHosts()) { + ranges.addAll(actual.getMetadata().getTokenRanges(keyspace, host)); } - - public HostAssert host(int hostNumber) { - // Wait for the node to be added if it's not already known. - // In 2.2+ C* does not send an added event until the node is ready so we wait a long time. - Host host = TestUtils.findOrWaitForHost(actual, hostNumber, - 60 + Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); - return new HostAssert(host, actual); + return hasValidTokenRanges(ranges); + } + + /** + * Asserts that {@link Cluster}'s {@link Host}s have valid {@link TokenRange}s. + * + *

    Ensures that no ranges intersect and that they cover the entire ring. + */ + public ClusterAssert hasValidTokenRanges() { + // Sort the token ranges so they are in order (needed for vnodes). + Set ranges = new TreeSet(actual.getMetadata().getTokenRanges()); + return hasValidTokenRanges(ranges); + } + + /** + * Asserts that the given Set of {@link TokenRange}s is valid. + * +

    Ensures that no ranges intersect and that they cover the entire ring. + */ + private ClusterAssert hasValidTokenRanges(Set ranges) { + // Ensure no ranges intersect. + Iterator it = ranges.iterator(); + while (it.hasNext()) { + TokenRange range = it.next(); + Assertions.assertThat(range).doesNotIntersect(Iterators.toArray(it, TokenRange.class)); } - public HostAssert host(String hostAddress) { - Host host = TestUtils.findOrWaitForHost(actual, hostAddress, - 60 + Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); - return new HostAssert(host, actual); + // Ensure the defined ranges cover the entire ring. + it = ranges.iterator(); + TokenRange mergedRange = it.next(); + while (it.hasNext()) { + TokenRange next = it.next(); + mergedRange = mergedRange.mergeWith(next); } + boolean isFullRing = + mergedRange.getStart().equals(mergedRange.getEnd()) && !mergedRange.isEmpty(); + assertThat(isFullRing).as("Ring is not fully defined for Cluster.").isTrue(); - public HostAssert host(InetAddress hostAddress) { - return host(hostAddress.getHostAddress()); - } - - /** - * Asserts that {@link Cluster}'s {@link Host}s have valid {@link TokenRange}s with the given keyspace. - *

    - * Ensures that no ranges intersect and that they cover the entire ring. - * - * @param keyspace Keyspace to grab {@link TokenRange}s from. - */ - public ClusterAssert hasValidTokenRanges(String keyspace) { - // Sort the token ranges so they are in order (needed for vnodes). - Set ranges = new TreeSet(); - for (Host host : actual.getMetadata().getAllHosts()) { - ranges.addAll(actual.getMetadata().getTokenRanges(keyspace, host)); - } - return hasValidTokenRanges(ranges); - } - - /** - * Asserts that {@link Cluster}'s {@link Host}s have valid {@link TokenRange}s. - *

    - * Ensures that no ranges intersect and that they cover the entire ring. - */ - public ClusterAssert hasValidTokenRanges() { - // Sort the token ranges so they are in order (needed for vnodes). - Set ranges = new TreeSet(actual.getMetadata().getTokenRanges()); - return hasValidTokenRanges(ranges); - } - - /** - * Asserts that given Set of {@link TokenRange}s are valid. - *

    - * Ensures that no ranges intersect and that they cover the entire ring. - */ - private ClusterAssert hasValidTokenRanges(Set ranges) { - // Ensure no ranges intersect. - Iterator it = ranges.iterator(); - while (it.hasNext()) { - TokenRange range = it.next(); - Assertions.assertThat(range).doesNotIntersect(Iterators.toArray(it, TokenRange.class)); - } - - // Ensure the defined ranges cover the entire ring. - it = ranges.iterator(); - TokenRange mergedRange = it.next(); - while (it.hasNext()) { - TokenRange next = it.next(); - mergedRange = mergedRange.mergeWith(next); - } - boolean isFullRing = mergedRange.getStart().equals(mergedRange.getEnd()) - && !mergedRange.isEmpty(); - assertThat(isFullRing) - .as("Ring is not fully defined for Cluster.") - .isTrue(); - - return this; - } + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClusterInitTest.java b/driver-core/src/test/java/com/datastax/driver/core/ClusterInitTest.java index 1a365a3312c..e2f88dedf9d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClusterInitTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClusterInitTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,19 +17,25 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.Assertions.fail; +import static com.datastax.driver.core.FakeHost.Behavior.THROWING_CONNECT_TIMEOUTS; +import static com.datastax.driver.core.HostDistance.LOCAL; +import static com.datastax.driver.core.TestUtils.ipOfNode; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.atMost; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.scassandra.http.client.PrimingRequest.then; + +import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; -import org.scassandra.Scassandra; -import org.scassandra.http.client.PrimingClient; -import org.scassandra.http.client.PrimingRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.Test; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -36,252 +44,348 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.Assertions.fail; -import static com.datastax.driver.core.FakeHost.Behavior.THROWING_CONNECT_TIMEOUTS; -import static com.datastax.driver.core.HostDistance.LOCAL; -import static com.datastax.driver.core.TestUtils.ipOfNode; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static org.mockito.Mockito.*; -import static org.scassandra.http.client.PrimingRequest.then; +import org.scassandra.Scassandra; +import org.scassandra.http.client.PrimingClient; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; public class ClusterInitTest { - private static final Logger logger = LoggerFactory.getLogger(ClusterInitTest.class); + private static final Logger logger = LoggerFactory.getLogger(ClusterInitTest.class); - /** - * Test for JAVA-522: when the cluster and session initialize, if some contact points are behaving badly and - * causing timeouts, we want to ensure that the driver does not wait multiple times on the same host. - */ - @Test(groups = "short") - public void should_handle_failing_or_missing_contact_points() throws UnknownHostException { - Cluster cluster = null; - Scassandra scassandra = null; - List failingHosts = Lists.newArrayList(); - try { - // Simulate a cluster of 5 hosts. + /** + * Test for JAVA-522: when the cluster and session initialize, if some contact points are behaving + * badly and causing timeouts, we want to ensure that the driver does not wait multiple times on + * the same host. 
+ */ + @Test(groups = "short") + public void should_handle_failing_or_missing_contact_points() throws UnknownHostException { + Cluster cluster = null; + Scassandra scassandra = null; + List failingHosts = Lists.newArrayList(); + try { + // Simulate a cluster of 5 hosts. - // - 1 is an actual Scassandra instance that will accept connections: - scassandra = TestUtils.createScassandraServer(); - scassandra.start(); - int port = scassandra.getBinaryPort(); + // - 1 is an actual Scassandra instance that will accept connections: + scassandra = TestUtils.createScassandraServer(); + scassandra.start(); + ScassandraCluster.primeSystemLocalRow(scassandra); + int port = scassandra.getBinaryPort(); - // - the remaining 4 are fake servers that will throw connect timeouts: - for (int i = 2; i <= 5; i++) { - FakeHost failingHost = new FakeHost(TestUtils.ipOfNode(i), port, THROWING_CONNECT_TIMEOUTS); - failingHosts.add(failingHost); - failingHost.start(); - } + // - the remaining 4 are fake servers that will throw connect timeouts: + for (int i = 2; i <= 5; i++) { + FakeHost failingHost = new FakeHost(TestUtils.ipOfNode(i), port, THROWING_CONNECT_TIMEOUTS); + failingHosts.add(failingHost); + failingHost.start(); + } - // - we also have a "missing" contact point, i.e. there's no server listening at this address, - // and the address is not listed in the live host's system.peers - String missingHostAddress = TestUtils.ipOfNode(6); + // - we also have a "missing" contact point, i.e. there's no server listening at this address, + // and the address is not listed in the live host's system.peers + String missingHostAddress = TestUtils.ipOfNode(6); - primePeerRows(scassandra, failingHosts); + primePeerRows(scassandra, failingHosts); - logger.info("Environment is set up, starting test"); - long start = System.nanoTime(); + logger.info("Environment is set up, starting test"); + long start = System.nanoTime(); - // We want to count how many connections were attempted. For that, we rely on the fact that SocketOptions.getKeepAlive - // is called in Connection.Factory.newBoostrap() each time we prepare to open a new connection. - SocketOptions socketOptions = spy(new SocketOptions()); + // We want to count how many connections were attempted. For that, we rely on the fact that + // SocketOptions.getKeepAlive + // is called in Connection.Factory.newBoostrap() each time we prepare to open a new + // connection. + SocketOptions socketOptions = spy(new SocketOptions()); - // Set an "infinite" reconnection delay so that reconnection attempts don't pollute our observations - ConstantReconnectionPolicy reconnectionPolicy = new ConstantReconnectionPolicy(3600 * 1000); + // Set an "infinite" reconnection delay so that reconnection attempts don't pollute our + // observations + ConstantReconnectionPolicy reconnectionPolicy = new ConstantReconnectionPolicy(3600 * 1000); - // Force 1 connection per pool. Otherwise we can't distinguish a failed pool creation from multiple connection - // attempts, because pools create their connections in parallel (so 1 pool failure equals multiple connection failures). - PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(LOCAL, 1, 1); + // Force 1 connection per pool. Otherwise we can't distinguish a failed pool creation from + // multiple connection + // attempts, because pools create their connections in parallel (so 1 pool failure equals + // multiple connection failures). 
+ PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(LOCAL, 1, 1); - cluster = Cluster.builder() - .withPort(scassandra.getBinaryPort()) - .addContactPoints( - ipOfNode(1), - failingHosts.get(0).address, failingHosts.get(1).address, - failingHosts.get(2).address, failingHosts.get(3).address, - missingHostAddress - ) - .withSocketOptions(socketOptions) - .withReconnectionPolicy(reconnectionPolicy) - .withPoolingOptions(poolingOptions) - .build(); - cluster.connect(); + cluster = + Cluster.builder() + .withPort(scassandra.getBinaryPort()) + // scassandra supports max V4 protocol + .withProtocolVersion(ProtocolVersion.V4) + .addContactPoints( + ipOfNode(1), + failingHosts.get(0).address, + failingHosts.get(1).address, + failingHosts.get(2).address, + failingHosts.get(3).address, + missingHostAddress) + .withSocketOptions(socketOptions) + .withReconnectionPolicy(reconnectionPolicy) + .withPoolingOptions(poolingOptions) + .build(); + cluster.connect(); - // For information only: - long initTimeMs = TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS); - logger.info("Cluster and session initialized in {} ms", initTimeMs); + // For information only: + long initTimeMs = + TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS); + logger.info("Cluster and session initialized in {} ms", initTimeMs); - // Expect : - // - 2 connections for the live host (1 control connection + 1 pooled connection) - // - 1 attempt per failing host (either a control connection attempt or a failed pool creation) - // - 0 or 1 for the missing host. We can't know for sure because contact points are randomized. If it's tried - // before the live host there will be a connection attempt, otherwise it will be removed directly because - // it's not in the live host's system.peers. - verify(socketOptions, atLeast(6)).getKeepAlive(); - verify(socketOptions, atMost(7)).getKeepAlive(); + // Expect : + // - 2 connections for the live host (1 control connection + 1 pooled connection) + // - 1 attempt per failing host (either a control connection attempt or a failed pool + // creation) + // - 0 or 1 for the missing host. We can't know for sure because contact points are + // randomized. If it's tried + // before the live host there will be a connection attempt, otherwise it will be removed + // directly because + // it's not in the live host's system.peers. + verify(socketOptions, atLeast(6)).getKeepAlive(); + verify(socketOptions, atMost(7)).getKeepAlive(); - assertThat(cluster).host(1).isNotNull().isUp(); - // It is likely but not guaranteed that a host is marked down at this point. - // It should eventually be marked down as Cluster.Manager.triggerOnDown should be - // called and submit a task that marks the host down. - for (FakeHost failingHost : failingHosts) { - assertThat(cluster).host(failingHost.address).goesDownWithin(10, TimeUnit.SECONDS); - Host host = TestUtils.findHost(cluster, failingHost.address); - // There is a possible race here in that the host is marked down in a separate Executor in onDown - // and then starts a periodic reconnection attempt shortly after. Since setDown is called before - // startPeriodicReconnectionAttempt, we add a slight delay here if the future isn't set yet. 
- if (host != null && (host.getReconnectionAttemptFuture() == null || host.getReconnectionAttemptFuture().isDone())) { - logger.warn("Periodic Reconnection Attempt hasn't started yet for {}, waiting 1 second and then checking.", host); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - assertThat(cluster).host(failingHost.address).isReconnectingFromDown(); - } - assertThat(TestUtils.findHost(cluster, missingHostAddress)).isNull(); - } finally { - if (cluster != null) - cluster.close(); - for (FakeHost fakeHost : failingHosts) - fakeHost.stop(); - if (scassandra != null) - scassandra.stop(); + assertThat(cluster).host(1).isNotNull().isUp(); + // It is likely but not guaranteed that a host is marked down at this point. + // It should eventually be marked down as Cluster.Manager.triggerOnDown should be + // called and submit a task that marks the host down. + for (FakeHost failingHost : failingHosts) { + assertThat(cluster).host(failingHost.address).goesDownWithin(10, TimeUnit.SECONDS); + Host host = TestUtils.findHost(cluster, failingHost.address); + // There is a possible race here in that the host is marked down in a separate Executor in + // onDown + // and then starts a periodic reconnection attempt shortly after. Since setDown is called + // before + // startPeriodicReconnectionAttempt, we add a slight delay here if the future isn't set yet. + if (host != null + && (host.getReconnectionAttemptFuture() == null + || host.getReconnectionAttemptFuture().isDone())) { + logger.warn( + "Periodic Reconnection Attempt hasn't started yet for {}, waiting 1 second and then checking.", + host); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); } + assertThat(cluster).host(failingHost.address).isReconnectingFromDown(); + } + assertThat(TestUtils.findHost(cluster, missingHostAddress)).isNull(); + } finally { + if (cluster != null) cluster.close(); + for (FakeHost fakeHost : failingHosts) fakeHost.stop(); + if (scassandra != null) scassandra.stop(); } + } - /** - * Validates that if hosts are unreachable during Cluster initialization, no background reconnection to them - * is scheduled before the initialization is complete. - * - * @test_category connection - * @jira_ticket JAVA-954 - * @expected_result No reconnection scheduled. - */ - @Test(groups = "short", expectedExceptions = NoHostAvailableException.class) - public void should_not_schedule_reconnections_before_init_complete() { - // Both contact points time out so we're sure we'll try both of them and init will never complete. - List hosts = Lists.newArrayList( - new FakeHost(TestUtils.ipOfNode(0), 9042, THROWING_CONNECT_TIMEOUTS), - new FakeHost(TestUtils.ipOfNode(1), 9042, THROWING_CONNECT_TIMEOUTS)); - // Use a low reconnection interval and keep the default connect timeout (5 seconds). So if a reconnection was scheduled, - // we would see a call to the reconnection policy. - CountingReconnectionPolicy reconnectionPolicy = new CountingReconnectionPolicy(new ConstantReconnectionPolicy(100)); - Cluster cluster = Cluster.builder() - .addContactPoints(hosts.get(0).address, hosts.get(1).address) - .withReconnectionPolicy(reconnectionPolicy) - .build(); - try { - cluster.init(); - } finally { - // We expect a nextDelay invocation from the ConvictionPolicy for each host, but that will - // not trigger a reconnection. 
- assertThat(reconnectionPolicy.count.get()).isEqualTo(2); - for (FakeHost fakeHost : hosts) { - fakeHost.stop(); - } - cluster.close(); - } - // We don't test that reconnections are scheduled if init succeeds, but that's covered in - // should_handle_failing_or_missing_contact_points + /** + * Validates that if hosts are unreachable during Cluster initialization, no background + * reconnection to them is scheduled before the initialization is complete. + * + * @test_category connection + * @jira_ticket JAVA-954 + * @expected_result No reconnection scheduled. + */ + @Test(groups = "short", expectedExceptions = NoHostAvailableException.class) + public void should_not_schedule_reconnections_before_init_complete() { + // Both contact points time out so we're sure we'll try both of them and init will never + // complete. + List hosts = + Lists.newArrayList( + new FakeHost(TestUtils.ipOfNode(0), 9042, THROWING_CONNECT_TIMEOUTS), + new FakeHost(TestUtils.ipOfNode(1), 9042, THROWING_CONNECT_TIMEOUTS)); + // Use a low reconnection interval and keep the default connect timeout (5 seconds). So if a + // reconnection was scheduled, we would see a call to the reconnection policy. + CountingReconnectionPolicy reconnectionPolicy = + new CountingReconnectionPolicy(new ConstantReconnectionPolicy(100)); + Cluster cluster = + Cluster.builder() + .addContactPoints(hosts.get(0).address, hosts.get(1).address) + .withReconnectionPolicy(reconnectionPolicy) + .build(); + try { + cluster.init(); + } finally { + // We expect a nextDelay invocation from the ConvictionPolicy for each host, but that will + // not trigger a reconnection. + assertThat(reconnectionPolicy.count.get()).isEqualTo(2); + for (FakeHost fakeHost : hosts) { + fakeHost.stop(); + } + cluster.close(); } + // We don't test that reconnections are scheduled if init succeeds, but that's covered in + // should_handle_failing_or_missing_contact_points + } - /** - * Validates that a Cluster that was never able to successfully establish connection a session can be closed - * properly. - * - * @test_category connection - * @expected_result Cluster closes within 1 second. - */ - @Test(groups = "short") - public void should_be_able_to_close_cluster_that_never_successfully_connected() throws Exception { - Cluster cluster = Cluster.builder() - .addContactPointsWithPorts(new InetSocketAddress("127.0.0.1", 65534)) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - try { - cluster.connect(); - fail("Should not have been able to connect."); - } catch (NoHostAvailableException e) { - // Expected. - CloseFuture closeFuture = cluster.closeAsync(); - try { - closeFuture.get(1, TimeUnit.SECONDS); - } catch (TimeoutException e1) { - fail("Close Future did not complete quickly."); - } - } finally { - cluster.close(); - } + /** + * Validates that a Cluster that was never able to successfully establish connection a session can + * be closed properly. + * + * @test_category connection + * @expected_result Cluster closes within 1 second. + */ + @Test(groups = "short") + public void should_be_able_to_close_cluster_that_never_successfully_connected() throws Exception { + Cluster cluster = + Cluster.builder() + .addContactPointsWithPorts(new InetSocketAddress("127.0.0.1", 65534)) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + try { + cluster.connect(); + fail("Should not have been able to connect."); + } catch (NoHostAvailableException e) { + // Expected. 
+ CloseFuture closeFuture = cluster.closeAsync(); + try { + closeFuture.get(1, TimeUnit.SECONDS); + } catch (TimeoutException e1) { + fail("Close Future did not complete quickly."); + } + } finally { + cluster.close(); } + } - /** - * Ensures that if a node is detected that does not support the protocol version in use on init that - * the node is ignored and remains in an added state and the all other hosts are appropriately marked up. - * - * @jira_ticket JAVA-854 - * @test_category host:state - */ - @Test(groups = "short") - public void should_not_abort_init_if_host_does_not_support_protocol_version() { - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(5) - // For node 2, report an older version which uses protocol v1. - .forcePeerInfo(1, 2, "release_version", "1.2.19") - .build(); - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); + /** + * Ensures that if a node is detected that does not support the protocol version in use on init + * that the node is ignored and remains in an added state and the all other hosts are + * appropriately marked up. + * + * @jira_ticket JAVA-854 + * @test_category host:state + */ + @Test(groups = "short") + public void should_not_abort_init_if_host_does_not_support_protocol_version() { + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(5) + // For node 2, report an older version which uses protocol v1. + .forcePeerInfo(1, 2, "release_version", "1.2.19") + .build(); + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); - try { - scassandraCluster.init(); - cluster.init(); - for (int i = 1; i <= 5; i++) { - InetAddress hostAddress = scassandraCluster.address(i).getAddress(); - if (i == 2) { - // As this host is at an older protocol version, it should be ignored and not marked up. - assertThat(cluster).host(hostAddress).hasState(Host.State.ADDED); - } else { - // All hosts should be set as 'UP' as part of cluster.init(). If they are - // in 'ADDED' state it's possible that cluster.init() did not fully complete. - assertThat(cluster).host(hostAddress).hasState(Host.State.UP); - } - } - } finally { - cluster.close(); - scassandraCluster.stop(); + try { + scassandraCluster.init(); + cluster.init(); + for (int i = 1; i <= 5; i++) { + InetAddress hostAddress = scassandraCluster.address(i).getAddress(); + if (i == 2) { + // As this host is at an older protocol version, it should be ignored and not marked up. + assertThat(cluster).host(hostAddress).hasState(Host.State.ADDED); + } else { + // All hosts should be set as 'UP' as part of cluster.init(). If they are + // in 'ADDED' state it's possible that cluster.init() did not fully complete. + assertThat(cluster).host(hostAddress).hasState(Host.State.UP); } + } + } finally { + cluster.close(); + scassandraCluster.stop(); + } + } + /** + * Ensures that if an error occurs doing initialization that subsequent attempts to use the + * cluster result in an appropriate error. 
+ * + * @jira_ticket JAVA-1220 + * @test_category host:state + */ + @Test(groups = "short") + public void should_detect_cluster_init_failure_and_close_cluster() { + Cluster cluster = + Cluster.builder() + .addContactPointsWithPorts(new InetSocketAddress("127.0.0.1", 65534)) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + try { + cluster.connect(); + fail("Should not have been able to connect."); + } catch (NoHostAvailableException e) { + try { + cluster.connect(); + fail("Should error when connect is called."); + } catch (IllegalStateException e1) { + assertThat(e1.getCause()).isSameAs(e); + assertThat(e1) + .hasMessage( + "Can't use this cluster instance because it encountered an error in its initialization"); + } + } finally { + cluster.close(); } + } + /** + * Ensures that if a cluster is closed, subsequent attempts to the use the session will throw a + * useful error. + * + * @jira_ticket JAVA-1929 + * @test_category host:state + */ + @Test(groups = "short") + public void session_should_detect_cluster_close() { + ScassandraCluster scassandraCluster = + ScassandraCluster.builder().withIpPrefix(TestUtils.IP_PREFIX).build(); + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); - private void primePeerRows(Scassandra scassandra, List otherHosts) throws UnknownHostException { - PrimingClient primingClient = - PrimingClient.builder() - .withHost(ipOfNode(1)) - .withPort(scassandra.getAdminPort()) - .build(); + try { + scassandraCluster.init(); + Session session = cluster.connect(); + cluster.close(); + try { + session.execute("SELECTS * FROM system.peers"); + fail("Should have failed when session.execute was called on cluster that was closed"); + } catch (DriverInternalError e) { + assertThat(e.getCause()).isInstanceOf(IllegalStateException.class); + assertThat(e.getCause()).hasMessage("Could not send request, session is closed"); + } + } finally { + scassandraCluster.stop(); + } + } - List> rows = Lists.newArrayListWithCapacity(5); + private void primePeerRows(Scassandra scassandra, List otherHosts) + throws UnknownHostException { + PrimingClient primingClient = + PrimingClient.builder().withHost(ipOfNode(1)).withPort(scassandra.getAdminPort()).build(); - int i = 0; - for (FakeHost otherHost : otherHosts) { - InetAddress address = InetAddress.getByName(otherHost.address); - rows.add(ImmutableMap.builder() - .put("peer", address) - .put("rpc_address", address) - .put("data_center", "datacenter1") - .put("rack", "rack1") - // Base release version on min cassandra version as this is important for the driver - // to consider the node. 
- .put("release_version", ProtocolVersion.NEWEST_SUPPORTED.minCassandraVersion().toString()) - .put("tokens", ImmutableSet.of(Long.toString(Long.MIN_VALUE + i++))) - .put("host_id", UUID.randomUUID()) - .build()); - } + List> rows = Lists.newArrayListWithCapacity(5); - primingClient.prime( - PrimingRequest.queryBuilder() - .withQuery("SELECT * FROM system.peers") - .withThen(then().withRows(rows).withColumnTypes(ScassandraCluster.SELECT_PEERS)) - .build()); + int i = 0; + for (FakeHost otherHost : otherHosts) { + InetAddress address = InetAddress.getByName(otherHost.address); + rows.add( + ImmutableMap.builder() + .put("peer", address) + .put("rpc_address", address) + .put("data_center", "datacenter1") + .put("rack", "rack1") + // Base release version on min cassandra version as this is important for the driver + // to consider the node. + .put( + "release_version", + ProtocolVersion.NEWEST_SUPPORTED.minCassandraVersion().toString()) + .put("tokens", ImmutableSet.of(Long.toString(Long.MIN_VALUE + i++))) + .put("host_id", UUID.randomUUID()) + .build()); } + + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers") + .withThen(then().withRows(rows).withColumnTypes(ScassandraCluster.SELECT_PEERS)) + .build()); + + // prime invalid for peers_v2 so peers table is used. + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen(then().withResult(Result.invalid)) + .build()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java index f41c34f8407..340b3f49376 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,236 +17,242 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + import com.datastax.driver.core.utils.SocketChannelMonitor; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.Test; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.*; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; - public class ClusterStressTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(ClusterStressTest.class); + private static final Logger logger = LoggerFactory.getLogger(ClusterStressTest.class); - private ExecutorService executorService; + private ExecutorService executorService; - public ClusterStressTest() { - // 8 threads should be enough so that we stress the driver and not the OS thread scheduler - executorService = Executors.newFixedThreadPool(8); - } + public ClusterStressTest() { + // 8 threads should be enough so that we stress the driver and not the OS thread scheduler + executorService = Executors.newFixedThreadPool(8); + } - @Test(groups = "long") - public void clusters_should_not_leak_connections() { - int numberOfClusters = 10; - int numberOfIterations = 500; - try { - for (int i = 1; i < numberOfIterations; i++) { - logger.info("On iteration {}/{}.", i, numberOfIterations); - logger.info("Creating {} clusters", numberOfClusters); - List actions = - waitForCreates(createClustersConcurrently(numberOfClusters)); - waitForCloses(closeClustersConcurrently(actions)); - if (logger.isDebugEnabled()) - logger.debug("# {} threads currently running", Thread.getAllStackTraces().keySet().size()); - } - } finally { - logger.info("Sleeping 60 seconds to free TCP resources"); - Uninterruptibles.sleepUninterruptibly(60, TimeUnit.SECONDS); - } + @Test(groups = "long") + public void clusters_should_not_leak_connections() { + int numberOfClusters = 10; + int numberOfIterations = 500; + try { + for (int i = 1; i < numberOfIterations; i++) { + logger.info("On iteration {}/{}.", i, numberOfIterations); + logger.info("Creating {} clusters", numberOfClusters); + List actions = + waitForCreates(createClustersConcurrently(numberOfClusters)); + waitForCloses(closeClustersConcurrently(actions)); + if (logger.isDebugEnabled()) + logger.debug( + "# {} threads currently running", Thread.getAllStackTraces().keySet().size()); + } + } finally { + logger.info("Sleeping 60 seconds to free TCP resources"); + Uninterruptibles.sleepUninterruptibly(60, TimeUnit.SECONDS); } + } - @AfterMethod(groups = "long", alwaysRun = true) - public void shutdown() throws Exception { - executorService.shutdown(); - try { - boolean shutdown = 
executorService.awaitTermination(30, TimeUnit.SECONDS); - if (!shutdown) - fail("executor ran for longer than expected"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for executor to shutdown"); - } finally { - executorService = null; - System.gc(); - } + @AfterMethod(groups = "long", alwaysRun = true) + public void shutdown() throws Exception { + executorService.shutdown(); + try { + boolean shutdown = executorService.awaitTermination(30, TimeUnit.SECONDS); + if (!shutdown) fail("executor ran for longer than expected"); + } catch (InterruptedException e) { + fail("Interrupted while waiting for executor to shutdown"); + } finally { + executorService = null; + System.gc(); } + } - private List> createClustersConcurrently(int numberOfClusters) { - final CountDownLatch countDownLatch = new CountDownLatch(1); - return createClustersConcurrently(numberOfClusters, countDownLatch); - } + private List> createClustersConcurrently( + int numberOfClusters) { + final CountDownLatch countDownLatch = new CountDownLatch(1); + return createClustersConcurrently(numberOfClusters, countDownLatch); + } - private List> createClustersConcurrently(int numberOfClusters, - CountDownLatch countDownLatch) { - List> clusterFutures = - Lists.newArrayListWithCapacity(numberOfClusters); - for (int i = 0; i < numberOfClusters; i++) { - clusterFutures.add(executorService.submit(new CreateClusterAndCheckConnections(countDownLatch))); - } - countDownLatch.countDown(); - return clusterFutures; + private List> createClustersConcurrently( + int numberOfClusters, CountDownLatch countDownLatch) { + List> clusterFutures = + Lists.newArrayListWithCapacity(numberOfClusters); + for (int i = 0; i < numberOfClusters; i++) { + clusterFutures.add( + executorService.submit(new CreateClusterAndCheckConnections(countDownLatch))); } + countDownLatch.countDown(); + return clusterFutures; + } - private List> closeClustersConcurrently(List actions) { - final CountDownLatch countDownLatch = new CountDownLatch(1); - return closeClustersConcurrently(actions, countDownLatch); - } + private List> closeClustersConcurrently( + List actions) { + final CountDownLatch countDownLatch = new CountDownLatch(1); + return closeClustersConcurrently(actions, countDownLatch); + } - private List> closeClustersConcurrently(List actions, - CountDownLatch startSignal) { - List> closeFutures = Lists.newArrayListWithCapacity(actions.size()); - for (CreateClusterAndCheckConnections action : actions) { - closeFutures.add(executorService.submit(new CloseCluster(action.cluster, action.channelMonitor, - startSignal))); - } - startSignal.countDown(); - return closeFutures; + private List> closeClustersConcurrently( + List actions, CountDownLatch startSignal) { + List> closeFutures = Lists.newArrayListWithCapacity(actions.size()); + for (CreateClusterAndCheckConnections action : actions) { + closeFutures.add( + executorService.submit( + new CloseCluster(action.cluster, action.channelMonitor, startSignal))); } + startSignal.countDown(); + return closeFutures; + } - private List waitForCreates( - List> futures) { - List actions = Lists.newArrayListWithCapacity(futures.size()); - // If an error occurs, we will abort the test, but we still want to close all the clusters - // that were opened successfully, so we iterate over the whole list no matter what. 
- AssertionError error = null; - for (Future future : futures) { - try { - actions.add(future.get()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - if (error == null) - error = assertionError("Interrupted while waiting for future creation", e); - } catch (ExecutionException e) { - if (error == null) { - Throwable cause = e.getCause(); - if (cause instanceof AssertionError) - error = (AssertionError) cause; - else - error = assertionError("Error while creating a cluster", cause); - } - } + private List waitForCreates( + List> futures) { + List actions = Lists.newArrayListWithCapacity(futures.size()); + // If an error occurs, we will abort the test, but we still want to close all the clusters + // that were opened successfully, so we iterate over the whole list no matter what. + AssertionError error = null; + for (Future future : futures) { + try { + actions.add(future.get()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + if (error == null) + error = assertionError("Interrupted while waiting for future creation", e); + } catch (ExecutionException e) { + if (error == null) { + Throwable cause = e.getCause(); + if (cause instanceof AssertionError) error = (AssertionError) cause; + else error = assertionError("Error while creating a cluster", cause); } - if (error != null) { - for (CreateClusterAndCheckConnections action : actions) - action.cluster.close(); - throw error; - } else - return actions; + } } + if (error != null) { + for (CreateClusterAndCheckConnections action : actions) action.cluster.close(); + throw error; + } else return actions; + } - private List waitForCloses(List> futures) { - List result = new ArrayList(futures.size()); - AssertionError error = null; - for (Future future : futures) { - try { - result.add(future.get()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - if (error == null) - error = assertionError("Interrupted while waiting for future", e); - } catch (ExecutionException e) { - if (error == null) { - Throwable cause = e.getCause(); - if (cause instanceof AssertionError) - error = (AssertionError) cause; - else - error = assertionError("Error while closing a cluster", cause); - } - } + private List waitForCloses(List> futures) { + List result = new ArrayList(futures.size()); + AssertionError error = null; + for (Future future : futures) { + try { + result.add(future.get()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + if (error == null) error = assertionError("Interrupted while waiting for future", e); + } catch (ExecutionException e) { + if (error == null) { + Throwable cause = e.getCause(); + if (cause instanceof AssertionError) error = (AssertionError) cause; + else error = assertionError("Error while closing a cluster", cause); } - if (error != null) - throw error; - else - return result; + } } + if (error != null) throw error; + else return result; + } - private class CreateClusterAndCheckConnections implements Callable { - private final CountDownLatch startSignal; - private Cluster cluster; - private final SocketChannelMonitor channelMonitor = new SocketChannelMonitor(); - - CreateClusterAndCheckConnections(CountDownLatch startSignal) { - this.startSignal = startSignal; - this.cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withPoolingOptions(new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) - .withNettyOptions(channelMonitor.nettyOptions()).build(); - } + 
private class CreateClusterAndCheckConnections + implements Callable { + private final CountDownLatch startSignal; + private Cluster cluster; + private final SocketChannelMonitor channelMonitor = new SocketChannelMonitor(); - @Override - public CreateClusterAndCheckConnections call() throws Exception { - startSignal.await(); - - try { - // There should be 1 control connection after initializing. - cluster.init(); - assertEquals(cluster.manager.sessions.size(), 0); - assertEquals((int) cluster.getMetrics().getOpenConnections().getValue(), 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); - - // The first session initializes the cluster and its control connection - Session session = cluster.connect(); - assertEquals(cluster.manager.sessions.size(), 1); - assertEquals((int) cluster.getMetrics().getOpenConnections().getValue(), 1 + TestUtils.numberOfLocalCoreConnections(cluster)); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1 + TestUtils.numberOfLocalCoreConnections(cluster)); - - // Closing the session keeps the control connection opened - session.close(); - assertEquals(cluster.manager.sessions.size(), 0); - assertEquals((int) cluster.getMetrics().getOpenConnections().getValue(), 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); - - return this; - } catch (AssertionError e) { - // If an assertion fails, close the cluster now, because it's the last time we - // have a reference to it. - cluster.close(); - cluster = null; - throw e; - } finally { - channelMonitor.stop(); - } - } + CreateClusterAndCheckConnections(CountDownLatch startSignal) { + this.startSignal = startSignal; + this.cluster = + createClusterBuilder() + .withPoolingOptions( + new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) + .withNettyOptions(channelMonitor.nettyOptions()) + .build(); } - private class CloseCluster implements Callable { - private Cluster cluster; - private SocketChannelMonitor channelMonitor; - private final CountDownLatch startSignal; + @Override + public CreateClusterAndCheckConnections call() throws Exception { + startSignal.await(); - CloseCluster(Cluster cluster, SocketChannelMonitor channelMonitor, CountDownLatch startSignal) { - this.cluster = cluster; - this.channelMonitor = channelMonitor; - this.startSignal = startSignal; - } + try { + // There should be 1 control connection after initializing. 
+ cluster.init(); + assertEquals(cluster.manager.sessions.size(), 0); + assertEquals((int) cluster.getMetrics().getOpenConnections().getValue(), 1); + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); - @Override - public Void call() throws Exception { - startSignal.await(); - try { - cluster.close(); - assertEquals(cluster.manager.sessions.size(), 0); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 0); - } finally { - channelMonitor.stop(); - cluster = null; - channelMonitor = null; - } - return null; - } + // The first session initializes the cluster and its control connection + Session session = cluster.connect(); + assertEquals(cluster.manager.sessions.size(), 1); + assertEquals( + (int) cluster.getMetrics().getOpenConnections().getValue(), + 1 + TestUtils.numberOfLocalCoreConnections(cluster)); + assertEquals( + channelMonitor.openChannels(getContactPointsWithPorts()).size(), + 1 + TestUtils.numberOfLocalCoreConnections(cluster)); + + // Closing the session keeps the control connection opened + session.close(); + assertEquals(cluster.manager.sessions.size(), 0); + assertEquals((int) cluster.getMetrics().getOpenConnections().getValue(), 1); + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); + + return this; + } catch (AssertionError e) { + // If an assertion fails, close the cluster now, because it's the last time we + // have a reference to it. + cluster.close(); + cluster = null; + throw e; + } finally { + channelMonitor.stop(); + } } + } + + private class CloseCluster implements Callable { + private Cluster cluster; + private SocketChannelMonitor channelMonitor; + private final CountDownLatch startSignal; - private static AssertionError assertionError(String message, Throwable cause) { - AssertionError error = new AssertionError(message); - error.initCause(cause); - return error; + CloseCluster(Cluster cluster, SocketChannelMonitor channelMonitor, CountDownLatch startSignal) { + this.cluster = cluster; + this.channelMonitor = channelMonitor; + this.startSignal = startSignal; } + + @Override + public Void call() throws Exception { + startSignal.await(); + try { + cluster.close(); + assertEquals(cluster.manager.sessions.size(), 0); + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 0); + } finally { + channelMonitor.stop(); + cluster = null; + channelMonitor = null; + } + return null; + } + } + + private static AssertionError assertionError(String message, Throwable cause) { + AssertionError error = new AssertionError(message); + error.initCause(cause); + return error; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClusterWidePercentileTrackerTest.java b/driver-core/src/test/java/com/datastax/driver/core/ClusterWidePercentileTrackerTest.java index dc3dd06161b..7b5876b0c5a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClusterWidePercentileTrackerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClusterWidePercentileTrackerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,51 +17,59 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + import com.datastax.driver.core.exceptions.ReadTimeoutException; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.Test; - import java.util.List; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; +import org.testng.annotations.Test; public class ClusterWidePercentileTrackerTest - extends PercentileTrackerTest { - - @Test(groups = "unit") - public void should_track_all_measurements_for_cluster() { - // given - a cluster wide percentile tracker. - Cluster cluster0 = mock(Cluster.class); - ClusterWidePercentileTracker tracker = builder() - .withInterval(1, TimeUnit.SECONDS) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); + extends PercentileTrackerTest< + ClusterWidePercentileTracker.Builder, ClusterWidePercentileTracker> { - List hosts = Lists.newArrayList(mock(Host.class), mock(Host.class), mock(Host.class)); - List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); - List exceptions = Lists.newArrayList(new Exception(), null, new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), null, null); + @Test(groups = "unit") + public void should_track_all_measurements_for_cluster() { + // given - a cluster wide percentile tracker. + Cluster cluster0 = mock(Cluster.class); + ClusterWidePercentileTracker tracker = + builder().withInterval(1, TimeUnit.SECONDS).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); - // when - recording latencies over a linear progression with varying hosts, statements and exceptions. - for (int i = 0; i < 100; i++) { - tracker.update( - hosts.get(i % hosts.size()), - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); - } - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + List hosts = Lists.newArrayList(mock(Host.class), mock(Host.class), mock(Host.class)); + List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); + List exceptions = + Lists.newArrayList( + new Exception(), + null, + new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), + null, + null); - // then - the resulting tracker's percentiles should represent that linear progression. (x percentile == x) - for (int i = 1; i <= 99; i++) { - long latencyAtPct = tracker.getLatencyAtPercentile(null, null, null, i); - assertThat(latencyAtPct).isEqualTo(i); - } + // when - recording latencies over a linear progression with varying hosts, statements and + // exceptions. 
+ for (int i = 0; i < 100; i++) { + tracker.update( + hosts.get(i % hosts.size()), + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); } + Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - @Override - public ClusterWidePercentileTracker.Builder builder() { - return ClusterWidePercentileTracker.builder(defaultMaxLatency); + // then - the resulting tracker's percentiles should represent that linear progression. (x + // percentile == x) + for (int i = 1; i <= 99; i++) { + long latencyAtPct = tracker.getLatencyAtPercentile(null, null, null, i); + assertThat(latencyAtPct).isEqualTo(i); } + } + + @Override + public ClusterWidePercentileTracker.Builder builder() { + return ClusterWidePercentileTracker.builder(defaultMaxLatency); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CodecRegistryTest.java b/driver-core/src/test/java/com/datastax/driver/core/CodecRegistryTest.java index 7ab55aa4b1d..fae6ed77f44 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CodecRegistryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CodecRegistryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,494 +17,488 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.CodecNotFoundException; -import com.google.common.reflect.TypeToken; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.util.*; - import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.DataType.*; +import static com.datastax.driver.core.DataType.CollectionType; +import static com.datastax.driver.core.DataType.blob; +import static com.datastax.driver.core.DataType.cint; import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.set; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.DataType.varchar; import static com.datastax.driver.core.ProtocolVersion.V4; -import static com.datastax.driver.core.TypeTokens.*; +import static com.datastax.driver.core.TypeTokens.listOf; +import static com.datastax.driver.core.TypeTokens.mapOf; +import static com.datastax.driver.core.TypeTokens.setOf; import static com.google.common.reflect.TypeToken.of; -import static java.util.Collections.*; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.testng.Assert.fail; -public class CodecRegistryTest { - - @DataProvider - public static Object[][] cql() { - return new Object[][]{ - {DataType.blob(), TypeCodec.blob()}, - {DataType.cboolean(), TypeCodec.cboolean()}, - {DataType.smallint(), TypeCodec.smallInt()}, - {DataType.tinyint(), TypeCodec.tinyInt()}, - {DataType.cint(), TypeCodec.cint()}, - {DataType.bigint(), TypeCodec.bigint()}, - {DataType.counter(), TypeCodec.counter()}, - {DataType.cdouble(), TypeCodec.cdouble()}, - {DataType.cfloat(), TypeCodec.cfloat()}, - {DataType.varint(), TypeCodec.varint()}, - {DataType.decimal(), TypeCodec.decimal()}, - {DataType.varchar(), TypeCodec.varchar()}, - {DataType.ascii(), TypeCodec.ascii()}, - {DataType.timestamp(), TypeCodec.timestamp()}, - {DataType.date(), TypeCodec.date()}, - {DataType.time(), TypeCodec.time()}, - {DataType.uuid(), TypeCodec.uuid()}, - {DataType.timeuuid(), TypeCodec.timeUUID()}, - {DataType.inet(), TypeCodec.inet()}, - {DataType.duration(), TypeCodec.duration()} - }; - } - - @DataProvider - public static Object[][] cqlAndJava() { - return new Object[][]{ - {DataType.blob(), ByteBuffer.class, TypeCodec.blob()}, - {DataType.cboolean(), Boolean.class, TypeCodec.cboolean()}, - {DataType.smallint(), Short.class, TypeCodec.smallInt()}, - {DataType.tinyint(), Byte.class, TypeCodec.tinyInt()}, - {DataType.cint(), Integer.class, TypeCodec.cint()}, - {DataType.bigint(), Long.class, TypeCodec.bigint()}, - {DataType.counter(), Long.class, TypeCodec.counter()}, - {DataType.cdouble(), Double.class, TypeCodec.cdouble()}, - {DataType.cfloat(), Float.class, TypeCodec.cfloat()}, - 
{DataType.varint(), BigInteger.class, TypeCodec.varint()}, - {DataType.decimal(), BigDecimal.class, TypeCodec.decimal()}, - {DataType.varchar(), String.class, TypeCodec.varchar()}, - {DataType.ascii(), String.class, TypeCodec.ascii()}, - {DataType.timestamp(), Date.class, TypeCodec.timestamp()}, - {DataType.date(), LocalDate.class, TypeCodec.date()}, - {DataType.time(), Long.class, TypeCodec.time()}, - {DataType.uuid(), UUID.class, TypeCodec.uuid()}, - {DataType.timeuuid(), UUID.class, TypeCodec.timeUUID()}, - {DataType.inet(), InetAddress.class, TypeCodec.inet()}, - {DataType.duration(), Duration.class, TypeCodec.duration()} - }; - } - - @DataProvider - public static Object[][] value() { - return new Object[][]{ - {ByteBuffer.allocate(0), TypeCodec.blob()}, - {Boolean.TRUE, TypeCodec.cboolean()}, - {(short) 42, TypeCodec.smallInt()}, - {(byte) 42, TypeCodec.tinyInt()}, - {42, TypeCodec.cint()}, - {42L, TypeCodec.bigint()}, - {42D, TypeCodec.cdouble()}, - {42F, TypeCodec.cfloat()}, - {new BigInteger("1234"), TypeCodec.varint()}, - {new BigDecimal("123.45"), TypeCodec.decimal()}, - {"foo", TypeCodec.varchar()}, - {new Date(42), TypeCodec.timestamp()}, - {LocalDate.fromDaysSinceEpoch(42), TypeCodec.date()}, - {UUID.randomUUID(), TypeCodec.uuid()}, - {mock(InetAddress.class), TypeCodec.inet()}, - {Duration.from("1mo2d3h"), TypeCodec.duration()} - }; - } - - @DataProvider - public static Object[][] cqlAndValue() { - return new Object[][]{ - {DataType.blob(), ByteBuffer.allocate(0), TypeCodec.blob()}, - {DataType.cboolean(), true, TypeCodec.cboolean()}, - {DataType.smallint(), (short) 42, TypeCodec.smallInt()}, - {DataType.tinyint(), (byte) 42, TypeCodec.tinyInt()}, - {DataType.cint(), 42, TypeCodec.cint()}, - {DataType.bigint(), 42L, TypeCodec.bigint()}, - {DataType.counter(), 42L, TypeCodec.counter()}, - {DataType.cdouble(), 42D, TypeCodec.cdouble()}, - {DataType.cfloat(), 42F, TypeCodec.cfloat()}, - {DataType.varint(), new BigInteger("1234"), TypeCodec.varint()}, - {DataType.decimal(), new BigDecimal("123.45"), TypeCodec.decimal()}, - {DataType.varchar(), "foo", TypeCodec.varchar()}, - {DataType.ascii(), "foo", TypeCodec.ascii()}, - {DataType.timestamp(), new Date(42), TypeCodec.timestamp()}, - {DataType.date(), LocalDate.fromDaysSinceEpoch(42), TypeCodec.date()}, - {DataType.time(), 42L, TypeCodec.time()}, - {DataType.uuid(), UUID.randomUUID(), TypeCodec.uuid()}, - {DataType.timeuuid(), UUID.randomUUID(), TypeCodec.timeUUID()}, - {DataType.inet(), mock(InetAddress.class), TypeCodec.inet()}, - {DataType.duration(), Duration.from("1mo2d3h"), TypeCodec.duration()} - }; - } - - @Test(groups = "unit", dataProvider = "cql") - public void should_find_codec_by_cql_type(DataType cqlType, TypeCodec expected) { - // given - CodecRegistry registry = new CodecRegistry(); - // when - TypeCodec actual = registry.codecFor(cqlType); - // then - assertThat(actual) - .isNotNull() - .accepts(cqlType) - .isSameAs(expected); - } - - @Test(groups = "unit", dataProvider = "cqlAndJava") - public void should_find_codec_by_cql_type_java_type(DataType cqlType, Class javaType, TypeCodec expected) { - // given - CodecRegistry registry = new CodecRegistry(); - // when - TypeCodec actual = registry.codecFor(cqlType, javaType); - // then - assertThat(actual) - .isNotNull() - .accepts(cqlType) - .accepts(javaType) - .isSameAs(expected); - } - - @Test(groups = "unit", dataProvider = "value") - public void should_find_codec_by_value(Object value, TypeCodec expected) { - // given - CodecRegistry registry = new CodecRegistry(); 
- // when - TypeCodec actual = registry.codecFor(value); - // then - assertThat(actual) - .isNotNull() - .accepts(value) - .isSameAs(expected); - } - - @Test(groups = "unit", dataProvider = "cqlAndValue") - public void should_find_codec_by_cql_type_and_value(DataType cqlType, Object value, TypeCodec expected) { - // given - CodecRegistry registry = new CodecRegistry(); - // when - TypeCodec actual = registry.codecFor(cqlType, value); - // then - assertThat(actual) - .isNotNull() - .accepts(cqlType) - .accepts(value) - .isSameAs(expected); - } - - @Test(groups = "unit") - public void should_find_newly_registered_codec_by_cql_type() { - // given - CodecRegistry registry = new CodecRegistry(); - TypeCodec expected = mockCodec(list(text()), listOf(String.class)); - registry.register(expected); - // when - TypeCodec actual = registry.codecFor(list(text())); - // then - assertThat(actual) - .isNotNull() - .isSameAs(expected); - } - - @Test(groups = "unit") - public void should_find_default_codec_if_cql_type_already_registered() { - // given - CodecRegistry registry = new CodecRegistry(); - TypeCodec newCodec = mockCodec(text(), of(StringBuilder.class)); - registry.register(newCodec); - // when - TypeCodec actual = registry.codecFor(text()); - // then - assertThat(actual) - .isNotNull() - .isNotSameAs(newCodec) - .accepts(text()) - .accepts(String.class) - .doesNotAccept(StringBuilder.class); - } - - @Test(groups = "unit") - public void should_find_newly_registered_codec_by_cql_type_and_java_type() { - // given - CodecRegistry registry = new CodecRegistry(); - TypeCodec expected = mockCodec(list(text()), listOf(String.class)); - registry.register(expected); - // when - TypeCodec actual = registry.codecFor(list(text()), listOf(String.class)); - // then - assertThat(actual) - .isNotNull() - .isSameAs(expected); - } - - @Test(groups = "unit") - public void should_create_list_codec() { - CollectionType cqlType = list(cint()); - TypeToken> javaType = listOf(Integer.class); - assertThat(new CodecRegistry().codecFor(cqlType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, javaType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(singletonList(42))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, singletonList(42))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(new ArrayList())) - .isNotNull() - // empty collections are mapped to blob codec if no CQL type provided - .accepts(list(blob())) - .accepts(listOf(ByteBuffer.class)); - assertThat(new CodecRegistry().codecFor(cqlType, new ArrayList())) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - } - - @Test(groups = "unit") - public void should_create_set_codec() { - CollectionType cqlType = set(cint()); - TypeToken> javaType = setOf(Integer.class); - assertThat(new CodecRegistry().codecFor(cqlType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, javaType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(singleton(42))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, singleton(42))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(new HashSet())) - .isNotNull() - // empty collections are mapped to blob codec 
if no CQL type provided - .accepts(set(blob())) - .accepts(setOf(ByteBuffer.class)); - assertThat(new CodecRegistry().codecFor(cqlType, new HashSet())) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - } - - @Test(groups = "unit") - public void should_create_map_codec() { - CollectionType cqlType = map(cint(), list(varchar())); - TypeToken>> javaType = mapOf(of(Integer.class), listOf(String.class)); - assertThat(new CodecRegistry().codecFor(cqlType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, javaType)) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(singletonMap(42, singletonList("foo")))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(cqlType, singletonMap(42, singletonList("foo")))) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - assertThat(new CodecRegistry().codecFor(new HashMap>())) - .isNotNull() - // empty collections are mapped to blob codec if no CQL type provided - .accepts(map(blob(), blob())) - .accepts(mapOf(ByteBuffer.class, ByteBuffer.class)); - assertThat(new CodecRegistry().codecFor(cqlType, new HashMap>())) - .isNotNull() - .accepts(cqlType) - .accepts(javaType); - } +import com.datastax.driver.core.exceptions.CodecNotFoundException; +import com.google.common.reflect.TypeToken; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; - @Test(groups = "unit") - public void should_create_tuple_codec() { - CodecRegistry registry = new CodecRegistry(); - TupleType tupleType = TupleType.of(V4, registry, cint(), varchar()); - assertThat(registry.codecFor(tupleType)) - .isNotNull() - .accepts(tupleType) - .accepts(TupleValue.class); - registry = new CodecRegistry(); - tupleType = TupleType.of(V4, registry, cint(), varchar()); - assertThat(registry.codecFor(tupleType, TupleValue.class)) - .isNotNull() - .accepts(tupleType) - .accepts(TupleValue.class); - registry = new CodecRegistry(); - tupleType = TupleType.of(V4, registry, cint(), varchar()); - assertThat(registry.codecFor(new TupleValue(tupleType))) - .isNotNull() - .accepts(tupleType) - .accepts(TupleValue.class); - assertThat(registry.codecFor(tupleType, new TupleValue(tupleType))) - .isNotNull() - .accepts(tupleType) - .accepts(TupleValue.class); - } +public class CodecRegistryTest { - @Test(groups = "unit") - public void should_create_udt_codec() { - CodecRegistry registry = new CodecRegistry(); - UserType udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); - assertThat(registry.codecFor(udt)) - .isNotNull() - .accepts(udt) - .accepts(UDTValue.class); - registry = new CodecRegistry(); - udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); - assertThat(registry.codecFor(udt, UDTValue.class)) - .isNotNull() - .accepts(udt) - .accepts(UDTValue.class); - registry = new CodecRegistry(); - udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); - assertThat(registry.codecFor(new UDTValue(udt))) - .isNotNull() - .accepts(udt) - 
.accepts(UDTValue.class); - registry = new CodecRegistry(); - udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); - assertThat(registry.codecFor(udt, new UDTValue(udt))) - .isNotNull() - .accepts(udt) - .accepts(UDTValue.class); + @DataProvider + public static Object[][] cql() { + return new Object[][] { + {DataType.blob(), TypeCodec.blob()}, + {DataType.cboolean(), TypeCodec.cboolean()}, + {DataType.smallint(), TypeCodec.smallInt()}, + {DataType.tinyint(), TypeCodec.tinyInt()}, + {DataType.cint(), TypeCodec.cint()}, + {DataType.bigint(), TypeCodec.bigint()}, + {DataType.counter(), TypeCodec.counter()}, + {DataType.cdouble(), TypeCodec.cdouble()}, + {DataType.cfloat(), TypeCodec.cfloat()}, + {DataType.varint(), TypeCodec.varint()}, + {DataType.decimal(), TypeCodec.decimal()}, + {DataType.varchar(), TypeCodec.varchar()}, + {DataType.ascii(), TypeCodec.ascii()}, + {DataType.timestamp(), TypeCodec.timestamp()}, + {DataType.date(), TypeCodec.date()}, + {DataType.time(), TypeCodec.time()}, + {DataType.uuid(), TypeCodec.uuid()}, + {DataType.timeuuid(), TypeCodec.timeUUID()}, + {DataType.inet(), TypeCodec.inet()}, + {DataType.duration(), TypeCodec.duration()} + }; + } + + @DataProvider + public static Object[][] cqlAndJava() { + return new Object[][] { + {DataType.blob(), ByteBuffer.class, TypeCodec.blob()}, + {DataType.cboolean(), Boolean.class, TypeCodec.cboolean()}, + {DataType.smallint(), Short.class, TypeCodec.smallInt()}, + {DataType.tinyint(), Byte.class, TypeCodec.tinyInt()}, + {DataType.cint(), Integer.class, TypeCodec.cint()}, + {DataType.bigint(), Long.class, TypeCodec.bigint()}, + {DataType.counter(), Long.class, TypeCodec.counter()}, + {DataType.cdouble(), Double.class, TypeCodec.cdouble()}, + {DataType.cfloat(), Float.class, TypeCodec.cfloat()}, + {DataType.varint(), BigInteger.class, TypeCodec.varint()}, + {DataType.decimal(), BigDecimal.class, TypeCodec.decimal()}, + {DataType.varchar(), String.class, TypeCodec.varchar()}, + {DataType.ascii(), String.class, TypeCodec.ascii()}, + {DataType.timestamp(), Date.class, TypeCodec.timestamp()}, + {DataType.date(), LocalDate.class, TypeCodec.date()}, + {DataType.time(), Long.class, TypeCodec.time()}, + {DataType.uuid(), UUID.class, TypeCodec.uuid()}, + {DataType.timeuuid(), UUID.class, TypeCodec.timeUUID()}, + {DataType.inet(), InetAddress.class, TypeCodec.inet()}, + {DataType.duration(), Duration.class, TypeCodec.duration()} + }; + } + + @DataProvider + public static Object[][] value() { + return new Object[][] { + {ByteBuffer.allocate(0), TypeCodec.blob()}, + {Boolean.TRUE, TypeCodec.cboolean()}, + {(short) 42, TypeCodec.smallInt()}, + {(byte) 42, TypeCodec.tinyInt()}, + {42, TypeCodec.cint()}, + {42L, TypeCodec.bigint()}, + {42D, TypeCodec.cdouble()}, + {42F, TypeCodec.cfloat()}, + {new BigInteger("1234"), TypeCodec.varint()}, + {new BigDecimal("123.45"), TypeCodec.decimal()}, + {"foo", TypeCodec.varchar()}, + {new Date(42), TypeCodec.timestamp()}, + {LocalDate.fromDaysSinceEpoch(42), TypeCodec.date()}, + {UUID.randomUUID(), TypeCodec.uuid()}, + {mock(InetAddress.class), TypeCodec.inet()}, + {Duration.from("1mo2d3h"), TypeCodec.duration()} + }; + } + + @DataProvider + public static Object[][] cqlAndValue() { + return new Object[][] { + {DataType.blob(), ByteBuffer.allocate(0), TypeCodec.blob()}, + {DataType.cboolean(), true, TypeCodec.cboolean()}, + {DataType.smallint(), (short) 42, TypeCodec.smallInt()}, + {DataType.tinyint(), (byte) 42, TypeCodec.tinyInt()}, + {DataType.cint(), 42, TypeCodec.cint()}, + 
{DataType.bigint(), 42L, TypeCodec.bigint()}, + {DataType.counter(), 42L, TypeCodec.counter()}, + {DataType.cdouble(), 42D, TypeCodec.cdouble()}, + {DataType.cfloat(), 42F, TypeCodec.cfloat()}, + {DataType.varint(), new BigInteger("1234"), TypeCodec.varint()}, + {DataType.decimal(), new BigDecimal("123.45"), TypeCodec.decimal()}, + {DataType.varchar(), "foo", TypeCodec.varchar()}, + {DataType.ascii(), "foo", TypeCodec.ascii()}, + {DataType.timestamp(), new Date(42), TypeCodec.timestamp()}, + {DataType.date(), LocalDate.fromDaysSinceEpoch(42), TypeCodec.date()}, + {DataType.time(), 42L, TypeCodec.time()}, + {DataType.uuid(), UUID.randomUUID(), TypeCodec.uuid()}, + {DataType.timeuuid(), UUID.randomUUID(), TypeCodec.timeUUID()}, + {DataType.inet(), mock(InetAddress.class), TypeCodec.inet()}, + {DataType.duration(), Duration.from("1mo2d3h"), TypeCodec.duration()} + }; + } + + @Test(groups = "unit", dataProvider = "cql") + public void should_find_codec_by_cql_type(DataType cqlType, TypeCodec expected) { + // given + CodecRegistry registry = new CodecRegistry(); + // when + TypeCodec actual = registry.codecFor(cqlType); + // then + assertThat(actual).isNotNull().accepts(cqlType).isSameAs(expected); + } + + @Test(groups = "unit", dataProvider = "cqlAndJava") + public void should_find_codec_by_cql_type_java_type( + DataType cqlType, Class javaType, TypeCodec expected) { + // given + CodecRegistry registry = new CodecRegistry(); + // when + TypeCodec actual = registry.codecFor(cqlType, javaType); + // then + assertThat(actual).isNotNull().accepts(cqlType).accepts(javaType).isSameAs(expected); + } + + @Test(groups = "unit", dataProvider = "value") + public void should_find_codec_by_value(Object value, TypeCodec expected) { + // given + CodecRegistry registry = new CodecRegistry(); + // when + TypeCodec actual = registry.codecFor(value); + // then + assertThat(actual).isNotNull().accepts(value).isSameAs(expected); + } + + @Test(groups = "unit", dataProvider = "cqlAndValue") + public void should_find_codec_by_cql_type_and_value( + DataType cqlType, Object value, TypeCodec expected) { + // given + CodecRegistry registry = new CodecRegistry(); + // when + TypeCodec actual = registry.codecFor(cqlType, value); + // then + assertThat(actual).isNotNull().accepts(cqlType).accepts(value).isSameAs(expected); + } + + @Test(groups = "unit") + public void should_find_newly_registered_codec_by_cql_type() { + // given + CodecRegistry registry = new CodecRegistry(); + TypeCodec expected = mockCodec(list(text()), listOf(String.class)); + registry.register(expected); + // when + TypeCodec actual = registry.codecFor(list(text())); + // then + assertThat(actual).isNotNull().isSameAs(expected); + } + + @Test(groups = "unit") + public void should_find_default_codec_if_cql_type_already_registered() { + // given + CodecRegistry registry = new CodecRegistry(); + TypeCodec newCodec = mockCodec(text(), of(StringBuilder.class)); + registry.register(newCodec); + // when + TypeCodec actual = registry.codecFor(text()); + // then + assertThat(actual) + .isNotNull() + .isNotSameAs(newCodec) + .accepts(text()) + .accepts(String.class) + .doesNotAccept(StringBuilder.class); + } + + @Test(groups = "unit") + public void should_find_newly_registered_codec_by_cql_type_and_java_type() { + // given + CodecRegistry registry = new CodecRegistry(); + TypeCodec expected = mockCodec(list(text()), listOf(String.class)); + registry.register(expected); + // when + TypeCodec actual = registry.codecFor(list(text()), listOf(String.class)); + // then + 
assertThat(actual).isNotNull().isSameAs(expected); + } + + @Test(groups = "unit") + public void should_create_list_codec() { + CollectionType cqlType = list(cint()); + TypeToken> javaType = listOf(Integer.class); + assertThat(new CodecRegistry().codecFor(cqlType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, javaType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(singletonList(42))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, singletonList(42))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(new ArrayList())) + .isNotNull() + // empty collections are mapped to blob codec if no CQL type provided + .accepts(list(blob())) + .accepts(listOf(ByteBuffer.class)); + assertThat(new CodecRegistry().codecFor(cqlType, new ArrayList())) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + } + + @Test(groups = "unit") + public void should_create_set_codec() { + CollectionType cqlType = set(cint()); + TypeToken> javaType = setOf(Integer.class); + assertThat(new CodecRegistry().codecFor(cqlType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, javaType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(singleton(42))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, singleton(42))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(new HashSet())) + .isNotNull() + // empty collections are mapped to blob codec if no CQL type provided + .accepts(set(blob())) + .accepts(setOf(ByteBuffer.class)); + assertThat(new CodecRegistry().codecFor(cqlType, new HashSet())) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + } + + @Test(groups = "unit") + public void should_create_map_codec() { + CollectionType cqlType = map(cint(), list(varchar())); + TypeToken>> javaType = mapOf(of(Integer.class), listOf(String.class)); + assertThat(new CodecRegistry().codecFor(cqlType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, javaType)) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(singletonMap(42, singletonList("foo")))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(cqlType, singletonMap(42, singletonList("foo")))) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + assertThat(new CodecRegistry().codecFor(new HashMap>())) + .isNotNull() + // empty collections are mapped to blob codec if no CQL type provided + .accepts(map(blob(), blob())) + .accepts(mapOf(ByteBuffer.class, ByteBuffer.class)); + assertThat(new CodecRegistry().codecFor(cqlType, new HashMap>())) + .isNotNull() + .accepts(cqlType) + .accepts(javaType); + } + + @Test(groups = "unit") + public void should_create_tuple_codec() { + CodecRegistry registry = new CodecRegistry(); + TupleType tupleType = TupleType.of(V4, registry, cint(), varchar()); + assertThat(registry.codecFor(tupleType)) + .isNotNull() + .accepts(tupleType) + .accepts(TupleValue.class); + registry = new CodecRegistry(); + tupleType = TupleType.of(V4, registry, cint(), varchar()); + assertThat(registry.codecFor(tupleType, TupleValue.class)) + .isNotNull() + .accepts(tupleType) 
+ .accepts(TupleValue.class); + registry = new CodecRegistry(); + tupleType = TupleType.of(V4, registry, cint(), varchar()); + assertThat(registry.codecFor(new TupleValue(tupleType))) + .isNotNull() + .accepts(tupleType) + .accepts(TupleValue.class); + assertThat(registry.codecFor(tupleType, new TupleValue(tupleType))) + .isNotNull() + .accepts(tupleType) + .accepts(TupleValue.class); + } + + @Test(groups = "unit") + public void should_create_udt_codec() { + CodecRegistry registry = new CodecRegistry(); + UserType udt = + new UserType("ks", "test", false, Collections.emptyList(), V4, registry); + assertThat(registry.codecFor(udt)).isNotNull().accepts(udt).accepts(UDTValue.class); + registry = new CodecRegistry(); + udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); + assertThat(registry.codecFor(udt, UDTValue.class)) + .isNotNull() + .accepts(udt) + .accepts(UDTValue.class); + registry = new CodecRegistry(); + udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); + assertThat(registry.codecFor(new UDTValue(udt))) + .isNotNull() + .accepts(udt) + .accepts(UDTValue.class); + registry = new CodecRegistry(); + udt = new UserType("ks", "test", false, Collections.emptyList(), V4, registry); + assertThat(registry.codecFor(udt, new UDTValue(udt))) + .isNotNull() + .accepts(udt) + .accepts(UDTValue.class); + } + + @Test(groups = "unit") + public void should_create_codec_for_custom_cql_type() { + DataType custom = DataType.custom("foo"); + assertThat(new CodecRegistry().codecFor(custom)) + .isNotNull() + .accepts(custom) + .accepts(ByteBuffer.class); + assertThat(new CodecRegistry().codecFor(custom, ByteBuffer.class)) + .isNotNull() + .accepts(custom) + .accepts(ByteBuffer.class); + assertThat(new CodecRegistry().codecFor(custom, ByteBuffer.allocate(0))) + .isNotNull() + .accepts(custom) + .accepts(ByteBuffer.class); + } + + @Test(groups = "unit") + public void should_create_derived_codecs_for_java_type_handled_by_custom_codec() { + TypeCodec newCodec = mockCodec(varchar(), of(StringBuilder.class)); + CodecRegistry registry = new CodecRegistry().register(newCodec); + // lookup by CQL type only returns default codec + assertThat(registry.codecFor(list(varchar()))).doesNotAccept(listOf(StringBuilder.class)); + assertThat(registry.codecFor(list(varchar()), listOf(StringBuilder.class))).isNotNull(); + } + + @Test(groups = "unit") + public void should_not_find_codec_if_java_type_unknown() { + try { + new CodecRegistry().codecFor(StringBuilder.class); + fail("Should not have found a codec for ANY <-> StringBuilder"); + } catch (CodecNotFoundException e) { + // expected } - - @Test(groups = "unit") - public void should_create_codec_for_custom_cql_type() { - DataType custom = DataType.custom("foo"); - assertThat(new CodecRegistry().codecFor(custom)) - .isNotNull() - .accepts(custom) - .accepts(ByteBuffer.class); - assertThat(new CodecRegistry().codecFor(custom, ByteBuffer.class)) - .isNotNull() - .accepts(custom) - .accepts(ByteBuffer.class); - assertThat(new CodecRegistry().codecFor(custom, ByteBuffer.allocate(0))) - .isNotNull() - .accepts(custom) - .accepts(ByteBuffer.class); + try { + new CodecRegistry().codecFor(varchar(), StringBuilder.class); + fail("Should not have found a codec for varchar <-> StringBuilder"); + } catch (CodecNotFoundException e) { + // expected } - - @Test(groups = "unit") - public void should_create_derived_codecs_for_java_type_handled_by_custom_codec() { - TypeCodec newCodec = mockCodec(varchar(), of(StringBuilder.class)); - 
CodecRegistry registry = new CodecRegistry().register(newCodec); - // lookup by CQL type only returns default codec - assertThat(registry.codecFor(list(varchar()))).doesNotAccept(listOf(StringBuilder.class)); - assertThat(registry.codecFor(list(varchar()), listOf(StringBuilder.class))).isNotNull(); + try { + new CodecRegistry().codecFor(new StringBuilder()); + fail("Should not have found a codec for ANY <-> StringBuilder"); + } catch (CodecNotFoundException e) { + // expected } - - @Test(groups = "unit") - public void should_not_find_codec_if_java_type_unknown() { - try { - new CodecRegistry().codecFor(StringBuilder.class); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - new CodecRegistry().codecFor(varchar(), StringBuilder.class); - fail("Should not have found a codec for varchar <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - new CodecRegistry().codecFor(new StringBuilder()); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - new CodecRegistry().codecFor(varchar(), new StringBuilder()); - fail("Should not have found a codec for varchar <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } + try { + new CodecRegistry().codecFor(varchar(), new StringBuilder()); + fail("Should not have found a codec for varchar <-> StringBuilder"); + } catch (CodecNotFoundException e) { + // expected } + } - @Test(groups = "unit") - public void should_ignore_codec_colliding_with_already_registered_codec() { - MemoryAppender logs = startCapturingLogs(); + @Test(groups = "unit") + public void should_ignore_codec_colliding_with_already_registered_codec() { + MemoryAppender logs = startCapturingLogs(); - CodecRegistry registry = new CodecRegistry(); + CodecRegistry registry = new CodecRegistry(); - TypeCodec newCodec = mockCodec(cint(), of(Integer.class)); + TypeCodec newCodec = mockCodec(cint(), of(Integer.class)); - registry.register(newCodec); + registry.register(newCodec); - assertThat(logs.getNext()).contains("Ignoring codec MockCodec"); - assertThat( - registry.codecFor(cint(), Integer.class) - ).isNotSameAs(newCodec); + assertThat(logs.getNext()).contains("Ignoring codec MockCodec"); + assertThat(registry.codecFor(cint(), Integer.class)).isNotSameAs(newCodec); - stopCapturingLogs(logs); - } + stopCapturingLogs(logs); + } - @Test(groups = "unit") - public void should_ignore_codec_colliding_with_already_generated_codec() { - MemoryAppender logs = startCapturingLogs(); + @Test(groups = "unit") + public void should_ignore_codec_colliding_with_already_generated_codec() { + MemoryAppender logs = startCapturingLogs(); - CodecRegistry registry = new CodecRegistry(); + CodecRegistry registry = new CodecRegistry(); - // Force generation of a list token from the default token - registry.codecFor(list(cint()), listOf(Integer.class)); + // Force generation of a list token from the default token + registry.codecFor(list(cint()), listOf(Integer.class)); - TypeCodec newCodec = mockCodec(list(cint()), listOf(Integer.class)); + TypeCodec newCodec = mockCodec(list(cint()), listOf(Integer.class)); - registry.register(newCodec); + registry.register(newCodec); - assertThat(logs.getNext()).contains("Ignoring codec MockCodec"); - assertThat( - registry.codecFor(list(cint()), listOf(Integer.class)) - ).isNotSameAs(newCodec); + assertThat(logs.getNext()).contains("Ignoring codec MockCodec"); + 
assertThat(registry.codecFor(list(cint()), listOf(Integer.class))).isNotSameAs(newCodec); - stopCapturingLogs(logs); - } + stopCapturingLogs(logs); + } - private MemoryAppender startCapturingLogs() { - Logger registryLogger = Logger.getLogger(CodecRegistry.class); - registryLogger.setLevel(Level.WARN); - MemoryAppender logs = new MemoryAppender(); - registryLogger.addAppender(logs); - return logs; - } + private MemoryAppender startCapturingLogs() { + Logger registryLogger = Logger.getLogger(CodecRegistry.class); + registryLogger.setLevel(Level.WARN); + MemoryAppender logs = new MemoryAppender(); + registryLogger.addAppender(logs); + return logs; + } - private void stopCapturingLogs(MemoryAppender logs) { - Logger registryLogger = Logger.getLogger(CodecRegistry.class); - registryLogger.setLevel(null); - registryLogger.removeAppender(logs); - } - - private TypeCodec mockCodec(DataType cqlType, TypeToken javaType) { - @SuppressWarnings("unchecked") - TypeCodec newCodec = mock(TypeCodec.class); - when(newCodec.getCqlType()).thenReturn(cqlType); - when(newCodec.getJavaType()).thenReturn(javaType); - when(newCodec.accepts(cqlType)).thenReturn(true); - when(newCodec.accepts(javaType)).thenReturn(true); - when(newCodec.toString()).thenReturn(String.format("MockCodec [%s <-> %s]", cqlType, javaType)); - return newCodec; - } + private void stopCapturingLogs(MemoryAppender logs) { + Logger registryLogger = Logger.getLogger(CodecRegistry.class); + registryLogger.setLevel(null); + registryLogger.removeAppender(logs); + } + private TypeCodec mockCodec(DataType cqlType, TypeToken javaType) { + @SuppressWarnings("unchecked") + TypeCodec newCodec = mock(TypeCodec.class); + when(newCodec.getCqlType()).thenReturn(cqlType); + when(newCodec.getJavaType()).thenReturn(javaType); + when(newCodec.accepts(cqlType)).thenReturn(true); + when(newCodec.accepts(javaType)).thenReturn(true); + when(newCodec.toString()).thenReturn(String.format("MockCodec [%s <-> %s]", cqlType, javaType)); + return newCodec; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsAssert.java b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsAssert.java new file mode 100644 index 00000000000..f6ba1e401c6 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsAssert.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import org.assertj.core.api.AbstractAssert; + +public class ColumnDefinitionsAssert + extends AbstractAssert { + + public ColumnDefinitionsAssert(ColumnDefinitions actual) { + super(actual, ColumnDefinitionsAssert.class); + } + + public ColumnDefinitionsAssert hasSize(int expected) { + assertThat(actual.size()).isEqualTo(expected); + return this; + } + + public ColumnDefinitionsAssert containsVariable(String name, DataType type) { + try { + assertThat(actual.getType(name)).isEqualTo(type); + } catch (Exception e) { + fail( + String.format( + "Expected actual to contain variable %s of type %s, but it did not", name, type), + e); + } + return this; + } + + public ColumnDefinitionsAssert doesNotContainVariable(String name) { + assertThat(actual.getIndexOf(name)).isEqualTo(-1); + return this; + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java index 7e6183f17eb..c584e2477c7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,58 +17,67 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; + public class ColumnDefinitionsTest { - @Test(groups = "unit") - public void caseTest() { + @Test(groups = "unit") + public void caseTest() { - ColumnDefinitions defs; + ColumnDefinitions defs; - defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ - new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "fOO", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()) - }, CodecRegistry.DEFAULT_INSTANCE); + defs = + new ColumnDefinitions( + new ColumnDefinitions.Definition[] { + new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "fOO", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()) + }, + CodecRegistry.DEFAULT_INSTANCE); - assertTrue(defs.contains("foo")); - assertTrue(defs.contains("fOO")); - assertTrue(defs.contains("FOO")); + assertTrue(defs.contains("foo")); + assertTrue(defs.contains("fOO")); + assertTrue(defs.contains("FOO")); - defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ - new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "foo", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "FOO", DataType.cint()), - new ColumnDefinitions.Definition("ks", "cf", "with \" quote", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "\"in quote\"", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf", "in quote", DataType.cint()), - }, CodecRegistry.DEFAULT_INSTANCE); + defs = + new ColumnDefinitions( + new ColumnDefinitions.Definition[] { + new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "foo", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "FOO", DataType.cint()), + new ColumnDefinitions.Definition("ks", "cf", "with \" quote", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "\"in quote\"", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "in quote", DataType.cint()), + }, + CodecRegistry.DEFAULT_INSTANCE); - assertTrue(defs.getType("foo").equals(DataType.text())); - assertTrue(defs.getType("Foo").equals(DataType.text())); - assertTrue(defs.getType("FOO").equals(DataType.text())); - assertTrue(defs.getType("\"FOO\"").equals(DataType.cint())); + assertTrue(defs.getType("foo").equals(DataType.text())); + assertTrue(defs.getType("Foo").equals(DataType.text())); + assertTrue(defs.getType("FOO").equals(DataType.text())); + assertTrue(defs.getType("\"FOO\"").equals(DataType.cint())); - assertTrue(defs.contains("with \" quote")); + assertTrue(defs.contains("with \" quote")); - assertTrue(defs.getType("in quote").equals(DataType.cint())); - assertTrue(defs.getType("\"in quote\"").equals(DataType.cint())); - 
assertTrue(defs.getType("\"\"in quote\"\"").equals(DataType.text())); - } + assertTrue(defs.getType("in quote").equals(DataType.cint())); + assertTrue(defs.getType("\"in quote\"").equals(DataType.cint())); + assertTrue(defs.getType("\"\"in quote\"\"").equals(DataType.text())); + } - @Test(groups = "unit") - public void multiDefinitionTest() { + @Test(groups = "unit") + public void multiDefinitionTest() { - ColumnDefinitions defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ - new ColumnDefinitions.Definition("ks", "cf1", "column", DataType.text()), - new ColumnDefinitions.Definition("ks", "cf2", "column", DataType.cint()), - new ColumnDefinitions.Definition("ks", "cf3", "column", DataType.cfloat()) - }, CodecRegistry.DEFAULT_INSTANCE); + ColumnDefinitions defs = + new ColumnDefinitions( + new ColumnDefinitions.Definition[] { + new ColumnDefinitions.Definition("ks", "cf1", "column", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf2", "column", DataType.cint()), + new ColumnDefinitions.Definition("ks", "cf3", "column", DataType.cfloat()) + }, + CodecRegistry.DEFAULT_INSTANCE); - assertTrue(defs.getType("column").equals(DataType.text())); - } + assertTrue(defs.getType("column").equals(DataType.text())); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ColumnMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/ColumnMetadataAssert.java index 223e57d332a..68a00ec8851 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ColumnMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ColumnMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,59 +17,71 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; -public class ColumnMetadataAssert extends AbstractAssert { +import org.assertj.core.api.AbstractAssert; - protected ColumnMetadataAssert(ColumnMetadata actual) { - super(actual, ColumnMetadataAssert.class); - } +public class ColumnMetadataAssert extends AbstractAssert { - public ColumnMetadataAssert hasType(DataType dataType) { - assertThat(actual.getType()).isEqualTo(dataType); - return this; - } + protected ColumnMetadataAssert(ColumnMetadata actual) { + super(actual, ColumnMetadataAssert.class); + } - public ColumnMetadataAssert hasName(String name) { - assertThat(actual.getName()).isEqualTo(name); - return this; - } + public ColumnMetadataAssert hasType(DataType dataType) { + assertThat(actual.getType()).isEqualTo(dataType); + return this; + } - public ColumnMetadataAssert isPrimaryKey() { - assertThat(actual.getParent().getPrimaryKey().contains(actual)).as("Expecting %s to be part of the primary key, but it was not", actual).isTrue(); - return this; - } + public ColumnMetadataAssert hasName(String name) { + assertThat(actual.getName()).isEqualTo(name); + return this; + } - public ColumnMetadataAssert isPartitionKey() { - assertThat(actual.getParent().getPartitionKey().contains(actual)).as("Expecting %s to be part of the partition key, but it was not", actual).isTrue(); - return this; - } + public ColumnMetadataAssert isPrimaryKey() { + assertThat(actual.getParent().getPrimaryKey().contains(actual)) + .as("Expecting %s to be part of the primary key, but it was not", actual) + .isTrue(); + return this; + } - public ColumnMetadataAssert isClusteringColumn() { - assertThat(actual.getParent().getClusteringColumns().contains(actual)).as("Expecting %s to be a clustering column, but it was not", actual).isTrue(); - return this; - } + public ColumnMetadataAssert isPartitionKey() { + assertThat(actual.getParent().getPartitionKey().contains(actual)) + .as("Expecting %s to be part of the partition key, but it was not", actual) + .isTrue(); + return this; + } - public ColumnMetadataAssert isRegularColumn() { - assertThat(actual.getParent().getPrimaryKey().contains(actual)).as("Expecting %s to be a regular column, but it was not", actual).isFalse(); - return this; - } + public ColumnMetadataAssert isClusteringColumn() { + assertThat(actual.getParent().getClusteringColumns().contains(actual)) + .as("Expecting %s to be a clustering column, but it was not", actual) + .isTrue(); + return this; + } - public ColumnMetadataAssert hasClusteringOrder(ClusteringOrder clusteringOrder) { - assertThat(actual.getParent().getClusteringOrder().get(actual.getParent().getClusteringColumns().indexOf(actual))).isEqualTo(clusteringOrder); - return this; - } + public ColumnMetadataAssert isRegularColumn() { + assertThat(actual.getParent().getPrimaryKey().contains(actual)) + .as("Expecting %s to be a regular column, but it was not", actual) + .isFalse(); + return this; + } - public ColumnMetadataAssert isStatic() { - assertThat(actual.isStatic()).isTrue(); - return this; - } + public ColumnMetadataAssert hasClusteringOrder(ClusteringOrder clusteringOrder) { + assertThat( + actual + 
.getParent() + .getClusteringOrder() + .get(actual.getParent().getClusteringColumns().indexOf(actual))) + .isEqualTo(clusteringOrder); + return this; + } - public ColumnMetadataAssert isNotStatic() { - assertThat(actual.isStatic()).isFalse(); - return this; - } + public ColumnMetadataAssert isStatic() { + assertThat(actual.isStatic()).isTrue(); + return this; + } + public ColumnMetadataAssert isNotStatic() { + assertThat(actual.isStatic()).isFalse(); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CompressionTest.java b/driver-core/src/test/java/com/datastax/driver/core/CompressionTest.java index 7b2c4623544..e0b2d32c87e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CompressionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CompressionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,41 +17,62 @@ */ package com.datastax.driver.core; -import java.util.Locale; - import static com.datastax.driver.core.SessionTest.checkExecuteResultSet; import static org.assertj.core.api.Assertions.assertThat; +import java.util.Locale; + public class CompressionTest extends CCMTestsSupport { - private static String TABLE = "test"; + private static String TABLE = "test"; - public void onTestContextInitialized() { - execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE)); - } + public void onTestContextInitialized() { + execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE)); + } - void compressionTest(ProtocolOptions.Compression compression) { - cluster().getConfiguration().getProtocolOptions().setCompression(compression); - try { - Session compressedSession = cluster().connect(keyspace); + void compressionTest(ProtocolOptions.Compression compression) { + cluster().getConfiguration().getProtocolOptions().setCompression(compression); + try { + Session compressedSession = cluster().connect(keyspace); - // Simple calls to all versions of the execute/executeAsync methods - String key = "execute_compressed_test_" + compression; - ResultSet rs = compressedSession.execute(String.format(Locale.US, "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", TABLE, key, "foo", 42, 24.03f)); - assertThat(rs.isExhausted()).isTrue(); + // Simple calls to all versions of the execute/executeAsync methods + String key = "execute_compressed_test_" + compression; + ResultSet rs = + compressedSession.execute( + String.format( + Locale.US, + "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", + TABLE, + key, + "foo", + 42, + 24.03f)); + assertThat(rs.isExhausted()).isTrue(); - String SELECT_ALL = 
String.format(TestUtils.SELECT_ALL_FORMAT + " WHERE k = '%s'", TABLE, key); + String SELECT_ALL = + String.format(TestUtils.SELECT_ALL_FORMAT + " WHERE k = '%s'", TABLE, key); - // execute - checkExecuteResultSet(compressedSession.execute(SELECT_ALL), key); - checkExecuteResultSet(compressedSession.execute(new SimpleStatement(SELECT_ALL).setConsistencyLevel(ConsistencyLevel.ONE)), key); + // execute + checkExecuteResultSet(compressedSession.execute(SELECT_ALL), key); + checkExecuteResultSet( + compressedSession.execute( + new SimpleStatement(SELECT_ALL).setConsistencyLevel(ConsistencyLevel.ONE)), + key); - // executeAsync - checkExecuteResultSet(compressedSession.executeAsync(SELECT_ALL).getUninterruptibly(), key); - checkExecuteResultSet(compressedSession.executeAsync(new SimpleStatement(SELECT_ALL).setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); + // executeAsync + checkExecuteResultSet(compressedSession.executeAsync(SELECT_ALL).getUninterruptibly(), key); + checkExecuteResultSet( + compressedSession + .executeAsync( + new SimpleStatement(SELECT_ALL).setConsistencyLevel(ConsistencyLevel.ONE)) + .getUninterruptibly(), + key); - } finally { - cluster().getConfiguration().getProtocolOptions().setCompression(ProtocolOptions.Compression.NONE); - } + } finally { + cluster() + .getConfiguration() + .getProtocolOptions() + .setCompression(ProtocolOptions.Compression.NONE); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ConditionChecker.java b/driver-core/src/test/java/com/datastax/driver/core/ConditionChecker.java index c3e71664d09..f9789f10abb 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ConditionChecker.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ConditionChecker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +17,10 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Fail.fail; + import com.google.common.base.Predicate; import com.google.common.base.Predicates; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.Callable; @@ -27,153 +28,157 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - -import static org.assertj.core.api.Fail.fail; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ConditionChecker { - private static final int DEFAULT_PERIOD_MILLIS = 500; + private static final int DEFAULT_PERIOD_MILLIS = 500; - private static final int DEFAULT_TIMEOUT_MILLIS = 60000; + private static final int DEFAULT_TIMEOUT_MILLIS = 60000; - public static class ConditionCheckerBuilder { + public static class ConditionCheckerBuilder { - private long timeout = DEFAULT_TIMEOUT_MILLIS; + private long timeout = DEFAULT_TIMEOUT_MILLIS; - private TimeUnit timeoutUnit = TimeUnit.MILLISECONDS; + private TimeUnit timeoutUnit = TimeUnit.MILLISECONDS; - private long period = DEFAULT_PERIOD_MILLIS; + private long period = DEFAULT_PERIOD_MILLIS; - private TimeUnit periodUnit = TimeUnit.MILLISECONDS; + private TimeUnit periodUnit = TimeUnit.MILLISECONDS; - private Object input; + private Object input; - private Predicate predicate; + private Predicate predicate; - public ConditionCheckerBuilder every(long period, TimeUnit unit) { - this.period = period; - periodUnit = unit; - return this; - } - - public ConditionCheckerBuilder every(long periodMillis) { - period = periodMillis; - periodUnit = TimeUnit.MILLISECONDS; - return this; - } - - public ConditionCheckerBuilder before(long timeout, TimeUnit unit) { - this.timeout = timeout; - timeoutUnit = unit; - return this; - } - - public ConditionCheckerBuilder before(long timeoutMillis) { - timeout = timeoutMillis; - timeoutUnit = TimeUnit.MILLISECONDS; - return this; - } - - public ConditionCheckerBuilder that(T input, Predicate predicate) { - this.input = input; - this.predicate = predicate; - return this; - } - - public ConditionCheckerBuilder that(final Callable condition) { - this.input = null; - this.predicate = new Predicate() { - @Override - public boolean apply(Void input) { - try { - return condition.call(); - } catch (Exception e) { - logger.error("Evaluation of condition threw exception", e); - return false; - } - } - }; - return this; - } - - @SuppressWarnings("unchecked") - public void becomesTrue() { - new ConditionChecker(input, (Predicate) predicate, period, periodUnit).await(timeout, timeoutUnit); - } - - @SuppressWarnings("unchecked") - public void becomesFalse() { - this.predicate = Predicates.not(predicate); - new ConditionChecker(input, (Predicate) predicate, period, periodUnit).await(timeout, timeoutUnit); - } + public ConditionCheckerBuilder every(long period, TimeUnit unit) { + this.period = period; + periodUnit = unit; + return this; + } + public ConditionCheckerBuilder every(long periodMillis) { + period = periodMillis; + periodUnit = TimeUnit.MILLISECONDS; + return this; } - private static final Logger logger = 
LoggerFactory.getLogger(ConditionChecker.class); + public ConditionCheckerBuilder before(long timeout, TimeUnit unit) { + this.timeout = timeout; + timeoutUnit = unit; + return this; + } - public static ConditionCheckerBuilder check() { - return new ConditionCheckerBuilder(); + public ConditionCheckerBuilder before(long timeoutMillis) { + timeout = timeoutMillis; + timeoutUnit = TimeUnit.MILLISECONDS; + return this; } - private final Object input; - private final Predicate predicate; - private final Lock lock; - private final Condition condition; - private final Timer timer; + public ConditionCheckerBuilder that(T input, Predicate predicate) { + this.input = input; + this.predicate = predicate; + return this; + } - @SuppressWarnings("unchecked") - public ConditionChecker(T input, Predicate predicate, long period, TimeUnit periodUnit) { - this.input = input; - this.predicate = (Predicate) predicate; - lock = new ReentrantLock(); - condition = lock.newCondition(); - timer = new Timer("condition-checker", true); - timer.schedule(new TimerTask() { + public ConditionCheckerBuilder that(final Callable condition) { + this.input = null; + this.predicate = + new Predicate() { @Override - public void run() { - checkCondition(); + public boolean apply(Void input) { + try { + return condition.call(); + } catch (Exception e) { + logger.error("Evaluation of condition threw exception", e); + return false; + } } - }, 0, periodUnit.toMillis(period)); + }; + return this; } - /** - * Waits until the predicate becomes true, - * or a timeout occurs, whichever happens first. - */ - public void await(long timeout, TimeUnit unit) { - boolean interrupted = false; - long nanos = unit.toNanos(timeout); - lock.lock(); - try { - while (!evalCondition()) { - if (nanos <= 0L) - fail(String.format("Timeout after %s %s while waiting for condition", timeout, unit.toString().toLowerCase())); - try { - nanos = condition.awaitNanos(nanos); - } catch (InterruptedException e) { - interrupted = true; - } - } - } finally { - timer.cancel(); - if (interrupted) - Thread.currentThread().interrupt(); - } + @SuppressWarnings("unchecked") + public void becomesTrue() { + new ConditionChecker(input, (Predicate) predicate, period, periodUnit) + .await(timeout, timeoutUnit); } - private void checkCondition() { - lock.lock(); + @SuppressWarnings("unchecked") + public void becomesFalse() { + this.predicate = Predicates.not(predicate); + new ConditionChecker(input, (Predicate) predicate, period, periodUnit) + .await(timeout, timeoutUnit); + } + } + + private static final Logger logger = LoggerFactory.getLogger(ConditionChecker.class); + + public static ConditionCheckerBuilder check() { + return new ConditionCheckerBuilder(); + } + + private final Object input; + private final Predicate predicate; + private final Lock lock; + private final Condition condition; + private final Timer timer; + + @SuppressWarnings("unchecked") + public ConditionChecker( + T input, Predicate predicate, long period, TimeUnit periodUnit) { + this.input = input; + this.predicate = (Predicate) predicate; + lock = new ReentrantLock(); + condition = lock.newCondition(); + timer = new Timer("condition-checker", true); + timer.schedule( + new TimerTask() { + @Override + public void run() { + checkCondition(); + } + }, + 0, + periodUnit.toMillis(period)); + } + + /** Waits until the predicate becomes true, or a timeout occurs, whichever happens first. 
*/ + public void await(long timeout, TimeUnit unit) { + boolean interrupted = false; + long nanos = unit.toNanos(timeout); + lock.lock(); + try { + while (!evalCondition()) { + if (nanos <= 0L) + fail( + String.format( + "Timeout after %s %s while waiting for condition", + timeout, unit.toString().toLowerCase())); try { - if (evalCondition()) { - condition.signal(); - } - } finally { - lock.unlock(); + nanos = condition.awaitNanos(nanos); + } catch (InterruptedException e) { + interrupted = true; } + } + } finally { + timer.cancel(); + if (interrupted) Thread.currentThread().interrupt(); } - - private boolean evalCondition() { - return predicate.apply(input); + } + + private void checkCondition() { + lock.lock(); + try { + if (evalCondition()) { + condition.signal(); + } + } finally { + lock.unlock(); } + } + private boolean evalCondition() { + return predicate.apply(input); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ConditionalUpdateTest.java b/driver-core/src/test/java/com/datastax/driver/core/ConditionalUpdateTest.java index 6c349c7b80b..4f29dd274c3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ConditionalUpdateTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ConditionalUpdateTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,114 +17,115 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; -/** - * Test {@link ResultSet#wasApplied()} for conditional updates. - */ +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + +/** Test {@link ResultSet#wasApplied()} for conditional updates. 
*/ @CassandraVersion(value = "2.0.0", description = "Conditional Updates requires 2.0+.") public class ConditionalUpdateTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test(k1 int, k2 int, v int, PRIMARY KEY (k1, k2))"); - } - - @Test(groups = "short") - public void singleUpdateTest() { - session().execute("TRUNCATE test"); - session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); - - ResultSet rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 2"); - assertFalse(rs.wasApplied()); - // Ensure that reading the status does not consume a row: - assertFalse(rs.isExhausted()); - - rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 1"); - assertTrue(rs.wasApplied()); - assertFalse(rs.isExhausted()); - - // Non-conditional statement - rs = session().execute("UPDATE test SET v = 4 WHERE k1 = 1 AND k2 = 1"); - assertTrue(rs.wasApplied()); - } - - @Test(groups = "short") - public void batchUpdateTest() { - session().execute("TRUNCATE test"); - session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); - session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 2, 1)"); - - PreparedStatement ps = session().prepare("UPDATE test SET v = :new WHERE k1 = :k1 AND k2 = :k2 IF v = :old"); - BatchStatement batch = new BatchStatement(); - batch.add(ps.bind().setInt("k1", 1).setInt("k2", 1).setInt("old", 2).setInt("new", 3)); // will fail - batch.add(ps.bind().setInt("k1", 1).setInt("k2", 2).setInt("old", 1).setInt("new", 3)); - - - ResultSet rs = session().execute(batch); - assertFalse(rs.wasApplied()); - } - - - @Test(groups = "short") - public void multipageResultSetTest() { - session().execute("TRUNCATE test"); - session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); - session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 2, 1)"); - - // This is really contrived, we just want to cover the code path in ArrayBackedResultSet#MultiPage. - // Currently CAS update results are never multipage, so it's hard to come up with a meaningful example. - ResultSet rs = session().execute(new SimpleStatement("SELECT * FROM test WHERE k1 = 1").setFetchSize(1)); - - assertTrue(rs.wasApplied()); - } - - /** - * Test for #JAVA-358 - Directly expose CAS_RESULT_COLUMN. - *

    - * This test makes sure that the boolean flag {@code ResultSet.wasApplied()} is false when we try to insert a row - * which already exists. - * - * @see ResultSet#wasApplied() - */ - @Test(groups = "short") - public void insert_if_not_exist_should_support_wasApplied_boolean() { - // First, make sure the test table and the row exist - session().execute("CREATE TABLE IF NOT EXISTS Java358 (key int primary key, value int)"); - ResultSet rs; - rs = session().execute("INSERT INTO Java358(key, value) VALUES (42, 42) IF NOT EXISTS"); - assertTrue(rs.wasApplied()); - - // Then, make sure the flag reports correctly that we did not create a new row - rs = session().execute("INSERT INTO Java358(key, value) VALUES (42, 42) IF NOT EXISTS"); - assertFalse(rs.wasApplied()); - } - - /** - * Test for #JAVA-358 - Directly expose CAS_RESULT_COLUMN. - *

    - * This test makes sure that the boolean flag {@code ResultSet.wasApplied()} is false when we try to delete a row - * which does not exist. - * - * @see ResultSet#wasApplied() - */ - @Test(groups = "short") - public void delete_if_not_exist_should_support_wasApplied_boolean() { - // First, make sure the test table and the row exist - session().execute("CREATE TABLE IF NOT EXISTS Java358 (key int primary key, value int)"); - session().execute("INSERT INTO Java358(key, value) VALUES (42, 42)"); - - // Then, make sure the flag reports correctly that we did delete the row - ResultSet rs; - rs = session().execute("DELETE FROM Java358 WHERE KEY=42 IF EXISTS"); - assertTrue(rs.wasApplied()); - - // Finally, make sure the flag reports correctly that we did did not delete an non-existing row - rs = session().execute("DELETE FROM Java358 WHERE KEY=42 IF EXISTS"); - assertFalse(rs.wasApplied()); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test(k1 int, k2 int, v int, PRIMARY KEY (k1, k2))"); + } + + @Test(groups = "short") + public void singleUpdateTest() { + session().execute("TRUNCATE test"); + session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); + + ResultSet rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 2"); + assertFalse(rs.wasApplied()); + // Ensure that reading the status does not consume a row: + assertFalse(rs.isExhausted()); + + rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 1"); + assertTrue(rs.wasApplied()); + assertFalse(rs.isExhausted()); + + // Non-conditional statement + rs = session().execute("UPDATE test SET v = 4 WHERE k1 = 1 AND k2 = 1"); + assertTrue(rs.wasApplied()); + } + + @Test(groups = "short") + public void batchUpdateTest() { + session().execute("TRUNCATE test"); + session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); + session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 2, 1)"); + + PreparedStatement ps = + session().prepare("UPDATE test SET v = :new WHERE k1 = :k1 AND k2 = :k2 IF v = :old"); + BatchStatement batch = new BatchStatement(); + batch.add( + ps.bind().setInt("k1", 1).setInt("k2", 1).setInt("old", 2).setInt("new", 3)); // will fail + batch.add(ps.bind().setInt("k1", 1).setInt("k2", 2).setInt("old", 1).setInt("new", 3)); + + ResultSet rs = session().execute(batch); + assertFalse(rs.wasApplied()); + } + + @Test(groups = "short") + public void multipageResultSetTest() { + session().execute("TRUNCATE test"); + session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); + session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 2, 1)"); + + // This is really contrived, we just want to cover the code path in + // ArrayBackedResultSet#MultiPage. + // Currently CAS update results are never multipage, so it's hard to come up with a meaningful + // example. + ResultSet rs = + session().execute(new SimpleStatement("SELECT * FROM test WHERE k1 = 1").setFetchSize(1)); + + assertTrue(rs.wasApplied()); + } + + /** + * Test for #JAVA-358 - Directly expose CAS_RESULT_COLUMN. + * + *

    This test makes sure that the boolean flag {@code ResultSet.wasApplied()} is false when we + * try to insert a row which already exists. + * + * @see ResultSet#wasApplied() + */ + @Test(groups = "short") + public void insert_if_not_exist_should_support_wasApplied_boolean() { + // First, make sure the test table and the row exist + session().execute("CREATE TABLE IF NOT EXISTS Java358 (key int primary key, value int)"); + ResultSet rs; + rs = session().execute("INSERT INTO Java358(key, value) VALUES (42, 42) IF NOT EXISTS"); + assertTrue(rs.wasApplied()); + + // Then, make sure the flag reports correctly that we did not create a new row + rs = session().execute("INSERT INTO Java358(key, value) VALUES (42, 42) IF NOT EXISTS"); + assertFalse(rs.wasApplied()); + } + + /** + * Test for #JAVA-358 - Directly expose CAS_RESULT_COLUMN. + * + *

    This test makes sure that the boolean flag {@code ResultSet.wasApplied()} is false when we + * try to delete a row which does not exist. + * + * @see ResultSet#wasApplied() + */ + @Test(groups = "short") + public void delete_if_not_exist_should_support_wasApplied_boolean() { + // First, make sure the test table and the row exist + session().execute("CREATE TABLE IF NOT EXISTS Java358 (key int primary key, value int)"); + session().execute("INSERT INTO Java358(key, value) VALUES (42, 42)"); + + // Then, make sure the flag reports correctly that we did delete the row + ResultSet rs; + rs = session().execute("DELETE FROM Java358 WHERE KEY=42 IF EXISTS"); + assertTrue(rs.wasApplied()); + + // Finally, make sure the flag reports correctly that we did did not delete an non-existing row + rs = session().execute("DELETE FROM Java358 WHERE KEY=42 IF EXISTS"); + assertFalse(rs.wasApplied()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ConnectionReleaseTest.java b/driver-core/src/test/java/com/datastax/driver/core/ConnectionReleaseTest.java index 3c091d0f7ce..915d0898b46 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ConnectionReleaseTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ConnectionReleaseTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,114 +17,116 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.testng.Assert.fail; + import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.ListenableFuture; -import org.scassandra.http.client.PrimingRequest; -import org.testng.annotations.Test; - import java.util.Collection; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.testng.Assert.fail; +import org.scassandra.http.client.PrimingRequest; +import org.testng.annotations.Test; public class ConnectionReleaseTest extends ScassandraTestBase { - /** - *

    - * Validates that when a future is set that the stream associated with the future's request is released. - * This prevents situations where a user may not be specifying a separate executor on a callback/ - * transform to a ResultSetFuture, which is not recommended, causing executeAsync to block in borrowConnection - * until stream ids become available. - *

    - * Executes the following: - *

    - *

      - *
    1. Sets # of connections per host to 1.
    - *
    2. Sends MAX_STREAM_PER_CONNECTION-1 requests that take 10 seconds to execute.
    - *
    3. Calls executeAsync to retrieve records from test1 with k=1.
    - *
    4. Transforms executeAsync to take the 'c' column from the result and query test2. - * This is done without an executor to ensure the netty worker is used and has to wait for the function - * completion.
    - *
    5. Asserts that the transformed future completes within pool timeout and the value is as expected.
    - *
    - * - * @jira_ticket JAVA-666 - * @expected_result Are able to transform a Future without hanging in executeAsync as connection should be freed - * before the transform function is called. - * @test_category queries:async - * @since 2.0.10, 2.1.6 - */ - @SuppressWarnings("unchecked") - @Test(groups = "short") - public void should_release_connection_before_completing_future() throws Exception { - Cluster cluster = null; - Collection mockFutures = Lists.newArrayList(); - try { - primingClient.prime( - PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(ImmutableMap.of("key", 1)) - .withFixedDelay(10000L)) - .build() - ); - primingClient.prime( - PrimingRequest.queryBuilder() - .withQuery("select c from test1 where k=1") - .withThen(then().withRows(ImmutableMap.of("c", "hello"))) - .build() - ); - primingClient.prime( - PrimingRequest.queryBuilder() - .withQuery("select n from test2 where c='hello'") - .withThen(then().withRows(ImmutableMap.of("n", "world"))) - .build() - ); - - cluster = Cluster.builder() - .addContactPoints(hostAddress.getAddress()) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions() - .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) - .setMaxConnectionsPerHost(HostDistance.LOCAL, 1)) - .build(); + /** + * Validates that when a future is set that the stream associated with the future's request is + * released. This prevents situations where a user may not be specifying a separate executor on a + * callback/ transform to a ResultSetFuture, which is not recommended, causing executeAsync to + * block in borrowConnection until stream ids become available. + * + *

    Executes the following: + * + *

    + * + *

      + *
    1. Sets # of connections per host to 1. + *
    2. Sends MAX_STREAM_PER_CONNECTION-1 requests that take 10 seconds to execute. + *
    3. Calls executeAsync to retrieve records from test1 with k=1. + *
    4. Transforms executeAsync to take the 'c' column from the result and query test2. This is + * done without an executor to ensure the netty worker is used and has to wait for the + * function completion. + *
    5. Asserts that the transformed future completes within pool timeout and the value is as + * expected. + *
    + * + * @jira_ticket JAVA-666 + * @expected_result Are able to transform a Future without hanging in executeAsync as connection + * should be freed before the transform function is called. + * @test_category queries:async + * @since 2.0.10, 2.1.6 + */ + @SuppressWarnings("unchecked") + @Test(groups = "short") + public void should_release_connection_before_completing_future() throws Exception { + Cluster cluster = null; + Collection mockFutures = Lists.newArrayList(); + try { + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(ImmutableMap.of("key", 1)).withFixedDelay(10000L)) + .build()); + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("select c from test1 where k=1") + .withThen(then().withRows(ImmutableMap.of("c", "hello"))) + .build()); + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("select n from test2 where c='hello'") + .withThen(then().withRows(ImmutableMap.of("n", "world"))) + .build()); - final Session session = cluster.connect("ks"); - // Consume all stream ids except one. - for (int i = 0; i < StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V2 - 1; i++) - mockFutures.add(session.executeAsync("mock query")); + cluster = + Cluster.builder() + .addContactPoint(hostEndPoint) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions( + new PoolingOptions() + .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) + .setMaxConnectionsPerHost(HostDistance.LOCAL, 1)) + .build(); + final Session session = cluster.connect("ks"); + // Consume all stream ids except one. + for (int i = 0; i < StreamIdGenerator.MAX_STREAM_PER_CONNECTION_V2 - 1; i++) + mockFutures.add(session.executeAsync("mock query")); - ListenableFuture future = GuavaCompatibility.INSTANCE.transformAsync(session.executeAsync("select c from test1 where k=1"), - new AsyncFunction() { - @Override - public ListenableFuture apply(ResultSet result) { - Row row = result.one(); - String c = row.getString("c"); - // Execute async might hang if no streams are available. This happens if the connection - // was not release. - return session.executeAsync("select n from test2 where c='" + c + "'"); - } - }); + ListenableFuture future = + GuavaCompatibility.INSTANCE.transformAsync( + session.executeAsync("select c from test1 where k=1"), + new AsyncFunction() { + @Override + public ListenableFuture apply(ResultSet result) { + Row row = result.one(); + String c = row.getString("c"); + // Execute async might hang if no streams are available. This happens if the + // connection + // was not release. + return session.executeAsync("select n from test2 where c='" + c + "'"); + } + }); - long waitTimeInMs = 2000; - try { - ResultSet result = future.get(waitTimeInMs, TimeUnit.MILLISECONDS); - assertThat(result.one().getString("n")).isEqualTo("world"); - } catch (TimeoutException e) { - fail("Future timed out after " + waitTimeInMs + "ms. " + - "There is a strong possibility connection is not being released."); - } - } finally { - // Cancel all pending requests. - for (ResultSetFuture future : mockFutures) - future.cancel(true); - if (cluster != null) - cluster.close(); - } + long waitTimeInMs = 2000; + try { + ResultSet result = future.get(waitTimeInMs, TimeUnit.MILLISECONDS); + assertThat(result.one().getString("n")).isEqualTo("world"); + } catch (TimeoutException e) { + fail( + "Future timed out after " + + waitTimeInMs + + "ms. 
" + + "There is a strong possibility connection is not being released."); + } + } finally { + // Cancel all pending requests. + for (ResultSetFuture future : mockFutures) future.cancel(true); + if (cluster != null) cluster.close(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ConsistencyTest.java b/driver-core/src/test/java/com/datastax/driver/core/ConsistencyTest.java index 1daa5490f9d..78165b6a441 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ConsistencyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ConsistencyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,12 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; + +import java.util.List; import org.scassandra.Scassandra; import org.scassandra.http.client.BatchExecution; import org.scassandra.http.client.PreparedStatementExecution; @@ -26,314 +34,327 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.util.List; +public class ConsistencyTest { -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static org.testng.Assert.*; + private static final Logger logger = LoggerFactory.getLogger(ConsistencyTest.class); + private ScassandraCluster sCluster; -public class ConsistencyTest { + @BeforeClass(groups = "short") + public void setUp() { + sCluster = ScassandraCluster.builder().withNodes(1).build(); + sCluster.init(); + } - private static final Logger logger = LoggerFactory.getLogger(ConsistencyTest.class); - private ScassandraCluster sCluster; + @AfterClass(groups = "short") + public void tearDownClass() { + sCluster.stop(); + } - @BeforeClass(groups = "short") - public void setUp() { - sCluster = ScassandraCluster.builder().withNodes(1).build(); - sCluster.init(); - } + @AfterMethod(groups = "short") + public void tearDown() { + clearActivityLog(); + } - @AfterClass(groups = "short") - public void tearDownClass() { - sCluster.stop(); + public void clearActivityLog() { + for (Scassandra node : sCluster.nodes()) { + node.activityClient().clearAllRecordedActivity(); } - - @AfterMethod(groups = "short") - public void tearDown() { - clearActivityLog(); + } + + public Cluster.Builder builder() { + // Note: nonQuietClusterCloseOptions is used to speed up tests + return Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions); + } + + /** + * 
This method checks the expected/sent serial consistency level against that which is received. + * ConsistencyLevel.SERIAL is the default serial consistency level, so even when sent it will + * return as null. + */ + public void checkSerialCLMatch(ConsistencyLevel expected, String received) { + if (expected.equals(ConsistencyLevel.SERIAL)) { + assertNull(received); + } else { + assertTrue(received.equals(expected.toString())); } - - public void clearActivityLog() { - for (Scassandra node : sCluster.nodes()) { - node.activityClient().clearAllRecordedActivity(); - } + } + + public PreparedStatementExecution executePrepared( + Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { + PreparedStatement ps = session.prepare(statement); + BoundStatement bound = ps.bind(); + if (level != null) { + bound.setConsistencyLevel(level); } - - public Cluster.Builder builder() { - //Note: nonQuietClusterCloseOptions is used to speed up tests - return Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()).withNettyOptions(nonQuietClusterCloseOptions); + if (serialLevel != null) { + bound.setSerialConsistencyLevel(serialLevel); } - - /** - * This method checks the expected/sent serial consistency level against that which is received. - * ConsistencyLevel.SERIAL is the default serial consistency level, so even when sent it will return - * as null. - */ - public void checkSerialCLMatch(ConsistencyLevel expected, String received) { - if (expected.equals(ConsistencyLevel.SERIAL)) { - assertNull(received); - } else { - assertTrue(received.equals(expected.toString())); - } + session.execute(bound); + List pses = + sCluster.node(1).activityClient().retrievePreparedStatementExecutions(); + PreparedStatementExecution pse = pses.get(0); + assertTrue(pse.getPreparedStatementText().equals(statement)); + return pse; + } + + public BatchExecution executeBatch( + Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement(statement)); + + if (level != null) { + batch.setConsistencyLevel(level); } - - public PreparedStatementExecution executePrepared(Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { - PreparedStatement ps = session.prepare(statement); - BoundStatement bound = ps.bind(); - if (level != null) { - bound.setConsistencyLevel(level); - } - if (serialLevel != null) { - bound.setSerialConsistencyLevel(serialLevel); - } - session.execute(bound); - List pses = sCluster.node(1).activityClient().retrievePreparedStatementExecutions(); - PreparedStatementExecution pse = pses.get(0); - assertTrue(pse.getPreparedStatementText().equals(statement)); - return pse; + if (serialLevel != null) { + batch.setSerialConsistencyLevel(serialLevel); } - - public BatchExecution executeBatch(Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement(statement)); - - if (level != null) { - batch.setConsistencyLevel(level); - } - if (serialLevel != null) { - batch.setSerialConsistencyLevel(serialLevel); - } - session.execute(batch); - List batches = sCluster.node(1).activityClient().retrieveBatches(); - assertEquals(batches.size(), 1); - return batches.get(0); + session.execute(batch); + List batches = sCluster.node(1).activityClient().retrieveBatches(); + assertEquals(batches.size(), 1); + 
return batches.get(0); + } + + public Query executeSimple( + Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { + SimpleStatement simpleStatement = new SimpleStatement(statement); + if (level != null) { + simpleStatement.setConsistencyLevel(level); } - - public Query executeSimple(Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) { - SimpleStatement simpleStatement = new SimpleStatement(statement); - if (level != null) { - simpleStatement.setConsistencyLevel(level); - } - if (serialLevel != null) { - simpleStatement.setSerialConsistencyLevel(serialLevel); - } - session.execute(simpleStatement); - //Find the unique query in the activity log. - List queries = sCluster.node(1).activityClient().retrieveQueries(); - for (Query query : queries) { - if (query.getQuery().equals(statement)) - return query; - } - return null; + if (serialLevel != null) { + simpleStatement.setSerialConsistencyLevel(serialLevel); } - - /** - * When no consistency level is defined the default of LOCAL_ONE should be used. - * - * @test_category consistency - */ - @Test(groups = "short") - public void should_use_global_default_cl_when_none_specified() throws Throwable { - //Build a cluster with no CL level set in the query options. - Cluster cluster = builder().build(); - try { - Session session = cluster.connect(); - - //Construct unique simple statement query, with no CL defined. - //Check to ensure - String queryString = "default_cl"; - Query clQuery = executeSimple(session, queryString, null, null); - assertTrue(clQuery.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); - - //Check prepared statement default CL - String prepareString = "prepared_default_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); - assertTrue(pse.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); - - //Check batch statement default CL - String batchStateString = "batch_default_cl"; - BatchExecution batch = executeBatch(session, batchStateString, null, null); - assertTrue(batch.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); - } finally { - cluster.close(); - } + session.execute(simpleStatement); + // Find the unique query in the activity log. + List queries = sCluster.node(1).activityClient().retrieveQueries(); + for (Query query : queries) { + if (query.getQuery().equals(statement)) return query; } - - /** - * Exhaustively tests all consistency levels when they are set via QueryOptions. - * - * @test_category consistency - */ - @Test(groups = "short", dataProvider = "consistencyLevels", dataProviderClass = DataProviders.class) - public void should_use_query_option_cl(ConsistencyLevel cl) throws Throwable { - //Build a cluster with a CL level set in the query options. - Cluster cluster = builder().withQueryOptions(new QueryOptions().setConsistencyLevel(cl)).build(); - try { - Session session = cluster.connect(); - //Construct unique query, with no CL defined. 
- String queryString = "query_cl"; - Query clQuery = executeSimple(session, queryString, null, null); - assertTrue(clQuery.getConsistency().equals(cl.toString())); - - //Check prepared statement CL - String prepareString = "preapred_query_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); - assertTrue(pse.getConsistency().equals(cl.toString())); - - //Check batch statement CL - String batchStateString = "batch_query_cl"; - BatchExecution batch = executeBatch(session, batchStateString, null, null); - assertTrue(batch.getConsistency().equals(cl.toString())); - } finally { - cluster.close(); - } + return null; + } + + /** + * When no consistency level is defined the default of LOCAL_ONE should be used. + * + * @test_category consistency + */ + @Test(groups = "short") + public void should_use_global_default_cl_when_none_specified() throws Throwable { + // Build a cluster with no CL level set in the query options. + Cluster cluster = builder().build(); + try { + Session session = cluster.connect(); + + // Construct unique simple statement query, with no CL defined. + // Check to ensure + String queryString = "default_cl"; + Query clQuery = executeSimple(session, queryString, null, null); + assertTrue(clQuery.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); + + // Check prepared statement default CL + String prepareString = "prepared_default_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); + assertTrue(pse.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); + + // Check batch statement default CL + String batchStateString = "batch_default_cl"; + BatchExecution batch = executeBatch(session, batchStateString, null, null); + assertTrue(batch.getConsistency().equals(ConsistencyLevel.LOCAL_ONE.toString())); + } finally { + cluster.close(); } - - /** - * Exhaustively tests all consistency levels when they are set at the statement level. - * - * @test_category consistency - */ - @Test(groups = "short", dataProvider = "consistencyLevels", dataProviderClass = DataProviders.class) - public void should_use_statement_cl(ConsistencyLevel cl) throws Throwable { - //Build a cluster with no CL set in the query options. - //Note: nonQuietClusterCloseOptions is used to speed up tests - Cluster cluster = builder().build(); - try { - Session session = cluster.connect(); - //Construct unique query statement with a CL defined. - String queryString = "statement_cl"; - Query clQuery = executeSimple(session, queryString, cl, null); - assertTrue(clQuery.getConsistency().equals(cl.toString())); - - //Check prepared statement CL - String prepareString = "preapred_statement_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, cl, null); - assertTrue(pse.getConsistency().equals(cl.toString())); - - //Check batch statement CL - String batchStateString = "batch_statement_cl"; - BatchExecution batch = executeBatch(session, batchStateString, cl, null); - assertTrue(batch.getConsistency().equals(cl.toString())); - } finally { - cluster.close(); - } + } + + /** + * Exhaustively tests all consistency levels when they are set via QueryOptions. + * + * @test_category consistency + */ + @Test( + groups = "short", + dataProvider = "consistencyLevels", + dataProviderClass = DataProviders.class) + public void should_use_query_option_cl(ConsistencyLevel cl) throws Throwable { + // Build a cluster with a CL level set in the query options. 
+ Cluster cluster = + builder().withQueryOptions(new QueryOptions().setConsistencyLevel(cl)).build(); + try { + Session session = cluster.connect(); + // Construct unique query, with no CL defined. + String queryString = "query_cl"; + Query clQuery = executeSimple(session, queryString, null, null); + assertTrue(clQuery.getConsistency().equals(cl.toString())); + + // Check prepared statement CL + String prepareString = "preapred_query_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); + assertTrue(pse.getConsistency().equals(cl.toString())); + + // Check batch statement CL + String batchStateString = "batch_query_cl"; + BatchExecution batch = executeBatch(session, batchStateString, null, null); + assertTrue(batch.getConsistency().equals(cl.toString())); + } finally { + cluster.close(); } - - /** - * Tests that order of precedence is followed when defining CLs. - * Statement level CL should be honored above QueryOptions. - * QueryOptions should be honored above default CL. - * - * @test_category consistency - */ - @Test(groups = "short") - public void should_use_appropriate_cl_when_multiple_defined() throws Throwable { - ConsistencyLevel cl_one = ConsistencyLevel.ONE; - //Build a cluster with no CL set in the query options. - Cluster cluster = builder().withQueryOptions(new QueryOptions().setConsistencyLevel(cl_one)).build(); - try { - - Session session = cluster.connect(); - - //Check order of precedence for simple statements - //Construct unique query statement with no CL defined. - String queryString = "opts_cl"; - Query clQuery = executeSimple(session, queryString, null, null); - assertTrue(clQuery.getConsistency().equals(cl_one.toString())); - - //Construct unique query statement with a CL defined. - ConsistencyLevel cl_all = ConsistencyLevel.ALL; - queryString = "stm_cl"; - clQuery = executeSimple(session, queryString, cl_all, null); - assertTrue(clQuery.getConsistency().equals(cl_all.toString())); - - //Check order of precedence for prepared statements - //Construct unique prepared statement with no CL defined. - String prepareString = "prep_opts_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); - assertTrue(pse.getConsistency().equals(cl_one.toString())); - clearActivityLog(); - - //Construct unique prepared statement with a CL defined. - prepareString = "prep_stm_cl"; - pse = executePrepared(session, prepareString, cl_all, null); - assertTrue(pse.getConsistency().equals(cl_all.toString())); - - //Check order of precedence for batch statements - //Construct unique batch statement with no CL defined. - String batchString = "batch_opts_cl"; - BatchExecution batch = executeBatch(session, batchString, null, null); - assertTrue(batch.getConsistency().equals(cl_one.toString())); - clearActivityLog(); - - //Construct unique prepared statement with a CL defined. - batchString = "prep_stm_cl"; - batch = executeBatch(session, batchString, cl_all, null); - assertTrue(batch.getConsistency().equals(cl_all.toString())); - } finally { - cluster.close(); - } + } + + /** + * Exhaustively tests all consistency levels when they are set at the statement level. + * + * @test_category consistency + */ + @Test( + groups = "short", + dataProvider = "consistencyLevels", + dataProviderClass = DataProviders.class) + public void should_use_statement_cl(ConsistencyLevel cl) throws Throwable { + // Build a cluster with no CL set in the query options. 
+ // Note: nonQuietClusterCloseOptions is used to speed up tests + Cluster cluster = builder().build(); + try { + Session session = cluster.connect(); + // Construct unique query statement with a CL defined. + String queryString = "statement_cl"; + Query clQuery = executeSimple(session, queryString, cl, null); + assertTrue(clQuery.getConsistency().equals(cl.toString())); + + // Check prepared statement CL + String prepareString = "preapred_statement_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, cl, null); + assertTrue(pse.getConsistency().equals(cl.toString())); + + // Check batch statement CL + String batchStateString = "batch_statement_cl"; + BatchExecution batch = executeBatch(session, batchStateString, cl, null); + assertTrue(batch.getConsistency().equals(cl.toString())); + } finally { + cluster.close(); } - - /** - * Exhaustively tests all serial consistency levels when they are set via QueryOptions. - * - * @test_category consistency - */ - @Test(groups = "short", dataProvider = "serialConsistencyLevels", dataProviderClass = DataProviders.class) - public void should_use_query_option_serial_cl(ConsistencyLevel cl) throws Throwable { - //Build a cluster with a CL level set in the query options. - Cluster cluster = builder().withQueryOptions(new QueryOptions().setSerialConsistencyLevel(cl)).build(); - try { - Session session = cluster.connect(); - //Construct unique query, with no CL defined. - String queryString = "serial_query_cl"; - Query clQuery = executeSimple(session, queryString, null, cl); - checkSerialCLMatch(cl, clQuery.getSerialConsistency()); - - //Check prepared statement CL - String prepareString = "preapred_statement_serial_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); - checkSerialCLMatch(cl, pse.getSerialConsistency()); - - //Check batch statement CL - String batchStateString = "batch_statement_serial_cl"; - BatchExecution batch = executeBatch(session, batchStateString, null, null); - checkSerialCLMatch(cl, batch.getSerialConsistency()); - } finally { - cluster.close(); - } + } + + /** + * Tests that order of precedence is followed when defining CLs. Statement level CL should be + * honored above QueryOptions. QueryOptions should be honored above default CL. + * + * @test_category consistency + */ + @Test(groups = "short") + public void should_use_appropriate_cl_when_multiple_defined() throws Throwable { + ConsistencyLevel cl_one = ConsistencyLevel.ONE; + // Build a cluster with no CL set in the query options. + Cluster cluster = + builder().withQueryOptions(new QueryOptions().setConsistencyLevel(cl_one)).build(); + try { + + Session session = cluster.connect(); + + // Check order of precedence for simple statements + // Construct unique query statement with no CL defined. + String queryString = "opts_cl"; + Query clQuery = executeSimple(session, queryString, null, null); + assertTrue(clQuery.getConsistency().equals(cl_one.toString())); + + // Construct unique query statement with a CL defined. + ConsistencyLevel cl_all = ConsistencyLevel.ALL; + queryString = "stm_cl"; + clQuery = executeSimple(session, queryString, cl_all, null); + assertTrue(clQuery.getConsistency().equals(cl_all.toString())); + + // Check order of precedence for prepared statements + // Construct unique prepared statement with no CL defined. 
+ String prepareString = "prep_opts_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); + assertTrue(pse.getConsistency().equals(cl_one.toString())); + clearActivityLog(); + + // Construct unique prepared statement with a CL defined. + prepareString = "prep_stm_cl"; + pse = executePrepared(session, prepareString, cl_all, null); + assertTrue(pse.getConsistency().equals(cl_all.toString())); + + // Check order of precedence for batch statements + // Construct unique batch statement with no CL defined. + String batchString = "batch_opts_cl"; + BatchExecution batch = executeBatch(session, batchString, null, null); + assertTrue(batch.getConsistency().equals(cl_one.toString())); + clearActivityLog(); + + // Construct unique prepared statement with a CL defined. + batchString = "prep_stm_cl"; + batch = executeBatch(session, batchString, cl_all, null); + assertTrue(batch.getConsistency().equals(cl_all.toString())); + } finally { + cluster.close(); } - - /** - * Exhaustively tests all serial consistency levels when they are set at the statement level. - * - * @test_category consistency - */ - @Test(groups = "short", dataProvider = "serialConsistencyLevels", dataProviderClass = DataProviders.class) - public void should_use_statement_serial_cl(ConsistencyLevel cl) throws Throwable { - //Build a cluster with no CL set in the query options. - Cluster cluster = builder().build(); - try { - Session session = cluster.connect(); - //Construct unique query statement with a CL defined. - String queryString = "statement_serial_cl"; - Query clQuery = executeSimple(session, queryString, null, cl); - checkSerialCLMatch(cl, clQuery.getSerialConsistency()); - - //Check prepared statement CL - String prepareString = "preapred_statement_serial_cl"; - PreparedStatementExecution pse = executePrepared(session, prepareString, null, cl); - checkSerialCLMatch(cl, pse.getSerialConsistency()); - - //Check batch statement CL - String batchStateString = "batch_statement_serial_cl"; - BatchExecution batch = executeBatch(session, batchStateString, null, cl); - checkSerialCLMatch(cl, batch.getSerialConsistency()); - } finally { - cluster.close(); - } + } + + /** + * Exhaustively tests all serial consistency levels when they are set via QueryOptions. + * + * @test_category consistency + */ + @Test( + groups = "short", + dataProvider = "serialConsistencyLevels", + dataProviderClass = DataProviders.class) + public void should_use_query_option_serial_cl(ConsistencyLevel cl) throws Throwable { + // Build a cluster with a CL level set in the query options. + Cluster cluster = + builder().withQueryOptions(new QueryOptions().setSerialConsistencyLevel(cl)).build(); + try { + Session session = cluster.connect(); + // Construct unique query, with no CL defined. 
+ String queryString = "serial_query_cl"; + Query clQuery = executeSimple(session, queryString, null, cl); + checkSerialCLMatch(cl, clQuery.getSerialConsistency()); + + // Check prepared statement CL + String prepareString = "preapred_statement_serial_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, null, null); + checkSerialCLMatch(cl, pse.getSerialConsistency()); + + // Check batch statement CL + String batchStateString = "batch_statement_serial_cl"; + BatchExecution batch = executeBatch(session, batchStateString, null, null); + checkSerialCLMatch(cl, batch.getSerialConsistency()); + } finally { + cluster.close(); + } + } + + /** + * Exhaustively tests all serial consistency levels when they are set at the statement level. + * + * @test_category consistency + */ + @Test( + groups = "short", + dataProvider = "serialConsistencyLevels", + dataProviderClass = DataProviders.class) + public void should_use_statement_serial_cl(ConsistencyLevel cl) throws Throwable { + // Build a cluster with no CL set in the query options. + Cluster cluster = builder().build(); + try { + Session session = cluster.connect(); + // Construct unique query statement with a CL defined. + String queryString = "statement_serial_cl"; + Query clQuery = executeSimple(session, queryString, null, cl); + checkSerialCLMatch(cl, clQuery.getSerialConsistency()); + + // Check prepared statement CL + String prepareString = "preapred_statement_serial_cl"; + PreparedStatementExecution pse = executePrepared(session, prepareString, null, cl); + checkSerialCLMatch(cl, pse.getSerialConsistency()); + + // Check batch statement CL + String batchStateString = "batch_statement_serial_cl"; + BatchExecution batch = executeBatch(session, batchStateString, null, cl); + checkSerialCLMatch(cl, batch.getSerialConsistency()); + } finally { + cluster.close(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java index 1c8c2ef81b1..90edb28b1d0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,411 +17,911 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.policies.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.ScassandraCluster.SELECT_LOCAL; +import static com.datastax.driver.core.ScassandraCluster.SELECT_LOCAL_RPC_ADDRESS_AND_PORT; +import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS; +import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS_DSE68; +import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS_V2; +import static com.datastax.driver.core.ScassandraCluster.datacenter; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static com.google.common.collect.Lists.newArrayList; +import static org.scassandra.http.client.PrimingRequest.then; + +import com.datastax.driver.core.policies.ConstantReconnectionPolicy; +import com.datastax.driver.core.policies.DelegatingLoadBalancingPolicy; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.Policies; +import com.datastax.driver.core.policies.ReconnectionPolicy; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Function; +import com.google.common.base.Optional; +import com.google.common.collect.Collections2; import com.google.common.collect.HashMultiset; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import org.apache.log4j.Level; -import org.scassandra.http.client.PrimingRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Collection; +import java.util.HashSet; import java.util.Iterator; import java.util.Map; +import java.util.Random; +import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS; -import static com.datastax.driver.core.ScassandraCluster.datacenter; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static com.google.common.collect.Lists.newArrayList; -import static org.scassandra.http.client.PrimingRequest.then; +import org.apache.log4j.Level; +import org.scassandra.http.client.PrimingClient; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createCluster = false) public class ControlConnectionTest extends CCMTestsSupport { - static final Logger logger = 
LoggerFactory.getLogger(ControlConnectionTest.class); - - @Test(groups = "short") - @CCMConfig(numberOfNodes = 2) - public void should_prevent_simultaneous_reconnection_attempts() throws InterruptedException { - - // Custom load balancing policy that counts the number of calls to newQueryPlan(). - // Since we don't open any session from our Cluster, only the control connection reattempts are calling this - // method, therefore the invocation count is equal to the number of attempts. - QueryPlanCountingPolicy loadBalancingPolicy = new QueryPlanCountingPolicy(Policies.defaultLoadBalancingPolicy()); - AtomicInteger reconnectionAttempts = loadBalancingPolicy.counter; - - // Custom reconnection policy with a very large delay (longer than the test duration), to make sure we count - // only the first reconnection attempt of each reconnection handler. - ReconnectionPolicy reconnectionPolicy = new ConstantReconnectionPolicy(60 * 1000); - - // We pass only the first host as contact point, so we know the control connection will be on this host - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + static final Logger logger = LoggerFactory.getLogger(ControlConnectionTest.class); + + @Test(groups = "short") + @CCMConfig(numberOfNodes = 2) + public void should_prevent_simultaneous_reconnection_attempts() throws InterruptedException { + + // Custom load balancing policy that counts the number of calls to newQueryPlan(). + // Since we don't open any session from our Cluster, only the control connection reattempts are + // calling this + // method, therefore the invocation count is equal to the number of attempts. + QueryPlanCountingPolicy loadBalancingPolicy = + new QueryPlanCountingPolicy(Policies.defaultLoadBalancingPolicy()); + AtomicInteger reconnectionAttempts = loadBalancingPolicy.counter; + + // Custom reconnection policy with a very large delay (longer than the test duration), to make + // sure we count + // only the first reconnection attempt of each reconnection handler. + ReconnectionPolicy reconnectionPolicy = new ConstantReconnectionPolicy(60 * 1000); + + // We pass only the first host as contact point, so we know the control connection will be on + // this host + Cluster cluster = + register( + createClusterBuilder() .withReconnectionPolicy(reconnectionPolicy) .withLoadBalancingPolicy(loadBalancingPolicy) .build()); - cluster.init(); - - // Kill the control connection host, there should be exactly one reconnection attempt - ccm().stop(1); - TimeUnit.SECONDS.sleep(1); // Sleep for a while to make sure our final count is not the result of lucky timing - assertThat(reconnectionAttempts.get()).isEqualTo(1); + cluster.init(); + + // Kill the control connection host, there should be exactly one reconnection attempt + ccm().stop(1); + TimeUnit.SECONDS.sleep( + 1); // Sleep for a while to make sure our final count is not the result of lucky timing + assertThat(reconnectionAttempts.get()).isEqualTo(1); + + ccm().stop(2); + TimeUnit.SECONDS.sleep(1); + assertThat(reconnectionAttempts.get()).isEqualTo(2); + } + + /** + * Test for JAVA-509: UDT definitions were not properly parsed when using the default protocol + * version. + * + *
    This did not appear with other tests because the UDT needs to already exist when the driver + * initializes. Therefore we use two different driver instances in this test. + */ + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_parse_UDT_definitions_when_using_default_protocol_version() { + // First driver instance: create UDT + Cluster cluster = register(createClusterBuilder().build()); + Session session = cluster.connect(); + session.execute( + "create keyspace ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute("create type ks.foo (i int)"); + cluster.close(); + + // Second driver instance: read UDT definition + Cluster cluster2 = register(createClusterBuilder().build()); + UserType fooType = cluster2.getMetadata().getKeyspace("ks").getUserType("foo"); + + assertThat(fooType.getFieldNames()).containsExactly("i"); + } + + /** + * Ensures that if the host that the Control Connection is connected to is removed/decommissioned + * that the Control Connection is reestablished to another host. + * + * @jira_ticket JAVA-597 + * @expected_result Control Connection is reestablished to another host. + * @test_category control_connection + * @since 2.0.9 + */ + @Test(groups = "long") + @CCMConfig(numberOfNodes = 3) + public void should_reestablish_if_control_node_decommissioned() throws InterruptedException { + InetSocketAddress firstHost = ccm().addressOfNode(1); + Cluster cluster = register(createClusterBuilderNoDebouncing().build()); + cluster.init(); + + // Ensure the control connection host is that of the first node. + InetSocketAddress controlHost = + cluster.manager.controlConnection.connectedHost().getEndPoint().resolve(); + assertThat(controlHost).isEqualTo(firstHost); + + // Decommission the node. + ccm().decommission(1); + + // Ensure that the new control connection is not null and it's host is not equal to the + // decommissioned node. + Host newHost = cluster.manager.controlConnection.connectedHost(); + assertThat(newHost).isNotNull(); + assertThat(newHost.getAddress()).isNotEqualTo(controlHost); + } + + /** + * Ensures that contact points are randomized when determining the initial control connection by + * default. Initializes a cluster with 5 contact points 100 times and ensures that all 5 were + * used. + * + * @jira_ticket JAVA-618 + * @expected_result All 5 hosts were chosen within 100 attempts. There is a very small possibility + * that this may not be the case and this is not actually an error. 
+ * @test_category control_connection + * @since 2.0.11, 2.1.8, 2.2.0 + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_randomize_contact_points_when_determining_control_connection() { + int hostCount = 5; + int iterations = 100; + ScassandraCluster scassandras = ScassandraCluster.builder().withNodes(hostCount).build(); + scassandras.init(); + + try { + Collection contactPoints = newArrayList(); + for (int i = 1; i <= hostCount; i++) { + contactPoints.add(scassandras.address(i).getAddress()); + } + final HashMultiset occurrencesByHost = HashMultiset.create(hostCount); + for (int i = 0; i < iterations; i++) { + Cluster cluster = + Cluster.builder() + .addContactPoints(contactPoints) + .withPort(scassandras.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); - ccm().stop(2); - TimeUnit.SECONDS.sleep(1); - assertThat(reconnectionAttempts.get()).isEqualTo(2); + try { + cluster.init(); + occurrencesByHost.add( + cluster + .manager + .controlConnection + .connectedHost() + .getEndPoint() + .resolve() + .getAddress()); + } finally { + cluster.close(); + } + } + if (logger.isDebugEnabled()) { + Map hostCounts = + Maps.toMap( + occurrencesByHost.elementSet(), + new Function() { + @Override + public Integer apply(InetAddress input) { + return occurrencesByHost.count(input); + } + }); + logger.debug("Control Connection Use Counts by Host: {}", hostCounts); + } + // There is an incredibly low chance that a host may not be used based on randomness. + // This probability is very low however. + assertThat(occurrencesByHost.elementSet().size()) + .as( + "Not all hosts were used as contact points. There is a very small chance" + + " of this happening based on randomness, investigate whether or not this" + + " is a bug.") + .isEqualTo(hostCount); + } finally { + scassandras.stop(); + } + } + + /** + * @return Configurations of columns that are missing, whether or not the peers_v2 table should be + * present and whether or not an extended peer check is required to fail validation. + */ + @DataProvider + public static Object[][] disallowedNullColumnsInPeerData() { + return new Object[][] { + {"host_id", false, false}, // JAVA-2171: host_id does not require extended peer check anymore + {"data_center", false, true}, + {"rack", false, true}, + {"tokens", false, true}, + {"data_center,rack,tokens", false, true}, + {"rpc_address", false, false}, + {"host_id", true, false}, + {"data_center", true, true}, + {"rack", true, true}, + {"tokens", true, true}, + {"data_center,rack,tokens", true, true}, + {"native_address", true, false}, + {"native_port", true, false}, + {"native_address,native_port", true, false}, + }; + } + + /** + * Validates that if the com.datastax.driver.EXTENDED_PEER_CHECK system property is set to true + * that a peer with null values for host_id, data_center, rack, or tokens is ignored. + * + * @test_category host:metadata + * @jira_ticket JAVA-852 + * @since 2.1.10 + */ + @Test(groups = "isolated", dataProvider = "disallowedNullColumnsInPeerData") + @CCMConfig(createCcm = false) + public void should_ignore_peer_if_extended_peer_check_is_enabled( + String columns, + boolean withPeersV2, + @SuppressWarnings("unused") boolean extendPeerCheckRequired) { + System.setProperty("com.datastax.driver.EXTENDED_PEER_CHECK", "true"); + run_with_null_peer_info(columns, false, withPeersV2); + } + + /** + * Validates that a peer with null values for host_id, data_center, rack, or tokens is ignored. 
+ * + * @test_category host:metadata + * @jira_ticket JAVA-852 + * @since 2.1.10 + */ + @Test(groups = "short", dataProvider = "disallowedNullColumnsInPeerData") + @CCMConfig(createCcm = false) + public void should_ignore_and_warn_peers_with_null_entries_by_default( + String columns, + boolean withPeersV2, + @SuppressWarnings("unused") boolean extendedPeerCheckRequired) { + run_with_null_peer_info(columns, false, withPeersV2); + } + + static void run_with_null_peer_info(String columns, boolean expectPeer2, boolean withPeersV2) { + // given: A cluster with peer 2 having a null rack. + ScassandraCluster.ScassandraClusterBuilder builder = ScassandraCluster.builder().withNodes(3); + + if (withPeersV2) { + builder.withPeersV2(true); } - /** - * Test for JAVA-509: UDT definitions were not properly parsed when using the default protocol version. - *
    - * This did not appear with other tests because the UDT needs to already exist when the driver initializes. - * Therefore we use two different driver instances in this test. - */ - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_parse_UDT_definitions_when_using_default_protocol_version() { - // First driver instance: create UDT - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); - Session session = cluster.connect(); - session.execute("create keyspace ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("create type ks.foo (i int)"); - cluster.close(); - - // Second driver instance: read UDT definition - Cluster cluster2 = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); - UserType fooType = cluster2.getMetadata().getKeyspace("ks").getUserType("foo"); + StringBuilder columnDataBuilder = new StringBuilder(); + for (String column : columns.split(",")) { + builder = builder.forcePeerInfo(1, 2, column, null); + columnDataBuilder.append(String.format("%s=null, ", column)); + } - assertThat(fooType.getFieldNames()).containsExactly("i"); + String columnData = columnDataBuilder.toString(); + if (columnData.endsWith(", ")) { + columnData = columnData.substring(0, columnData.length() - 2); } - /** - * Ensures that if the host that the Control Connection is connected to is removed/decommissioned that the - * Control Connection is reestablished to another host. - * - * @jira_ticket JAVA-597 - * @expected_result Control Connection is reestablished to another host. - * @test_category control_connection - * @since 2.0.9 - */ - @Test(groups = "long") - @CCMConfig(numberOfNodes = 3) - public void should_reestablish_if_control_node_decommissioned() throws InterruptedException { - InetSocketAddress firstHost = ccm().addressOfNode(1); - Cluster cluster = register(Cluster.builder() - .addContactPoints(firstHost.getAddress()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build()); - cluster.init(); + ScassandraCluster scassandraCluster = builder.build(); - // Ensure the control connection host is that of the first node. - InetAddress controlHost = cluster.manager.controlConnection.connectedHost().getAddress(); - assertThat(controlHost).isEqualTo(firstHost.getAddress()); + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); - // Decommission the node. - ccm().decommission(1); + // Capture logs to ensure appropriate warnings are logged. + org.apache.log4j.Logger cLogger = org.apache.log4j.Logger.getLogger("com.datastax.driver.core"); + Level originalLevel = cLogger.getLevel(); + if (originalLevel != null && !originalLevel.isGreaterOrEqual(Level.WARN)) { + cLogger.setLevel(Level.WARN); + } + MemoryAppender logs = new MemoryAppender(); + cLogger.addAppender(logs); + + try { + scassandraCluster.init(); + + // when: Initializing a cluster instance and grabbing metadata. + cluster.init(); + + InetAddress node2Address = scassandraCluster.address(2).getAddress(); + String invalidValues = + withPeersV2 + ? 
columnData + : String.format( + "missing native_transport_address, missing native_transport_port, missing native_transport_port_ssl, %s", + columnData); + String expectedError = + String.format( + "Found invalid row in system.peers: [peer=%s, %s]. " + + "This is likely a gossip or snitch issue, this host will be ignored.", + node2Address, invalidValues); + String log = logs.get(); + // then: A peer with a null rack should not show up in host metadata, unless allowed via + // system property. + if (expectPeer2) { + assertThat(cluster.getMetadata().getAllHosts()) + .hasSize(3) + .extractingResultOf("getAddress") + .contains(node2Address); + + assertThat(log).doesNotContain(expectedError); + } else { + assertThat(cluster.getMetadata().getAllHosts()) + .hasSize(2) + .extractingResultOf("getAddress") + .doesNotContain(node2Address); + + assertThat(log).containsOnlyOnce(expectedError); + } + } finally { + cLogger.removeAppender(logs); + cLogger.setLevel(originalLevel); + cluster.close(); + scassandraCluster.stop(); + } + } + + /** + * Ensures that when a node changes its broadcast address (for example, after a shutdown and + * startup on EC2 and its public IP has changed), the driver will be able to detect that change + * and recognize the host in the system.peers table in spite of that change. + * + * @jira_ticket JAVA-1038 + * @expected_result The driver should be able to detect that a host has changed its broadcast + * address and update its metadata accordingly. + * @test_category control_connection + * @since 2.1.10 + */ + @SuppressWarnings("unchecked") + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_fetch_whole_peers_table_if_broadcast_address_changed() + throws UnknownHostException { + ScassandraCluster scassandras = ScassandraCluster.builder().withNodes(2).build(); + scassandras.init(); + + InetSocketAddress node2RpcAddress = scassandras.address(2); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + + cluster.init(); + + Host host2 = cluster.getMetadata().getHost(node2RpcAddress); + assertThat(host2).isNotNull(); + + InetSocketAddress node2OldBroadcastAddress = host2.getBroadcastSocketAddress(); + InetSocketAddress node2NewBroadcastAddress = + new InetSocketAddress(InetAddress.getByName("1.2.3.4"), scassandras.getBinaryPort()); + + // host 2 has the old broadcast_address (which is identical to its rpc_broadcast_address) + assertThat(host2.getEndPoint().resolve().getAddress()) + .isEqualTo(node2OldBroadcastAddress.getAddress()); + + // simulate a change in host 2 public IP + Map rows = + ImmutableMap.builder() + .put( + "peer", node2NewBroadcastAddress.getAddress()) // new broadcast address for host 2 + .put( + "rpc_address", + host2 + .getEndPoint() + .resolve() + .getAddress()) // rpc_broadcast_address remains unchanged + .put("host_id", host2.getHostId()) + .put("data_center", datacenter(1)) + .put("rack", "rack1") + .put("release_version", "2.1.8") + .put("tokens", ImmutableSet.of(Long.toString(scassandras.getTokensForDC(1).get(1)))) + .build(); + + scassandras.node(1).primingClient().clearAllPrimes(); + + // the driver will attempt to locate host2 in system.peers by its old broadcast address, and + // that will fail + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery( + "SELECT * FROM system.peers WHERE peer='" + + 
node2OldBroadcastAddress.getAddress().getHostAddress() + + "'") + .withThen(then().withColumnTypes(SELECT_PEERS).build()) + .build()); + + // the driver will then attempt to fetch the whole system.peers + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers") + .withThen(then().withColumnTypes(SELECT_PEERS).withRows(rows).build()) + .build()); + + assertThat(cluster.manager.controlConnection.refreshNodeInfo(host2)).isTrue(); + + host2 = cluster.getMetadata().getHost(node2RpcAddress); + + // host2 should now have a new broadcast address + assertThat(host2).isNotNull(); + assertThat(host2.getBroadcastSocketAddress().getAddress()) + .isEqualTo(node2NewBroadcastAddress.getAddress()); + + // host 2 should keep its old rpc broadcast address + assertThat(host2.getEndPoint().resolve()).isEqualTo(node2RpcAddress); + + } finally { + cluster.close(); + scassandras.stop(); + } + } + + /** + * Ensures that multiple C* nodes can share the same ip address (but use different port) if they + * support the system.peers_v2 table. + * + * @jira_ticket JAVA-1388 + * @since 3.6.0 + */ + @Test(groups = "short", dataProviderClass = DataProviders.class, dataProvider = "bool") + @CCMConfig(createCcm = false) + public void should_use_port_from_peers_v2_table(boolean sharedIP) { + ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(5).withPeersV2(true).withSharedIP(sharedIP).build(); + + Cluster.Builder builder = + Cluster.builder() + .addContactPointsWithPorts(sCluster.address(1)) + .withNettyOptions(nonQuietClusterCloseOptions); + + // need to specify port in non peers_v2 case as driver can't infer ports without it. + if (!sharedIP) { + builder.withPort(sCluster.getBinaryPort()); + } - // Ensure that the new control connection is not null and it's host is not equal to the decommissioned node. - Host newHost = cluster.manager.controlConnection.connectedHost(); - assertThat(newHost).isNotNull(); - assertThat(newHost.getAddress()).isNotEqualTo(controlHost); + Cluster cluster = builder.build(); + + try { + sCluster.init(); + cluster.connect(); + assertThat(cluster.getMetadata().getAllHosts()).hasSize(5); + + Set uniqueAddresses = new HashSet(); + Set uniqueSocketAddresses = new HashSet(); + for (int i = 1; i <= 5; i++) { + Host host = sCluster.host(cluster, 1, i); + + // host is up and broadcast address matches what was configured. + assertThat(host) + .isNotNull() + .isUp() + .hasSocketAddress(sCluster.address(i)) + .hasBroadcastSocketAddress(sCluster.listenAddress(i)); + + // host should only have listen address if it is control connection, and the + // address should match what was configured. + if (i == 1) { + assertThat(host).hasListenSocketAddress(sCluster.listenAddress(i)); + } else { + assertThat(host).hasNoListenSocketAddress(); + } + uniqueAddresses.add(host.getEndPoint().resolve().getAddress()); + uniqueSocketAddresses.add(host.getEndPoint().resolve()); + } + + if (!sharedIP) { + // each host should have its own address + assertThat(uniqueAddresses).hasSize(5); + } else { + // all hosts share the same ip... + assertThat(uniqueAddresses).hasSize(1); + // but have a unique port. + assertThat(uniqueSocketAddresses).hasSize(5); + } + } finally { + cluster.close(); + sCluster.stop(); } + } + + /** + * Ensures that if cluster does not have the system.peers_v2 table that cluster initialization + * still succeeds. 
+ * + * @jira_ticket JAVA-1388 + * @since 3.6.0 + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_connect_when_peers_v2_table_not_present() { + ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(5).withPeersV2(false).build(); + + Cluster cluster = + Cluster.builder() + .addContactPointsWithPorts(sCluster.address(1)) + .withNettyOptions(nonQuietClusterCloseOptions) + .withPort(sCluster.getBinaryPort()) + .build(); + + try { + sCluster.init(); + cluster.connect(); + + assertThat(cluster.getMetadata().getAllHosts()).hasSize(5); + } finally { + cluster.close(); + sCluster.stop(); + } + } + + /** + * Cassandra 4.0 supports native_address and native_port columns in system.peers_v2. We want to + * validate our ability to build correct metadata when drawing data from these tables. + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_address_port_from_peersv2() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peersV2("native_address", expectedAddress) + .peersV2("native_port", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** DSE 6.8 includes native_transport_address and native_transport_port in system.peers. */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_transport_address_port_from_peers() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peers("native_transport_address", expectedAddress) + .peers("native_transport_port", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** + * If both native_transport_port and native_transport_port_ssl are present we expect the latter to + * be selected if the Cluster is created with SSL support (i.e. if {@link + * Cluster.Builder#withSSL()} is used). + */ + @Test(groups = "short", enabled = false /* Requires SSL support in scassandra */) + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_transport_address_port_ssl_from_peers() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peers("native_transport_address", expectedAddress) + .peers("native_transport_port", expectedPort - 100) + .peers("native_transport_port_ssl", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** + * The default case. 
If we can't get native_address/port out of system.peers_v2 or + * native_transport_address/port out of system.peers the fall back to rpc_address + a default port + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_rpc_address_from_peers() throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + PeerRowState state = + PeerRowState.builder() + .peers("rpc_address", expectedAddress) + /* DefaultEndPointFactory isn't happy if we don't have a value for + * both peer and rpc_address */ + .peers("peer", InetAddress.getByName("1.2.3.4")) + .expectedAddress(expectedAddress) + .build(); + runPeerTest(state); + } + + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_port_using_rpc_port_from_local() throws UnknownHostException { + InetAddress expectedAddress = InetAddress.getByName("1.2.3.4"); + int expectedPort = 29042; + PeerRowState state = + PeerRowState.builder() + .local("rpc_address", expectedAddress) + .local("rpc_port", expectedPort) + .build(); + + ScassandraCluster scassandras = + ScassandraCluster.builder().withNodes(2).withPeersV2(state.usePeersV2()).build(); + scassandras.init(); + + Cluster cluster = null; + try { + scassandras.node(1).primingClient().clearAllPrimes(); + PrimingClient primingClient = scassandras.node(1).primingClient(); + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.local WHERE key='local'") + .withThen( + then() + .withColumnTypes(SELECT_LOCAL_RPC_ADDRESS_AND_PORT) + .withRows(state.getLocalRow()) + .build()) + .build()); + cluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + cluster.connect(); + + assertThat(cluster.manager.getControlConnection().connectedHost().getBroadcastRpcAddress()) + .isEqualTo(new InetSocketAddress(expectedAddress, expectedPort)); + } finally { + if (cluster != null) cluster.close(); + scassandras.stop(); + } + } - /** - *
    - * Ensures that contact points are randomized when determining the initial control connection - * by default. Initializes a cluster with 5 contact points 100 times and ensures that all 5 - * were used. - *
    - * - * @jira_ticket JAVA-618 - * @expected_result All 5 hosts were chosen within 100 attempts. There is a very small possibility - * that this may not be the case and this is not actually an error. - * @test_category control_connection - * @since 2.0.11, 2.1.8, 2.2.0 - */ - @Test(groups = "short") - @CCMConfig(createCcm = false) - public void should_randomize_contact_points_when_determining_control_connection() { - int hostCount = 5; - int iterations = 100; - ScassandraCluster scassandras = ScassandraCluster.builder().withNodes(hostCount).build(); - scassandras.init(); + private void runPeerTest(PeerRowState state) { - try { - Collection contactPoints = newArrayList(); - for (int i = 1; i <= hostCount; i++) { - contactPoints.add(scassandras.address(i).getAddress()); - } - final HashMultiset occurrencesByHost = HashMultiset.create(hostCount); - for (int i = 0; i < iterations; i++) { - Cluster cluster = Cluster.builder() - .addContactPoints(contactPoints) - .withPort(scassandras.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - cluster.init(); - occurrencesByHost.add(cluster.manager.controlConnection.connectedHost().getAddress()); - } finally { - cluster.close(); + ScassandraCluster scassandras = + ScassandraCluster.builder().withNodes(2).withPeersV2(state.usePeersV2()).build(); + scassandras.init(); + + Cluster cluster = null; + try { + + scassandras.node(1).primingClient().clearAllPrimes(); + + PrimingClient primingClient = scassandras.node(1).primingClient(); + + /* Note that we always prime system.local; ControlConnection.refreshNodeAndTokenMap() gets angry + * if this is empty */ + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.local WHERE key='local'") + .withThen(then().withColumnTypes(SELECT_LOCAL).withRows(state.getLocalRow()).build()) + .build()); + + if (state.shouldPrimePeers()) { + + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers") + .withThen( + then() + .withColumnTypes(state.isDse68() ? SELECT_PEERS_DSE68 : SELECT_PEERS) + .withRows(state.getPeersRow()) + .build()) + .build()); + } + if (state.shouldPrimePeersV2()) { + + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen( + then().withColumnTypes(SELECT_PEERS_V2).withRows(state.getPeersV2Row()).build()) + .build()); + } else { + + /* Must return an error code in this case in order to trigger the driver's downgrade to system.peers */ + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen(then().withResult(Result.invalid).build())); + } + + cluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + cluster.connect(); + + Collection hostEndPoints = + Collections2.transform( + cluster.getMetadata().allHosts(), + new Function() { + public EndPoint apply(Host host) { + return host.getEndPoint(); } - } - if (logger.isDebugEnabled()) { - Map hostCounts = Maps.toMap(occurrencesByHost.elementSet(), new Function() { - @Override - public Integer apply(InetAddress input) { - return occurrencesByHost.count(input); - } - }); - logger.debug("Control Connection Use Counts by Host: {}", hostCounts); - } - // There is an incredibly low chance that a host may not be used based on randomness. - // This probability is very low however. 
- assertThat(occurrencesByHost.elementSet().size()) - .as("Not all hosts were used as contact points. There is a very small chance" - + " of this happening based on randomness, investigate whether or not this" - + " is a bug.") - .isEqualTo(hostCount); - } finally { - scassandras.stop(); - } + }); + assertThat(hostEndPoints).contains(state.getExpectedEndPoint(scassandras)); + } finally { + if (cluster != null) cluster.close(); + scassandras.stop(); } + } - @DataProvider - public static Object[][] disallowedNullColumnsInPeerData() { - return new Object[][]{ - {"host_id"}, - {"data_center"}, - {"rack"}, - {"tokens"}, - {"host_id,data_center,rack,tokens"} - }; - } + static class PeerRowState { - /** - * Validates that if the com.datastax.driver.EXTENDED_PEER_CHECK system property is set to true that a peer - * with null values for host_id, data_center, rack, or tokens is ignored. - * - * @test_category host:metadata - * @jira_ticket JAVA-852 - * @since 2.1.10 - */ - @Test(groups = "isolated", dataProvider = "disallowedNullColumnsInPeerData") - @CCMConfig(createCcm = false) - public void should_ignore_peer_if_extended_peer_check_is_enabled(String columns) { - System.setProperty("com.datastax.driver.EXTENDED_PEER_CHECK", "true"); - run_with_null_peer_info(columns, false); - } + private final ImmutableMap peers; + private final ImmutableMap peersV2; + private final ImmutableMap local; - /** - * Validates that a peer with null values for host_id, data_center, rack, or tokens is ignored. - * - * @test_category host:metadata - * @jira_ticket JAVA-852 - * @since 2.1.10 - */ - @Test(groups = "short", dataProvider = "disallowedNullColumnsInPeerData") - @CCMConfig(createCcm = false) - public void should_ignore_and_warn_peers_with_null_entries_by_default(String columns) { - run_with_null_peer_info(columns, false); - } + private final InetAddress expectedAddress; + private final Optional expectedPort; - static void run_with_null_peer_info(String columns, boolean expectPeer2) { - // given: A cluster with peer 2 having a null rack. - ScassandraCluster.ScassandraClusterBuilder builder = ScassandraCluster.builder() - .withNodes(3); + private final boolean shouldPrimePeers; + private final boolean shouldPrimePeersV2; - StringBuilder columnDataBuilder = new StringBuilder(); - for (String column : columns.split(",")) { - builder = builder.forcePeerInfo(1, 2, column, null); - columnDataBuilder.append(String.format("%s=null, ", column)); - } + private PeerRowState( + ImmutableMap peers, + ImmutableMap peersV2, + ImmutableMap local, + InetAddress expectedAddress, + Optional expectedPort, + boolean shouldPrimePeers, + boolean shouldPrimePeersV2) { + this.peers = peers; + this.peersV2 = peersV2; + this.local = local; - String columnData = columnDataBuilder.toString(); - if (columnData.endsWith(", ")) { - columnData = columnData.substring(0, columnData.length() - 2); - } + this.expectedAddress = expectedAddress; + this.expectedPort = expectedPort; - ScassandraCluster scassandraCluster = builder.build(); + this.shouldPrimePeers = shouldPrimePeers; + this.shouldPrimePeersV2 = shouldPrimePeersV2; + } - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); + public static Builder builder() { + return new Builder(); + } - // Capture logs to ensure appropriate warnings are logged. 
- org.apache.log4j.Logger cLogger = org.apache.log4j.Logger.getLogger("com.datastax.driver.core"); - Level originalLevel = cLogger.getLevel(); - if (originalLevel != null && !originalLevel.isGreaterOrEqual(Level.WARN)) { - cLogger.setLevel(Level.WARN); - } - MemoryAppender logs = new MemoryAppender(); - cLogger.addAppender(logs); + public boolean usePeersV2() { + return !this.peersV2.isEmpty(); + } - try { - scassandraCluster.init(); - - // when: Initializing a cluster instance and grabbing metadata. - cluster.init(); - - InetAddress node2Address = scassandraCluster.address(2).getAddress(); - String expectedError = String.format("Found invalid row in system.peers: [peer=%s, %s]. " + - "This is likely a gossip or snitch issue, this host will be ignored.", node2Address, columnData); - String log = logs.get(); - // then: A peer with a null rack should not show up in host metadata, unless allowed via system property. - if (expectPeer2) { - assertThat(cluster.getMetadata().getAllHosts()) - .hasSize(3) - .extractingResultOf("getAddress") - .contains(node2Address); - - assertThat(log).doesNotContain(expectedError); - } else { - assertThat(cluster.getMetadata().getAllHosts()) - .hasSize(2) - .extractingResultOf("getAddress") - .doesNotContain(node2Address); - - assertThat(log) - .containsOnlyOnce(expectedError); - } - } finally { - cLogger.removeAppender(logs); - cLogger.setLevel(originalLevel); - cluster.close(); - scassandraCluster.stop(); - } + public boolean isDse68() { + return this.peers.containsKey("native_transport_address") + || this.peers.containsKey("native_transport_port") + || this.peers.containsKey("native_transport_port_ssl"); } - /** - * Ensures that when a node changes its broadcast address (for example, after - * a shutdown and startup on EC2 and its public IP has changed), - * the driver will be able to detect that change and recognize the host - * in the system.peers table in spite of that change. - * - * @jira_ticket JAVA-1038 - * @expected_result The driver should be able to detect that a host has changed its broadcast address - * and update its metadata accordingly. 
- * @test_category control_connection - * @since 2.1.10 - */ - @SuppressWarnings("unchecked") - @Test(groups = "short") - @CCMConfig(createCcm = false) - public void should_fetch_whole_peers_table_if_broadcast_address_changed() throws UnknownHostException { - ScassandraCluster scassandras = ScassandraCluster.builder().withNodes(2).build(); - scassandras.init(); - - InetSocketAddress node2RpcAddress = scassandras.address(2); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandras.address(1).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); + public boolean shouldPrimePeers() { + return this.shouldPrimePeers; + } - try { + public boolean shouldPrimePeersV2() { + return this.shouldPrimePeersV2; + } - cluster.init(); - - Host host2 = cluster.getMetadata().getHost(node2RpcAddress); - assertThat(host2).isNotNull(); - - InetAddress node2OldBroadcastAddress = host2.getBroadcastAddress(); - InetAddress node2NewBroadcastAddress = InetAddress.getByName("1.2.3.4"); - - // host 2 has the old broadcast_address (which is identical to its rpc_broadcast_address) - assertThat(host2.getAddress()) - .isEqualTo(node2OldBroadcastAddress); - - // simulate a change in host 2 public IP - Map rows = ImmutableMap.builder() - .put("peer", node2NewBroadcastAddress) // new broadcast address for host 2 - .put("rpc_address", host2.getAddress()) // rpc_broadcast_address remains unchanged - .put("host_id", UUID.randomUUID()) - .put("data_center", datacenter(1)) - .put("rack", "rack1") - .put("release_version", "2.1.8") - .put("tokens", ImmutableSet.of(Long.toString(scassandras.getTokensForDC(1).get(1)))) - .build(); - - scassandras.node(1).primingClient().clearAllPrimes(); - - // the driver will attempt to locate host2 in system.peers by its old broadcast address, and that will fail - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("SELECT * FROM system.peers WHERE peer='" + node2OldBroadcastAddress + "'") - .withThen(then() - .withColumnTypes(SELECT_PEERS) - .build()) - .build()); - - // the driver will then attempt to fetch the whole system.peers - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("SELECT * FROM system.peers") - .withThen(then() - .withColumnTypes(SELECT_PEERS) - .withRows(rows) - .build()) - .build()); - - assertThat(cluster.manager.controlConnection.refreshNodeInfo(host2)).isTrue(); - - host2 = cluster.getMetadata().getHost(node2RpcAddress); - - // host2 should now have a new broadcast address - assertThat(host2).isNotNull(); - assertThat(host2.getBroadcastAddress()) - .isEqualTo(node2NewBroadcastAddress); - - // host 2 should keep its old rpc broadcast address - assertThat(host2.getSocketAddress()) - .isEqualTo(node2RpcAddress); + public ImmutableMap getPeersRow() { + return this.peers; + } - } finally { - cluster.close(); - scassandras.stop(); - } + public ImmutableMap getPeersV2Row() { + return this.peersV2; + } + + public ImmutableMap getLocalRow() { + return this.local; + } + public EndPoint getExpectedEndPoint(ScassandraCluster cluster) { + return new TranslatedAddressEndPoint( + new InetSocketAddress( + this.expectedAddress, this.expectedPort.or(cluster.getBinaryPort()))); } - static class QueryPlanCountingPolicy extends DelegatingLoadBalancingPolicy { + static class Builder { + + private ImmutableMap.Builder peers = this.basePeerRow(); + private ImmutableMap.Builder peersV2 = this.basePeerRow(); + private ImmutableMap.Builder local = 
this.basePeerRow(); + + private InetAddress expectedAddress; + private Optional expectedPort = Optional.absent(); + + private boolean shouldPrimePeers = false; + private boolean shouldPrimePeersV2 = false; + + public PeerRowState build() { + return new PeerRowState( + this.peers.build(), + this.peersV2.build(), + this.local.build(), + this.expectedAddress, + this.expectedPort, + this.shouldPrimePeers, + this.shouldPrimePeersV2); + } + + public Builder peers(String name, Object val) { + this.peers.put(name, val); + this.shouldPrimePeers = true; + return this; + } + + public Builder peersV2(String name, Object val) { + this.peersV2.put(name, val); + this.shouldPrimePeersV2 = true; + return this; + } + + public Builder local(String name, Object val) { + this.local.put(name, val); + return this; + } + + public Builder expectedAddress(InetAddress address) { + this.expectedAddress = address; + return this; + } + + public Builder expectedPort(int port) { + this.expectedPort = Optional.of(port); + return this; + } + + private ImmutableMap.Builder basePeerRow() { + return ImmutableMap.builder() + /* Required to support Metadata.addIfAbsent(Host) which is used by host loading code */ + .put("host_id", UUID.randomUUID()) + /* Elements below required to pass peer row validation */ + .put("data_center", datacenter(1)) + .put("rack", "rack1") + .put("tokens", ImmutableSet.of(Long.toString(new Random().nextLong()))); + } + } + } - final AtomicInteger counter = new AtomicInteger(); + static class QueryPlanCountingPolicy extends DelegatingLoadBalancingPolicy { - public QueryPlanCountingPolicy(LoadBalancingPolicy delegate) { - super(delegate); - } + final AtomicInteger counter = new AtomicInteger(); - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - counter.incrementAndGet(); - return super.newQueryPlan(loggedKeyspace, statement); - } + public QueryPlanCountingPolicy(LoadBalancingPolicy delegate) { + super(delegate); + } + + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + counter.incrementAndGet(); + return super.newQueryPlan(loggedKeyspace, statement); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CountingReconnectionPolicy.java b/driver-core/src/test/java/com/datastax/driver/core/CountingReconnectionPolicy.java index 2928bd6cb47..d5457421e0f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CountingReconnectionPolicy.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CountingReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,46 +18,41 @@ package com.datastax.driver.core; import com.datastax.driver.core.policies.ReconnectionPolicy; - import java.util.concurrent.atomic.AtomicInteger; -/** - * A reconnection policy that tracks how many times its schedule has been invoked. - */ +/** A reconnection policy that tracks how many times its schedule has been invoked. */ public class CountingReconnectionPolicy implements ReconnectionPolicy { - public final AtomicInteger count = new AtomicInteger(); - private final ReconnectionPolicy childPolicy; + public final AtomicInteger count = new AtomicInteger(); + private final ReconnectionPolicy childPolicy; - public CountingReconnectionPolicy(ReconnectionPolicy childPolicy) { - this.childPolicy = childPolicy; - } + public CountingReconnectionPolicy(ReconnectionPolicy childPolicy) { + this.childPolicy = childPolicy; + } - @Override - public ReconnectionSchedule newSchedule() { - return new CountingSchedule(childPolicy.newSchedule()); - } - - class CountingSchedule implements ReconnectionSchedule { - private final ReconnectionSchedule childSchedule; + @Override + public ReconnectionSchedule newSchedule() { + return new CountingSchedule(childPolicy.newSchedule()); + } - public CountingSchedule(ReconnectionSchedule childSchedule) { - this.childSchedule = childSchedule; - } + class CountingSchedule implements ReconnectionSchedule { + private final ReconnectionSchedule childSchedule; - @Override - public long nextDelayMs() { - count.incrementAndGet(); - return childSchedule.nextDelayMs(); - } + public CountingSchedule(ReconnectionSchedule childSchedule) { + this.childSchedule = childSchedule; } @Override - public void init(Cluster cluster) { + public long nextDelayMs() { + count.incrementAndGet(); + return childSchedule.nextDelayMs(); } + } - @Override - public void close() { - childPolicy.close(); - } + @Override + public void init(Cluster cluster) {} + @Override + public void close() { + childPolicy.close(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CreateCCM.java b/driver-core/src/test/java/com/datastax/driver/core/CreateCCM.java index 878e84e0d67..82c71468d72 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CreateCCM.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CreateCCM.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,50 +17,47 @@ */ package com.datastax.driver.core; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - import static java.lang.annotation.ElementType.TYPE; import static java.lang.annotation.RetentionPolicy.RUNTIME; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + @Retention(RUNTIME) @Target(TYPE) public @interface CreateCCM { - enum TestMode { + enum TestMode { - /** - * When this mode is selected, one single CCM cluster will - * be shared for all tests in the test class. - *

    - * When this mode is selected, only class-level {@link CCMConfig} annotations
    - * are processed; method-level annotations are ignored.
    - *
    - * This mode usually runs faster, but care should be taken
    - * not to alter the CCM cluster in a test in such a way
    - * that subsequent tests could fail.
    - */
    - PER_CLASS,
    -
    - /**
    - * When this mode is selected, a different CCM cluster
    - * will be used for each test in the test class.
    - *
    - * When this mode is selected, both class-level and method-level {@link CCMConfig} annotations
    - * are processed; the test configuration results from the merge of
    - * both annotations, if both are present (method-level annotations
    - * override class-level ones).
    - *
    - * This mode is slower, but is safer to use
    - * if a test method alters the CCM cluster.
    - */
    - PER_METHOD
    - }
    + /**
    + * When this mode is selected, one single CCM cluster will be shared for all tests in the test
    + * class.
    + *
    + *

    When this mode is selected, only class-level {@link CCMConfig} annotations are processed;
    + * method-level annotations are ignored.
    + *
    + *
    This mode usually runs faster, but care should be taken not to alter the CCM cluster in a
    + * test in such a way that subsequent tests could fail.
    + */
    + PER_CLASS,
    /**
    - * The test mode to use for tests in this class.
    + * When this mode is selected, a different CCM cluster will be used for each test in the test
    + * class.
    + *
    + *
    When this mode is selected, both class-level and method-level {@link CCMConfig}
    + * annotations are processed; the test configuration results from the merge of both annotations,
    + * if both are present (method-level annotations override class-level ones).
    *
    - * @return The test mode to use for tests in this class.
    + *

    This mode is slower, but is safer to use if a test method alters the CCM cluster. */ - TestMode value() default TestMode.PER_CLASS; + PER_METHOD + } + + /** + * The test mode to use for tests in this class. + * + * @return The test mode to use for tests in this class. + */ + TestMode value() default TestMode.PER_CLASS; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CustomPayloadTest.java b/driver-core/src/test/java/com/datastax/driver/core/CustomPayloadTest.java index c4c7a1eed1c..4509d51eb03 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CustomPayloadTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CustomPayloadTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +17,6 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.UnsupportedFeatureException; -import com.datastax.driver.core.utils.CassandraVersion; -import com.google.common.collect.ImmutableMap; -import org.apache.log4j.Logger; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; - import static com.datastax.driver.core.ProtocolVersion.V3; import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; import static com.datastax.driver.core.querybuilder.QueryBuilder.select; @@ -33,242 +24,259 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Fail.fail; +import com.datastax.driver.core.exceptions.UnsupportedFeatureException; +import com.datastax.driver.core.utils.CassandraVersion; +import com.google.common.collect.ImmutableMap; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; +import org.apache.log4j.Logger; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + @CassandraVersion("2.2.0") -@CCMConfig(jvmArgs = "-Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler") +@CCMConfig( + jvmArgs = + "-Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler") public class CustomPayloadTest extends CCMTestsSupport { - private Map payload1; + private Map payload1; - private Map payload2; + private Map payload2; - @BeforeMethod(groups = {"short", "unit"}) - public void initPayloads() { - payload1 = ImmutableMap.of( - "k1", ByteBuffer.wrap(new byte[]{1, 2, 3}), - "k2", ByteBuffer.wrap(new byte[]{4, 5, 6}) - ); - payload2 = ImmutableMap.of( - "k2", ByteBuffer.wrap(new byte[]{1, 2}), - "k3", ByteBuffer.wrap(new byte[]{3, 4}) - ); - } + @BeforeMethod(groups = {"short", "unit"}) + public void 
initPayloads() { + payload1 = + ImmutableMap.of( + "k1", ByteBuffer.wrap(new byte[] {1, 2, 3}), + "k2", ByteBuffer.wrap(new byte[] {4, 5, 6})); + payload2 = + ImmutableMap.of( + "k2", ByteBuffer.wrap(new byte[] {1, 2}), + "k3", ByteBuffer.wrap(new byte[] {3, 4})); + } - // execute + // execute - @Test(groups = "short") - public void should_echo_custom_payload_when_executing_statement() throws Exception { - Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); - statement.setOutgoingPayload(payload1); - ResultSet rows = session().execute(statement); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - } + @Test(groups = "short") + public void should_echo_custom_payload_when_executing_statement() throws Exception { + Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); + statement.setOutgoingPayload(payload1); + ResultSet rows = session().execute(statement); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + } - @Test(groups = "short") - public void should_echo_custom_payload_when_executing_batch_statement() throws Exception { - Statement statement = new BatchStatement().add(new SimpleStatement("INSERT INTO t1 (c1, c2) values (1, 'foo')")); - statement.setOutgoingPayload(payload1); - ResultSet rows = session().execute(statement); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - } + @Test(groups = "short") + public void should_echo_custom_payload_when_executing_batch_statement() throws Exception { + Statement statement = + new BatchStatement().add(new SimpleStatement("INSERT INTO t1 (c1, c2) values (1, 'foo')")); + statement.setOutgoingPayload(payload1); + ResultSet rows = session().execute(statement); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + } - @Test(groups = "short") - public void should_echo_custom_payload_when_building_statement() throws Exception { - Statement statement = select("c2").from("t1").where(eq("c1", 1)).setOutgoingPayload(payload1); - ResultSet rows = session().execute(statement); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - } + @Test(groups = "short") + public void should_echo_custom_payload_when_building_statement() throws Exception { + Statement statement = select("c2").from("t1").where(eq("c1", 1)).setOutgoingPayload(payload1); + ResultSet rows = session().execute(statement); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + } - // prepare + // prepare - /** - * Ensures that an incoming payload is propagated from prepared to bound statements. 
- */ - @Test(groups = "short") - public void should_propagate_incoming_payload_to_bound_statement() throws Exception { - RegularStatement statement = new SimpleStatement("SELECT c2 as col1 FROM t1 where c1 = ?"); - statement.setOutgoingPayload(payload1); - PreparedStatement ps = session().prepare(statement); - // Prepared statement should inherit outgoing payload - assertThat(ps.getOutgoingPayload()).isEqualTo(payload1); - // Prepared statement should receive incoming payload - assertThat(ps.getIncomingPayload()).isEqualTo(payload1); - ps.setOutgoingPayload(null); // unset outgoing payload - // bound statement should inherit from prepared statement's incoming payload - BoundStatement bs = ps.bind(1); - ResultSet rows = session().execute(bs); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - bs = ps.bind(); - bs.setInt(0, 1); - rows = session().execute(bs); - actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - } + /** Ensures that an incoming payload is propagated from prepared to bound statements. */ + @Test(groups = "short") + public void should_propagate_incoming_payload_to_bound_statement() throws Exception { + RegularStatement statement = new SimpleStatement("SELECT c2 as col1 FROM t1 where c1 = ?"); + statement.setOutgoingPayload(payload1); + PreparedStatement ps = session().prepare(statement); + // Prepared statement should inherit outgoing payload + assertThat(ps.getOutgoingPayload()).isEqualTo(payload1); + // Prepared statement should receive incoming payload + assertThat(ps.getIncomingPayload()).isEqualTo(payload1); + ps.setOutgoingPayload(null); // unset outgoing payload + // bound statement should inherit from prepared statement's incoming payload + BoundStatement bs = ps.bind(1); + ResultSet rows = session().execute(bs); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + bs = ps.bind(); + bs.setInt(0, 1); + rows = session().execute(bs); + actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + } - /** - * Ensures that an incoming payload is overridden by an explicitly set outgoing payload - * when propagated to bound statements. - */ - @Test(groups = "short") - public void should_override_incoming_payload_when_outgoing_payload_explicitly_set_on_preparing_statement() throws Exception { - RegularStatement statement = new SimpleStatement("SELECT c2 as col2 FROM t1 where c1 = ?"); - statement.setOutgoingPayload(payload1); - PreparedStatement ps = session().prepare(statement); - // Prepared statement should inherit outgoing payload - assertThat(ps.getOutgoingPayload()).isEqualTo(payload1); - // Prepared statement should receive incoming payload - assertThat(ps.getIncomingPayload()).isEqualTo(payload1); - ps.setOutgoingPayload(payload2); // override outgoing payload - // bound statement should inherit from prepared statement's outgoing payload - BoundStatement bs = ps.bind(1); - ResultSet rows = session().execute(bs); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload2); - bs = ps.bind(); - bs.setInt(0, 1); - rows = session().execute(bs); - actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload2); - } + /** + * Ensures that an incoming payload is overridden by an explicitly set outgoing payload when + * propagated to bound statements. 
+ */ + @Test(groups = "short") + public void + should_override_incoming_payload_when_outgoing_payload_explicitly_set_on_preparing_statement() + throws Exception { + RegularStatement statement = new SimpleStatement("SELECT c2 as col2 FROM t1 where c1 = ?"); + statement.setOutgoingPayload(payload1); + PreparedStatement ps = session().prepare(statement); + // Prepared statement should inherit outgoing payload + assertThat(ps.getOutgoingPayload()).isEqualTo(payload1); + // Prepared statement should receive incoming payload + assertThat(ps.getIncomingPayload()).isEqualTo(payload1); + ps.setOutgoingPayload(payload2); // override outgoing payload + // bound statement should inherit from prepared statement's outgoing payload + BoundStatement bs = ps.bind(1); + ResultSet rows = session().execute(bs); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload2); + bs = ps.bind(); + bs.setInt(0, 1); + rows = session().execute(bs); + actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload2); + } - /** - * Ensures that payloads can still be set individually on bound statements - * if the prepared statement does not have a default payload. - */ - @Test(groups = "short") - public void should_not_set_any_payload_on_bound_statement() throws Exception { - RegularStatement statement = new SimpleStatement("SELECT c2 as col3 FROM t1 where c1 = ?"); - PreparedStatement ps = session().prepare(statement); - assertThat(ps.getOutgoingPayload()).isNull(); - assertThat(ps.getIncomingPayload()).isNull(); - // bound statement should not have outgoing payload - BoundStatement bs = ps.bind(1); - assertThat(bs.getOutgoingPayload()).isNull(); - // explicitly set a payload for this boudn statement only - bs.setOutgoingPayload(payload1); - ResultSet rows = session().execute(bs); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload1); - // a second bound statement should not have any payload - bs = ps.bind(); - assertThat(bs.getOutgoingPayload()).isNull(); - bs.setInt(0, 1); - rows = session().execute(bs); - actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isNull(); - } + /** + * Ensures that payloads can still be set individually on bound statements if the prepared + * statement does not have a default payload. + */ + @Test(groups = "short") + public void should_not_set_any_payload_on_bound_statement() throws Exception { + RegularStatement statement = new SimpleStatement("SELECT c2 as col3 FROM t1 where c1 = ?"); + PreparedStatement ps = session().prepare(statement); + assertThat(ps.getOutgoingPayload()).isNull(); + assertThat(ps.getIncomingPayload()).isNull(); + // bound statement should not have outgoing payload + BoundStatement bs = ps.bind(1); + assertThat(bs.getOutgoingPayload()).isNull(); + // explicitly set a payload for this boudn statement only + bs.setOutgoingPayload(payload1); + ResultSet rows = session().execute(bs); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload1); + // a second bound statement should not have any payload + bs = ps.bind(); + assertThat(bs.getOutgoingPayload()).isNull(); + bs.setInt(0, 1); + rows = session().execute(bs); + actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isNull(); + } - // pagination + // pagination - /** - * Ensures that a custom payload is propagated throughout pages. 
- */ - @Test(groups = "short") - public void should_echo_custom_payload_when_paginating() throws Exception { - session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'a')"); - session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'b')"); - Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = 1"); - statement.setFetchSize(1); - statement.setOutgoingPayload(payload1); - ResultSet rows = session().execute(statement); - rows.all(); - assertThat(rows.getAllExecutionInfo()).extracting("incomingPayload").containsOnly(payload1); - } + /** Ensures that a custom payload is propagated throughout pages. */ + @Test(groups = "short") + public void should_echo_custom_payload_when_paginating() throws Exception { + session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'a')"); + session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'b')"); + Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = 1"); + statement.setFetchSize(1); + statement.setOutgoingPayload(payload1); + ResultSet rows = session().execute(statement); + rows.all(); + assertThat(rows.getAllExecutionInfo()).extracting("incomingPayload").containsOnly(payload1); + } - // TODO retries, spec execs + // TODO retries, spec execs - // edge cases + // edge cases - @Test(groups = "short") - public void should_encode_null_values() throws Exception { - Map payload = new HashMap(); - payload.put("k1", Statement.NULL_PAYLOAD_VALUE); - Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); - statement.setOutgoingPayload(payload); - ResultSet rows = session().execute(statement); - Map actual = rows.getExecutionInfo().getIncomingPayload(); - assertThat(actual).isEqualTo(payload); - } + @Test(groups = "short") + public void should_encode_null_values() throws Exception { + Map payload = new HashMap(); + payload.put("k1", Statement.NULL_PAYLOAD_VALUE); + Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); + statement.setOutgoingPayload(payload); + ResultSet rows = session().execute(statement); + Map actual = rows.getExecutionInfo().getIncomingPayload(); + assertThat(actual).isEqualTo(payload); + } - @Test(groups = "unit", expectedExceptions = NullPointerException.class) - public void should_throw_npe_when_null_key_on_regular_statement() throws Exception { - Map payload = new HashMap(); - payload.put(null, ByteBuffer.wrap(new byte[]{1})); - new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1).setOutgoingPayload(payload); - } + @Test(groups = "unit", expectedExceptions = NullPointerException.class) + public void should_throw_npe_when_null_key_on_regular_statement() throws Exception { + Map payload = new HashMap(); + payload.put(null, ByteBuffer.wrap(new byte[] {1})); + new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1).setOutgoingPayload(payload); + } - @Test(groups = "unit", expectedExceptions = NullPointerException.class) - public void should_throw_npe_when_null_value_on_regular_statement() throws Exception { - Map payload = new HashMap(); - payload.put("k1", null); - new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1).setOutgoingPayload(payload); - } + @Test(groups = "unit", expectedExceptions = NullPointerException.class) + public void should_throw_npe_when_null_value_on_regular_statement() throws Exception { + Map payload = new HashMap(); + payload.put("k1", null); + new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1).setOutgoingPayload(payload); + } - @Test(groups = "short", expectedExceptions = NullPointerException.class) - public void 
should_throw_npe_when_null_key_on_prepared_statement() throws Exception { - Map payload = new HashMap(); - payload.put(null, ByteBuffer.wrap(new byte[]{1})); - session().prepare(new SimpleStatement("SELECT c2 FROM t1 where c1 = 1")).setOutgoingPayload(payload); - } + @Test(groups = "short", expectedExceptions = NullPointerException.class) + public void should_throw_npe_when_null_key_on_prepared_statement() throws Exception { + Map payload = new HashMap(); + payload.put(null, ByteBuffer.wrap(new byte[] {1})); + session() + .prepare(new SimpleStatement("SELECT c2 FROM t1 where c1 = 1")) + .setOutgoingPayload(payload); + } - @Test(groups = "short", expectedExceptions = NullPointerException.class) - public void should_throw_npe_when_null_value_on_prepared_statement() throws Exception { - Map payload = new HashMap(); - payload.put("k1", null); - session().prepare(new SimpleStatement("SELECT c2 FROM t1 where c1 = 2")).setOutgoingPayload(payload); - } + @Test(groups = "short", expectedExceptions = NullPointerException.class) + public void should_throw_npe_when_null_value_on_prepared_statement() throws Exception { + Map payload = new HashMap(); + payload.put("k1", null); + session() + .prepare(new SimpleStatement("SELECT c2 FROM t1 where c1 = 2")) + .setOutgoingPayload(payload); + } - @Test(groups = "short") - public void should_throw_ufe_when_protocol_version_lesser_than_4() throws Exception { - try { - Cluster v3cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withProtocolVersion(V3) - .build()) - .init(); - Session v3session = v3cluster.connect(); - Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); - statement.setOutgoingPayload(payload1); - v3session.execute(statement); - fail("Should not send custom payloads with protocol V3"); - } catch (UnsupportedFeatureException e) { - assertThat(e.getMessage()).isEqualTo( - "Unsupported feature with the native protocol V3 (which is currently in use): Custom payloads are only supported since native protocol V4"); - } + @Test(groups = "short") + public void should_throw_ufe_when_protocol_version_lesser_than_4() throws Exception { + try { + Cluster v3cluster = + register( + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withProtocolVersion(V3) + .build()) + .init(); + Session v3session = v3cluster.connect(); + Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); + statement.setOutgoingPayload(payload1); + v3session.execute(statement); + fail("Should not send custom payloads with protocol V3"); + } catch (UnsupportedFeatureException e) { + assertThat(e.getMessage()) + .isEqualTo( + "Unsupported feature with the native protocol V3 (which is currently in use): Custom payloads are only supported since native protocol V4"); } + } - // log messages - - /** - * Ensures that when debugging custom payloads, the driver will print appropriate log messages. 
- */ - @Test(groups = "short") - public void should_print_log_message_when_level_trace() throws Exception { - Logger logger = Logger.getLogger(Message.logger.getName()); - MemoryAppender appender = new MemoryAppender(); - try { - logger.setLevel(TRACE); - logger.addAppender(appender); - Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); - statement.setOutgoingPayload(payload1); - session().execute(statement); - String logs = appender.waitAndGet(10000); - assertThat(logs) - .contains("Sending payload: {k1:0x010203, k2:0x040506} (20 bytes total)") - .contains("Received payload: {k1:0x010203, k2:0x040506} (20 bytes total)"); - } finally { - logger.setLevel(null); - logger.removeAppender(appender); - } - } + // log messages - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE t1 (c1 int, c2 text, PRIMARY KEY (c1, c2))"); + /** + * Ensures that when debugging custom payloads, the driver will print appropriate log messages. + */ + @Test(groups = "short") + public void should_print_log_message_when_level_trace() throws Exception { + Logger logger = Logger.getLogger(Message.logger.getName()); + MemoryAppender appender = new MemoryAppender(); + try { + logger.setLevel(TRACE); + logger.addAppender(appender); + Statement statement = new SimpleStatement("SELECT c2 FROM t1 where c1 = ?", 1); + statement.setOutgoingPayload(payload1); + session().execute(statement); + String logs = appender.waitAndGet(10000); + assertThat(logs) + .contains("Sending payload: {k1:0x010203, k2:0x040506} (24 bytes total)") + .contains("Received payload: {k1:0x010203, k2:0x040506} (24 bytes total)"); + } finally { + logger.setLevel(null); + logger.removeAppender(appender); } + } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE t1 (c1 int, c2 text, PRIMARY KEY (c1, c2))"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CustomPercentileTrackerTest.java b/driver-core/src/test/java/com/datastax/driver/core/CustomPercentileTrackerTest.java index 904a072c11f..7c714a9acc7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CustomPercentileTrackerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CustomPercentileTrackerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,61 +17,69 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + import com.datastax.driver.core.exceptions.ReadTimeoutException; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.Test; - import java.util.List; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; +import org.testng.annotations.Test; public class CustomPercentileTrackerTest { - @Test(groups = "unit") - public void should_return_negative_value_when_key_cant_be_computed() { - // given - A custom tracker that returns null for a specific host and keys by host otherwise. - final Cluster cluster0 = mock(Cluster.class); - final Host host0 = mock(Host.class); - final Host host1 = mock(Host.class); + @Test(groups = "unit") + public void should_return_negative_value_when_key_cant_be_computed() { + // given - A custom tracker that returns null for a specific host and keys by host otherwise. + final Cluster cluster0 = mock(Cluster.class); + final Host host0 = mock(Host.class); + final Host host1 = mock(Host.class); - PercentileTracker tracker = new PercentileTracker(1000, 3, 100, 50) { - @Override - protected Object computeKey(Host host, Statement statement, Exception exception) { - if (host == host0) { - return host; - } else { - return null; - } + PercentileTracker tracker = + new PercentileTracker(1000, 3, 100, 50) { + @Override + protected Object computeKey(Host host, Statement statement, Exception exception) { + if (host == host0) { + return host; + } else { + return null; } + } }; - tracker.onRegister(cluster0); - - List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); - List exceptions = Lists.newArrayList(new Exception(), null, new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), null, null); + tracker.onRegister(cluster0); - // when - recording latencies over a linear progression with varying hosts, statements and exceptions. - for (int i = 0; i < 100; i++) { - tracker.update( - host0, - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), TimeUnit.NANOSECONDS.convert((i + 1) * 2, TimeUnit.MILLISECONDS)); + List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); + List exceptions = + Lists.newArrayList( + new Exception(), + null, + new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), + null, + null); - tracker.update( - host1, - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); - } - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + // when - recording latencies over a linear progression with varying hosts, statements and + // exceptions. + for (int i = 0; i < 100; i++) { + tracker.update( + host0, + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert((i + 1) * 2, TimeUnit.MILLISECONDS)); - // then - host0 should return a linear progression (i*2) since it has a tracker. 
- // host1 should return -1 since it has no tracker since it has no key. - for (int i = 1; i <= 99; i++) { - assertThat(tracker.getLatencyAtPercentile(host0, null, null, i)).isEqualTo(i * 2); - assertThat(tracker.getLatencyAtPercentile(host1, null, null, i)).isEqualTo(-1); - } + tracker.update( + host1, + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); + } + Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + // then - host0 should return a linear progression (i*2) since it has a tracker. + // host1 should return -1 since it has no tracker since it has no key. + for (int i = 1; i <= 99; i++) { + assertThat(tracker.getLatencyAtPercentile(host0, null, null, i)).isEqualTo(i * 2); + assertThat(tracker.getLatencyAtPercentile(host1, null, null, i)).isEqualTo(-1); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CustomTypeTest.java b/driver-core/src/test/java/com/datastax/driver/core/CustomTypeTest.java index ff62e4878c2..50f0015acc3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CustomTypeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CustomTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,175 +17,208 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.DataType.cint; import static com.datastax.driver.core.DataType.custom; import static com.datastax.driver.core.TestUtils.serializeForCompositeType; import static com.datastax.driver.core.TestUtils.serializeForDynamicCompositeType; -/** - * Test we "support" custom types. 
- */ -public class CustomTypeTest extends CCMTestsSupport { - - public static final DataType CUSTOM_DYNAMIC_COMPOSITE = custom( - "org.apache.cassandra.db.marshal.DynamicCompositeType(" - + "s=>org.apache.cassandra.db.marshal.UTF8Type," - + "i=>org.apache.cassandra.db.marshal.Int32Type)"); - - public static final DataType CUSTOM_COMPOSITE = custom( - "org.apache.cassandra.db.marshal.CompositeType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.Int32Type)"); - - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE test (" - + " k int," - + " c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)'," - + " c2 'ReversedType(CompositeType(UTF8Type, Int32Type))'," // reversed translates to CLUSTERING ORDER BY DESC - + " c3 'Int32Type'," // translates to int - + " PRIMARY KEY (k, c1, c2)" - + ") WITH COMPACT STORAGE", - "CREATE TABLE test_collection(" - + " k int PRIMARY KEY," - + " c1 list<'DynamicCompositeType(s => UTF8Type, i => Int32Type)'>," - + " c2 map<'DynamicCompositeType(s => UTF8Type, i => Int32Type)', 'DynamicCompositeType(s => UTF8Type, i => Int32Type)'>" - + ")" - ); - } - - /** - * Validates that columns using custom types are properly handled by the driver in the following ways: - *

    - *
    - * 1. The column metadata appropriately represents the types as {@link DataType#custom(String)}
    - * 2. ReversedType is appropriately detected and the clustering order of that column is marked as descending.
    - * 3. ColumnDefinitions for a column in a {@link Row} matches the custom type and that inserted data is read back properly.
    - * - * @jira_ticket JAVA-993 - * @test_category metadata - */ - @Test(groups = "short") - public void should_serialize_and_deserialize_custom_types() { - - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test"); - - assertThat(table.getColumn("c1")).isClusteringColumn().hasType(CUSTOM_DYNAMIC_COMPOSITE); - assertThat(table.getColumn("c2")).isClusteringColumn().hasType(CUSTOM_COMPOSITE).hasClusteringOrder(ClusteringOrder.DESC); - assertThat(table.getColumn("c3")).hasType(cint()); - - session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 's@foo:i@32', 'foo:32', 1)"); - session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 'i@42', ':42', 2)"); - session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 'i@12:i@3', 'foo', 3)"); - - ResultSet rs = session().execute("SELECT * FROM test"); - - Row r = rs.one(); - - assertThat(r.getColumnDefinitions().getType("c1")).isEqualTo(CUSTOM_DYNAMIC_COMPOSITE); - assertThat(r.getColumnDefinitions().getType("c2")).isEqualTo(CUSTOM_COMPOSITE); - assertThat(r.getColumnDefinitions().getType("c3")).isEqualTo(cint()); - - assertThat(r.getInt("k")).isEqualTo(0); - assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType(12, 3)); - assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("foo")); - assertThat(r.getInt("c3")).isEqualTo(3); - - r = rs.one(); - assertThat(r.getInt("k")).isEqualTo(0); - assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType(42)); - assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("", 42)); - assertThat(r.getInt("c3")).isEqualTo(2); - - r = rs.one(); - assertThat(r.getInt("k")).isEqualTo(0); - assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType("foo", 32)); - assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("foo", 32)); - assertThat(r.getInt("c3")).isEqualTo(1); - } +import com.datastax.driver.core.utils.CassandraVersion; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import org.testng.SkipException; +import org.testng.annotations.Test; - /** - * Validates that columns using collections of custom types are properly handled by the driver. 
- * - * @jira_ticket JAVA-1034 - * @test_category metadata - */ - @Test(groups = "short") - public void should_serialize_and_deserialize_collections_of_custom_types() { - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test_collection"); - assertThat(table.getColumn("c1")).hasType(DataType.list(CUSTOM_DYNAMIC_COMPOSITE)); - assertThat(table.getColumn("c2")).hasType(DataType.map(CUSTOM_DYNAMIC_COMPOSITE, CUSTOM_DYNAMIC_COMPOSITE)); - - session().execute("INSERT INTO test_collection(k, c1, c2) VALUES (0, [ 's@foo:i@32' ], { 's@foo:i@32': 's@bar:i@42' })"); - - Row r = session().execute("SELECT * FROM test_collection").one(); - - assertThat(r.getColumnDefinitions().getType("c1")).isEqualTo(DataType.list(CUSTOM_DYNAMIC_COMPOSITE)); - List c1 = r.getList("c1", ByteBuffer.class); - assertThat(c1.get(0)).isEqualTo(serializeForDynamicCompositeType("foo", 32)); - - assertThat(r.getColumnDefinitions().getType("c2")).isEqualTo(DataType.map(CUSTOM_DYNAMIC_COMPOSITE, CUSTOM_DYNAMIC_COMPOSITE)); - Map c2 = r.getMap("c2", ByteBuffer.class, ByteBuffer.class); - Map.Entry entry = c2.entrySet().iterator().next(); - assertThat(entry.getKey()).isEqualTo(serializeForDynamicCompositeType("foo", 32)); - assertThat(entry.getValue()).isEqualTo(serializeForDynamicCompositeType("bar", 42)); - } +/** Test we "support" custom types. */ +public class CustomTypeTest extends CCMTestsSupport { - /** - * Validates that UDTs with fields using custom types are properly handled by the driver in the following ways: - *

    - *
    - * 1. The {@link UserType} metadata appropriately represents the types of fields with custom types as {@link DataType#custom(String)}
    - * 2. {@link TableMetadata} with a column having a {@link UserType} is properly referenced.
    - * 3. ColumnDefinitions for a column in a {@link Row} matches the {@link UserType} and that inserted data is read back properly.
    - * - * @jira_ticket JAVA-993 - * @test_category metadata - */ - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_handle_udt_with_custom_type() { - // Given: a UDT with custom types, and a table using it. - session().execute("CREATE TYPE custom_udt (regular int, c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)', c2 'LongType')"); - session().execute("CREATE TABLE custom_udt_tbl (k int primary key, v frozen)"); - - // When: Retrieving User Type via schema metadata. - UserType custom_udt = cluster().getMetadata().getKeyspace(keyspace).getUserType("custom_udt"); - assertThat(custom_udt.getFieldType("regular")).isEqualTo(cint()); - // Then: The fields with custom types should be appropriately represented with their defined types. - assertThat(custom_udt.getFieldType("c1")).isEqualTo(CUSTOM_DYNAMIC_COMPOSITE); - assertThat(custom_udt.getFieldType("c2")).isEqualTo(DataType.bigint()); - - // When: Retrieving Table via schema metadata. - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("custom_udt_tbl"); - // Then: The column with the user type should be represented and match the type previously retrieved. - assertThat(table.getColumn("v")).hasType(custom_udt); - - // Given: Existing data in table with a UDT with custom types. - session().execute("INSERT INTO custom_udt_tbl (k, v) VALUES (0, {regular: 5, c1: 's@hello:i@93', c2: 400})"); - - // When: Data is retrieved. - Row row = session().execute("select * from custom_udt_tbl").one(); - - // Then: The resulting row's column definitions should match the table definition. - assertThat(row.getColumnDefinitions().getType("k")).isEqualTo(cint()); - assertThat(row.getColumnDefinitions().getType("v")).isEqualTo(custom_udt); - - // And: The column values should represent what was inserted. - UDTValue value = row.getUDTValue("v"); - assertThat(value.getInt("regular")).isEqualTo(5); - assertThat(value.getBytes("c1")).isEqualTo(serializeForDynamicCompositeType("hello", 93)); - assertThat(value.getLong("c2")).isEqualTo(400); + public static final DataType CUSTOM_DYNAMIC_COMPOSITE = + custom( + "org.apache.cassandra.db.marshal.DynamicCompositeType(" + + "s=>org.apache.cassandra.db.marshal.UTF8Type," + + "i=>org.apache.cassandra.db.marshal.Int32Type)"); + + public static final DataType CUSTOM_COMPOSITE = + custom( + "org.apache.cassandra.db.marshal.CompositeType(" + + "org.apache.cassandra.db.marshal.UTF8Type," + + "org.apache.cassandra.db.marshal.Int32Type)"); + + @Override + public void onTestContextInitialized() { + try { + TestUtils.compactStorageSupportCheck(ccm()); + execute( + "CREATE TABLE test (" + + " k int," + + " c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)'," + + " c2 'ReversedType(CompositeType(UTF8Type, Int32Type))'," // reversed translates + // to CLUSTERING ORDER + // BY DESC + + " c3 'Int32Type'," // translates to int + + " PRIMARY KEY (k, c1, c2)" + + ") WITH COMPACT STORAGE", + "CREATE TABLE test_collection(" + + " k int PRIMARY KEY," + + " c1 list<'DynamicCompositeType(s => UTF8Type, i => Int32Type)'>," + + " c2 map<'DynamicCompositeType(s => UTF8Type, i => Int32Type)', 'DynamicCompositeType(s => UTF8Type, i => Int32Type)'>" + + ")"); + } catch (SkipException e) { + // no op, tests will be skipped. } + } + + /** + * Validates that columns using custom types are properly handled by the driver in the following + * ways: + * + *

    + *
    + * 1. The column metadata appropriately represents the types as {@link DataType#custom(String)}
    + * 2. ReversedType is appropriately detected and the clustering order of that column is marked
    + *    as descending.
    + * 3. ColumnDefinitions for a column in a {@link Row} matches the custom type and that inserted
    + *    data is read back properly.
    + * + * @jira_ticket JAVA-993 + * @test_category metadata + */ + @Test(groups = "short") + public void should_serialize_and_deserialize_custom_types() { + TestUtils.compactStorageSupportCheck(ccm()); + + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test"); + + assertThat(table.getColumn("c1")).isClusteringColumn().hasType(CUSTOM_DYNAMIC_COMPOSITE); + assertThat(table.getColumn("c2")) + .isClusteringColumn() + .hasType(CUSTOM_COMPOSITE) + .hasClusteringOrder(ClusteringOrder.DESC); + assertThat(table.getColumn("c3")).hasType(cint()); + + session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 's@foo:i@32', 'foo:32', 1)"); + session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 'i@42', ':42', 2)"); + session().execute("INSERT INTO test(k, c1, c2, c3) VALUES (0, 'i@12:i@3', 'foo', 3)"); + + ResultSet rs = session().execute("SELECT * FROM test"); + + Row r = rs.one(); + + assertThat(r.getColumnDefinitions().getType("c1")).isEqualTo(CUSTOM_DYNAMIC_COMPOSITE); + assertThat(r.getColumnDefinitions().getType("c2")).isEqualTo(CUSTOM_COMPOSITE); + assertThat(r.getColumnDefinitions().getType("c3")).isEqualTo(cint()); + + assertThat(r.getInt("k")).isEqualTo(0); + assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType(12, 3)); + assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("foo")); + assertThat(r.getInt("c3")).isEqualTo(3); + + r = rs.one(); + assertThat(r.getInt("k")).isEqualTo(0); + assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType(42)); + assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("", 42)); + assertThat(r.getInt("c3")).isEqualTo(2); + + r = rs.one(); + assertThat(r.getInt("k")).isEqualTo(0); + assertThat(r.getBytesUnsafe("c1")).isEqualTo(serializeForDynamicCompositeType("foo", 32)); + assertThat(r.getBytesUnsafe("c2")).isEqualTo(serializeForCompositeType("foo", 32)); + assertThat(r.getInt("c3")).isEqualTo(1); + } + + /** + * Validates that columns using collections of custom types are properly handled by the driver. 
+ * + * @jira_ticket JAVA-1034 + * @test_category metadata + */ + @Test(groups = "short") + public void should_serialize_and_deserialize_collections_of_custom_types() { + TestUtils.compactStorageSupportCheck(ccm()); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test_collection"); + assertThat(table.getColumn("c1")).hasType(DataType.list(CUSTOM_DYNAMIC_COMPOSITE)); + assertThat(table.getColumn("c2")) + .hasType(DataType.map(CUSTOM_DYNAMIC_COMPOSITE, CUSTOM_DYNAMIC_COMPOSITE)); + + session() + .execute( + "INSERT INTO test_collection(k, c1, c2) VALUES (0, [ 's@foo:i@32' ], { 's@foo:i@32': 's@bar:i@42' })"); + + Row r = session().execute("SELECT * FROM test_collection").one(); + + assertThat(r.getColumnDefinitions().getType("c1")) + .isEqualTo(DataType.list(CUSTOM_DYNAMIC_COMPOSITE)); + List c1 = r.getList("c1", ByteBuffer.class); + assertThat(c1.get(0)).isEqualTo(serializeForDynamicCompositeType("foo", 32)); + + assertThat(r.getColumnDefinitions().getType("c2")) + .isEqualTo(DataType.map(CUSTOM_DYNAMIC_COMPOSITE, CUSTOM_DYNAMIC_COMPOSITE)); + Map c2 = r.getMap("c2", ByteBuffer.class, ByteBuffer.class); + Map.Entry entry = c2.entrySet().iterator().next(); + assertThat(entry.getKey()).isEqualTo(serializeForDynamicCompositeType("foo", 32)); + assertThat(entry.getValue()).isEqualTo(serializeForDynamicCompositeType("bar", 42)); + } + + /** + * Validates that UDTs with fields using custom types are properly handled by the driver in the + * following ways: + * + *

    + *
    + * 1. The {@link UserType} metadata appropriately represents the types of fields with custom
    + *    types as {@link DataType#custom(String)}
    + * 2. {@link TableMetadata} with a column having a {@link UserType} is properly referenced.
    + * 3. ColumnDefinitions for a column in a {@link Row} matches the {@link UserType} and that
    + *    inserted data is read back properly.
    + * + * @jira_ticket JAVA-993 + * @test_category metadata + */ + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_handle_udt_with_custom_type() { + TestUtils.compactStorageSupportCheck(ccm()); + // Given: a UDT with custom types, and a table using it. + session() + .execute( + "CREATE TYPE custom_udt (regular int, c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)', c2 'LongType')"); + session().execute("CREATE TABLE custom_udt_tbl (k int primary key, v frozen)"); + + // When: Retrieving User Type via schema metadata. + UserType custom_udt = cluster().getMetadata().getKeyspace(keyspace).getUserType("custom_udt"); + assertThat(custom_udt.getFieldType("regular")).isEqualTo(cint()); + // Then: The fields with custom types should be appropriately represented with their defined + // types. + assertThat(custom_udt.getFieldType("c1")).isEqualTo(CUSTOM_DYNAMIC_COMPOSITE); + assertThat(custom_udt.getFieldType("c2")).isEqualTo(DataType.bigint()); + + // When: Retrieving Table via schema metadata. + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("custom_udt_tbl"); + // Then: The column with the user type should be represented and match the type previously + // retrieved. + assertThat(table.getColumn("v")).hasType(custom_udt); + + // Given: Existing data in table with a UDT with custom types. + session() + .execute( + "INSERT INTO custom_udt_tbl (k, v) VALUES (0, {regular: 5, c1: 's@hello:i@93', c2: 400})"); + + // When: Data is retrieved. + Row row = session().execute("select * from custom_udt_tbl").one(); + + // Then: The resulting row's column definitions should match the table definition. + assertThat(row.getColumnDefinitions().getType("k")).isEqualTo(cint()); + assertThat(row.getColumnDefinitions().getType("v")).isEqualTo(custom_udt); + + // And: The column values should represent what was inserted. + UDTValue value = row.getUDTValue("v"); + assertThat(value.getInt("regular")).isEqualTo(5); + assertThat(value.getBytes("c1")).isEqualTo(serializeForDynamicCompositeType("hello", 93)); + assertThat(value.getLong("c2")).isEqualTo(400); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataProviders.java b/driver-core/src/test/java/com/datastax/driver/core/DataProviders.java index b58b9f7d47a..ea9897e5546 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataProviders.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataProviders.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,56 +19,54 @@ import com.google.common.base.Predicate; import com.google.common.collect.Iterables; -import org.testng.annotations.DataProvider; - import java.util.Arrays; import java.util.Iterator; +import org.testng.annotations.DataProvider; public class DataProviders { - /** - * @return A DataProvider that provides all non-serial consistency levels - */ - @DataProvider(name = "consistencyLevels") - public static Iterator consistencyLevels() { - final Iterator consistencyLevels = Iterables.filter(Arrays.asList(ConsistencyLevel.values()), new Predicate() { - @Override - public boolean apply(ConsistencyLevel input) { - // filter out serial CLs. - return !input.isSerial(); - } - }).iterator(); + /** @return A DataProvider that provides all non-serial consistency levels */ + @DataProvider(name = "consistencyLevels") + public static Iterator consistencyLevels() { + final Iterator consistencyLevels = + Iterables.filter( + Arrays.asList(ConsistencyLevel.values()), + new Predicate() { + @Override + public boolean apply(ConsistencyLevel input) { + // filter out serial CLs. + return !input.isSerial(); + } + }) + .iterator(); - return new Iterator() { + return new Iterator() { - @Override - public boolean hasNext() { - return consistencyLevels.hasNext(); - } + @Override + public boolean hasNext() { + return consistencyLevels.hasNext(); + } - @Override - public Object[] next() { - return new Object[]{ - consistencyLevels.next() - }; - } + @Override + public Object[] next() { + return new Object[] {consistencyLevels.next()}; + } - @Override - public void remove() { - throw new UnsupportedOperationException("This shouldn't happen.."); - } - }; - } + @Override + public void remove() { + throw new UnsupportedOperationException("This shouldn't happen.."); + } + }; + } - /** - * @return A DataProvider that provides all serial consistency levels - */ - @DataProvider(name = "serialConsistencyLevels") - public static Object[][] serialConsistencyLevels() { - return new Object[][]{ - {ConsistencyLevel.SERIAL}, - {ConsistencyLevel.LOCAL_SERIAL} - }; - } + /** @return A DataProvider that provides all serial consistency levels */ + @DataProvider(name = "serialConsistencyLevels") + public static Object[][] serialConsistencyLevels() { + return new Object[][] {{ConsistencyLevel.SERIAL}, {ConsistencyLevel.LOCAL_SERIAL}}; + } + @DataProvider(name = "bool") + public static Object[][] bool() { + return new Object[][] {{true}, {false}}; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataTypeAssert.java b/driver-core/src/test/java/com/datastax/driver/core/DataTypeAssert.java index 626789f5fad..3998d8d89fa 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataTypeAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataTypeAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,61 +17,60 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static com.datastax.driver.core.Assertions.assertThat; -public class DataTypeAssert extends AbstractAssert { - public DataTypeAssert(DataType actual) { - super(actual, DataTypeAssert.class); - } +import org.assertj.core.api.AbstractAssert; - public DataTypeAssert hasName(DataType.Name name) { - assertThat(actual.name).isEqualTo(name); - return this; - } +public class DataTypeAssert extends AbstractAssert { + public DataTypeAssert(DataType actual) { + super(actual, DataTypeAssert.class); + } - public DataTypeAssert isUserType(String keyspaceName, String userTypeName) { - assertThat(actual).isInstanceOf(UserType.class); - UserType userType = (UserType) this.actual; - assertThat(userType.getKeyspace()).isEqualTo(keyspaceName); - assertThat(userType.getTypeName()).isEqualTo(userTypeName); - return this; - } + public DataTypeAssert hasName(DataType.Name name) { + assertThat(actual.name).isEqualTo(name); + return this; + } - public DataTypeAssert isShallowUserType(String keyspaceName, String userTypeName) { - assertThat(actual).isInstanceOf(UserType.Shallow.class); - UserType.Shallow shallow = (UserType.Shallow) actual; - assertThat(shallow.keyspaceName).isEqualTo(keyspaceName); - assertThat(shallow.typeName).isEqualTo(userTypeName); - return this; - } + public DataTypeAssert isUserType(String keyspaceName, String userTypeName) { + assertThat(actual).isInstanceOf(UserType.class); + UserType userType = (UserType) this.actual; + assertThat(userType.getKeyspace()).isEqualTo(keyspaceName); + assertThat(userType.getTypeName()).isEqualTo(userTypeName); + return this; + } - public DataTypeAssert isFrozen() { - assertThat(actual.isFrozen()).isTrue(); - return this; - } + public DataTypeAssert isShallowUserType(String keyspaceName, String userTypeName) { + assertThat(actual).isInstanceOf(UserType.Shallow.class); + UserType.Shallow shallow = (UserType.Shallow) actual; + assertThat(shallow.keyspaceName).isEqualTo(keyspaceName); + assertThat(shallow.typeName).isEqualTo(userTypeName); + return this; + } - public DataTypeAssert isNotFrozen() { - assertThat(actual.isFrozen()).isFalse(); - return this; - } + public DataTypeAssert isFrozen() { + assertThat(actual.isFrozen()).isTrue(); + return this; + } - public DataTypeAssert hasTypeArgument(int position, DataType expected) { - assertThat(actual.getTypeArguments().get(position)).isEqualTo(expected); - return this; - } + public DataTypeAssert isNotFrozen() { + assertThat(actual.isFrozen()).isFalse(); + return this; + } - public DataTypeAssert hasTypeArguments(DataType... 
expected) { - assertThat(actual.getTypeArguments()).containsExactly(expected); - return this; - } + public DataTypeAssert hasTypeArgument(int position, DataType expected) { + assertThat(actual.getTypeArguments().get(position)).isEqualTo(expected); + return this; + } - public DataTypeAssert hasField(String name, DataType expected) { - assertThat(actual).isInstanceOf(UserType.class); - UserType userType = (UserType) this.actual; - assertThat(userType.getFieldType(name)).isEqualTo(expected); - return this; - } + public DataTypeAssert hasTypeArguments(DataType... expected) { + assertThat(actual.getTypeArguments()).containsExactly(expected); + return this; + } + public DataTypeAssert hasField(String name, DataType expected) { + assertThat(actual).isInstanceOf(UserType.class); + UserType userType = (UserType) this.actual; + assertThat(userType.getFieldType(name)).isEqualTo(expected); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataTypeClassNameParserTest.java b/driver-core/src/test/java/com/datastax/driver/core/DataTypeClassNameParserTest.java index 5194be6c3bb..2f317971c0a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataTypeClassNameParserTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataTypeClassNameParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,121 +17,162 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static com.datastax.driver.core.Assertions.assertThat; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import java.util.Arrays; import java.util.Iterator; - -import static com.datastax.driver.core.Assertions.assertThat; -import static org.testng.Assert.*; +import org.testng.annotations.Test; public class DataTypeClassNameParserTest { - private ProtocolVersion protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; - private CodecRegistry codecRegistry = new CodecRegistry(); - - @Test(groups = "unit") - public void parseOneTest() { - - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.ByteType", protocolVersion, codecRegistry), DataType.tinyint()); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.ShortType", protocolVersion, codecRegistry), DataType.smallint()); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.SimpleDateType", protocolVersion, codecRegistry), DataType.date()); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.TimeType", protocolVersion, codecRegistry), DataType.time()); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.InetAddressType", protocolVersion, codecRegistry), DataType.inet()); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type)", protocolVersion, codecRegistry), DataType.list(DataType.text())); - assertEquals(DataTypeClassNameParser.parseOne("org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)", protocolVersion, codecRegistry), DataType.text()); - - String s; - - s = "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.Int32Type)"; - assertEquals(DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry), DataType.custom(s)); - - s = "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.Int32Type))"; - assertEquals(DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry), DataType.list(DataType.cint())); - } - - @Test(groups = "unit") - public void parseWithCompositeTest() { - - String s = "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type, org.apache.cassandra.db.marshal.UTF8Type,"; - s += "org.apache.cassandra.db.marshal.ColumnToCollectionType(6162:org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.Int32Type)))"; - DataTypeClassNameParser.ParseResult r1 = DataTypeClassNameParser.parseWithComposite(s, protocolVersion, codecRegistry); - assertTrue(r1.isComposite); - assertEquals(r1.types, Arrays.asList(DataType.cint(), DataType.text())); - assertEquals(r1.collections.size(), 1); - assertEquals(r1.collections.get("ab"), DataType.list(DataType.cint())); - - DataTypeClassNameParser.ParseResult r2 = 
DataTypeClassNameParser.parseWithComposite("org.apache.cassandra.db.marshal.TimestampType", protocolVersion, codecRegistry); - assertFalse(r2.isComposite); - assertEquals(r2.types, Arrays.asList(DataType.timestamp())); - assertEquals(r2.collections.size(), 0); - } - - @Test(groups = "unit") - public void parseUserTypes() { - - String s = "org.apache.cassandra.db.marshal.UserType(foo,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type,70686f6e6573:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UserType(foo,70686f6e65,6e616d65:org.apache.cassandra.db.marshal.UTF8Type,6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)))"; - UserType def = (UserType) DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); - - assertEquals(def.getKeyspace(), "foo"); - assertEquals(def.getTypeName(), "address"); - - Iterator iter = def.iterator(); - - UserType.Field field1 = iter.next(); - assertEquals(field1.getName(), "street"); - assertEquals(field1.getType(), DataType.text()); - - UserType.Field field2 = iter.next(); - assertEquals(field2.getName(), "zipcode"); - assertEquals(field2.getType(), DataType.cint()); - - UserType.Field field3 = iter.next(); - assertEquals(field3.getName(), "phones"); - - DataType st = field3.getType(); - assertEquals(st.getName(), DataType.Name.SET); - UserType subDef = (UserType) st.getTypeArguments().get(0); - - assertEquals(subDef.getKeyspace(), "foo"); - assertEquals(subDef.getTypeName(), "phone"); - - Iterator subIter = subDef.iterator(); - - UserType.Field subField1 = subIter.next(); - assertEquals(subField1.getName(), "name"); - assertEquals(subField1.getType(), DataType.text()); - - UserType.Field subField2 = subIter.next(); - assertEquals(subField2.getName(), "number"); - assertEquals(subField2.getType(), DataType.text()); - } - - @Test(groups = "unit") - public void parseTupleTest() { - String s = "org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FloatType)"; - TupleType type = (TupleType) DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); - assertNotNull(type); - assertEquals(type.getComponentTypes().get(0), DataType.cint()); - assertEquals(type.getComponentTypes().get(1), DataType.text()); - assertEquals(type.getComponentTypes().get(2), DataType.cfloat()); - } - - @Test(groups = "unit") - public void parseNestedCollectionTest() { - // map>> - String s = "org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)))"; - - DataType parentMap = DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); - assertThat(parentMap) - .hasName(DataType.Name.MAP) - .isNotFrozen() - .hasTypeArgument(0, DataType.text()); - - DataType childMap = parentMap.getTypeArguments().get(1); - assertThat(childMap) - .hasName(DataType.Name.MAP) - .isFrozen() - .hasTypeArguments(DataType.cint(), DataType.cint()); - } + private ProtocolVersion protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; + private CodecRegistry codecRegistry = new CodecRegistry(); + + @Test(groups = "unit") + public void parseOneTest() { + + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.ByteType", protocolVersion, codecRegistry), + 
DataType.tinyint()); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.ShortType", protocolVersion, codecRegistry), + DataType.smallint()); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.SimpleDateType", protocolVersion, codecRegistry), + DataType.date()); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.TimeType", protocolVersion, codecRegistry), + DataType.time()); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.InetAddressType", protocolVersion, codecRegistry), + DataType.inet()); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type)", + protocolVersion, + codecRegistry), + DataType.list(DataType.text())); + assertEquals( + DataTypeClassNameParser.parseOne( + "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)", + protocolVersion, + codecRegistry), + DataType.text()); + + String s; + + s = + "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.Int32Type)"; + assertEquals( + DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry), DataType.custom(s)); + + s = + "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.Int32Type))"; + assertEquals( + DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry), + DataType.list(DataType.cint())); + } + + @Test(groups = "unit") + public void parseWithCompositeTest() { + + String s = + "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type, org.apache.cassandra.db.marshal.UTF8Type,"; + s += + "org.apache.cassandra.db.marshal.ColumnToCollectionType(6162:org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.Int32Type)))"; + DataTypeClassNameParser.ParseResult r1 = + DataTypeClassNameParser.parseWithComposite(s, protocolVersion, codecRegistry); + assertTrue(r1.isComposite); + assertEquals(r1.types, Arrays.asList(DataType.cint(), DataType.text())); + assertEquals(r1.collections.size(), 1); + assertEquals(r1.collections.get("ab"), DataType.list(DataType.cint())); + + DataTypeClassNameParser.ParseResult r2 = + DataTypeClassNameParser.parseWithComposite( + "org.apache.cassandra.db.marshal.TimestampType", protocolVersion, codecRegistry); + assertFalse(r2.isComposite); + assertEquals(r2.types, Arrays.asList(DataType.timestamp())); + assertEquals(r2.collections.size(), 0); + } + + @Test(groups = "unit") + public void parseUserTypes() { + + String s = + "org.apache.cassandra.db.marshal.UserType(foo,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type,70686f6e6573:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UserType(foo,70686f6e65,6e616d65:org.apache.cassandra.db.marshal.UTF8Type,6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)))"; + UserType def = (UserType) DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); + + assertEquals(def.getKeyspace(), "foo"); + assertEquals(def.getTypeName(), "address"); + + Iterator iter = def.iterator(); + + UserType.Field field1 = iter.next(); + assertEquals(field1.getName(), "street"); + assertEquals(field1.getType(), DataType.text()); + + UserType.Field field2 = iter.next(); + assertEquals(field2.getName(), "zipcode"); + 
assertEquals(field2.getType(), DataType.cint()); + + UserType.Field field3 = iter.next(); + assertEquals(field3.getName(), "phones"); + + DataType st = field3.getType(); + assertEquals(st.getName(), DataType.Name.SET); + UserType subDef = (UserType) st.getTypeArguments().get(0); + + assertEquals(subDef.getKeyspace(), "foo"); + assertEquals(subDef.getTypeName(), "phone"); + + Iterator subIter = subDef.iterator(); + + UserType.Field subField1 = subIter.next(); + assertEquals(subField1.getName(), "name"); + assertEquals(subField1.getType(), DataType.text()); + + UserType.Field subField2 = subIter.next(); + assertEquals(subField2.getName(), "number"); + assertEquals(subField2.getType(), DataType.text()); + } + + @Test(groups = "unit") + public void parseTupleTest() { + String s = + "org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FloatType)"; + TupleType type = + (TupleType) DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); + assertNotNull(type); + assertEquals(type.getComponentTypes().get(0), DataType.cint()); + assertEquals(type.getComponentTypes().get(1), DataType.text()); + assertEquals(type.getComponentTypes().get(2), DataType.cfloat()); + } + + @Test(groups = "unit") + public void parseNestedCollectionTest() { + // map>> + String s = + "org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.FrozenType(org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)))"; + + DataType parentMap = DataTypeClassNameParser.parseOne(s, protocolVersion, codecRegistry); + assertThat(parentMap) + .hasName(DataType.Name.MAP) + .isNotFrozen() + .hasTypeArgument(0, DataType.text()); + + DataType childMap = parentMap.getTypeArguments().get(1); + assertThat(childMap) + .hasName(DataType.Name.MAP) + .isFrozen() + .hasTypeArguments(DataType.cint(), DataType.cint()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataTypeCqlNameParserTest.java b/driver-core/src/test/java/com/datastax/driver/core/DataTypeCqlNameParserTest.java index 705f4256fb5..5013707423c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataTypeCqlNameParserTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataTypeCqlNameParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,112 +17,175 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.DataType.*; +import static com.datastax.driver.core.DataType.ascii; +import static com.datastax.driver.core.DataType.bigint; +import static com.datastax.driver.core.DataType.blob; +import static com.datastax.driver.core.DataType.cboolean; +import static com.datastax.driver.core.DataType.cdouble; +import static com.datastax.driver.core.DataType.cfloat; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.counter; +import static com.datastax.driver.core.DataType.date; +import static com.datastax.driver.core.DataType.decimal; +import static com.datastax.driver.core.DataType.inet; +import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.set; +import static com.datastax.driver.core.DataType.smallint; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.DataType.time; +import static com.datastax.driver.core.DataType.timestamp; +import static com.datastax.driver.core.DataType.timeuuid; +import static com.datastax.driver.core.DataType.tinyint; +import static com.datastax.driver.core.DataType.uuid; +import static com.datastax.driver.core.DataType.varchar; +import static com.datastax.driver.core.DataType.varint; import static com.datastax.driver.core.DataTypeCqlNameParser.parse; import static com.datastax.driver.core.Metadata.quote; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + @CassandraVersion("3.0") public class DataTypeCqlNameParserTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_parse_native_types() { - assertThat(parse("ascii", cluster(), null, null, null, false, false)).isEqualTo(ascii()); - assertThat(parse("bigint", cluster(), null, null, null, false, false)).isEqualTo(bigint()); - assertThat(parse("blob", cluster(), null, null, null, false, false)).isEqualTo(blob()); - assertThat(parse("boolean", cluster(), null, null, null, false, false)).isEqualTo(cboolean()); - assertThat(parse("counter", cluster(), null, null, null, false, false)).isEqualTo(counter()); - assertThat(parse("decimal", cluster(), null, null, null, false, false)).isEqualTo(decimal()); - assertThat(parse("double", cluster(), null, null, null, false, false)).isEqualTo(cdouble()); - assertThat(parse("float", cluster(), null, null, null, false, false)).isEqualTo(cfloat()); - assertThat(parse("inet", cluster(), null, null, null, false, false)).isEqualTo(inet()); - assertThat(parse("int", cluster(), null, null, null, false, false)).isEqualTo(cint()); - assertThat(parse("text", cluster(), null, null, null, false, false)).isEqualTo(text()); - assertThat(parse("varchar", cluster(), null, null, null, false, false)).isEqualTo(varchar()); - assertThat(parse("timestamp", cluster(), null, null, null, false, false)).isEqualTo(timestamp()); - assertThat(parse("date", cluster(), null, null, null, false, false)).isEqualTo(date()); - 
assertThat(parse("time", cluster(), null, null, null, false, false)).isEqualTo(time()); - assertThat(parse("uuid", cluster(), null, null, null, false, false)).isEqualTo(uuid()); - assertThat(parse("varint", cluster(), null, null, null, false, false)).isEqualTo(varint()); - assertThat(parse("timeuuid", cluster(), null, null, null, false, false)).isEqualTo(timeuuid()); - assertThat(parse("tinyint", cluster(), null, null, null, false, false)).isEqualTo(tinyint()); - assertThat(parse("smallint", cluster(), null, null, null, false, false)).isEqualTo(smallint()); - } - - @Test(groups = "short") - public void should_ignore_whitespace() { - assertThat(parse(" int ", cluster(), null, null, null, false, false)).isEqualTo(cint()); - assertThat(parse(" set < bigint > ", cluster(), null, null, null, false, false)).isEqualTo(set(bigint())); - assertThat(parse(" map < date , timeuuid > ", cluster(), null, null, null, false, false)).isEqualTo(map(date(), timeuuid())); - } - - @Test(groups = "short") - public void should_ignore_case() { - assertThat(parse("INT", cluster(), null, null, null, false, false)).isEqualTo(cint()); - assertThat(parse("SET", cluster(), null, null, null, false, false)).isEqualTo(set(bigint())); - assertThat(parse("FROZEN>>", cluster(), null, null, null, false, false)).isEqualTo(map(date(), cluster().getMetadata().newTupleType(timeuuid()), true)); - } - - @Test(groups = "short") - public void should_parse_collection_types() { - assertThat(parse("list", cluster(), null, null, null, false, false)).isEqualTo(list(cint())); - assertThat(parse("set", cluster(), null, null, null, false, false)).isEqualTo(set(bigint())); - assertThat(parse("map", cluster(), null, null, null, false, false)).isEqualTo(map(date(), timeuuid())); - } - - @Test(groups = "short") - public void should_parse_frozen_collection_types() { - assertThat(parse("frozen>", cluster(), null, null, null, false, false)).isEqualTo(list(cint(), true)); - assertThat(parse("frozen>", cluster(), null, null, null, false, false)).isEqualTo(set(bigint(), true)); - assertThat(parse("frozen>", cluster(), null, null, null, false, false)).isEqualTo(map(date(), timeuuid(), true)); - } - - @Test(groups = "short") - public void should_parse_nested_collection_types() { - Metadata metadata = cluster().getMetadata(); - KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); - assertThat(parse("list>", cluster(), null, null, null, false, false)).isEqualTo(list(list(cint()))); - assertThat(parse("set>>>", cluster(), null, null, null, false, false)).isEqualTo(set(list(map(bigint(), varchar(), true)))); - - UserType keyType = keyspaceMetadata.getUserType(quote("Incr,edibly\" EvilTy<>><<>><<>,frozen<\"A\">>", cluster(), keyspace, keyspaceMetadata.userTypes, null, false, false)) - .isEqualTo(map(keyType, valueType, false)); - } - - @Test(groups = "short") - public void should_parse_tuple_types() { - assertThat(parse("tuple>", cluster(), null, null, null, false, false)).isEqualTo(cluster().getMetadata().newTupleType(cint(), list(text()))); - } - - @Test(groups = "short") - public void should_parse_user_defined_type_when_definition_in_current_user_types() { - Metadata metadata = cluster().getMetadata(); - KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); - assertThat(parse("frozen<\"A\">", cluster(), keyspace, keyspaceMetadata.userTypes, null, false, false)).isUserType(keyspace, "A"); - } - - @Test(groups = "short") - public void should_parse_user_defined_type_when_definition_in_old_user_types() { - Metadata 
metadata = cluster().getMetadata(); - KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); - assertThat(parse("\"A\"", cluster(), keyspace, null, keyspaceMetadata.userTypes, false, false)).isUserType(keyspace, "A"); - } - - @Test(groups = "short") - public void should_parse_user_defined_type_to_shallow_type_if_requested() { - assertThat(parse("\"A\"", cluster(), keyspace, null, null, false, true)).isShallowUserType(keyspace, "A"); - } - - @Override - public void onTestContextInitialized() { - execute( - String.format("CREATE TYPE %s.\"A\" (f1 int)", keyspace), - String.format("CREATE TYPE %s.\"Incr,edibly\"\" EvilTy<>><<>)", keyspace) - ); - } - + @Test(groups = "short") + public void should_parse_native_types() { + assertThat(parse("ascii", cluster(), null, null, null, false, false)).isEqualTo(ascii()); + assertThat(parse("bigint", cluster(), null, null, null, false, false)).isEqualTo(bigint()); + assertThat(parse("blob", cluster(), null, null, null, false, false)).isEqualTo(blob()); + assertThat(parse("boolean", cluster(), null, null, null, false, false)).isEqualTo(cboolean()); + assertThat(parse("counter", cluster(), null, null, null, false, false)).isEqualTo(counter()); + assertThat(parse("decimal", cluster(), null, null, null, false, false)).isEqualTo(decimal()); + assertThat(parse("double", cluster(), null, null, null, false, false)).isEqualTo(cdouble()); + assertThat(parse("float", cluster(), null, null, null, false, false)).isEqualTo(cfloat()); + assertThat(parse("inet", cluster(), null, null, null, false, false)).isEqualTo(inet()); + assertThat(parse("int", cluster(), null, null, null, false, false)).isEqualTo(cint()); + assertThat(parse("text", cluster(), null, null, null, false, false)).isEqualTo(text()); + assertThat(parse("varchar", cluster(), null, null, null, false, false)).isEqualTo(varchar()); + assertThat(parse("timestamp", cluster(), null, null, null, false, false)) + .isEqualTo(timestamp()); + assertThat(parse("date", cluster(), null, null, null, false, false)).isEqualTo(date()); + assertThat(parse("time", cluster(), null, null, null, false, false)).isEqualTo(time()); + assertThat(parse("uuid", cluster(), null, null, null, false, false)).isEqualTo(uuid()); + assertThat(parse("varint", cluster(), null, null, null, false, false)).isEqualTo(varint()); + assertThat(parse("timeuuid", cluster(), null, null, null, false, false)).isEqualTo(timeuuid()); + assertThat(parse("tinyint", cluster(), null, null, null, false, false)).isEqualTo(tinyint()); + assertThat(parse("smallint", cluster(), null, null, null, false, false)).isEqualTo(smallint()); + } + + @Test(groups = "short") + public void should_ignore_whitespace() { + assertThat(parse(" int ", cluster(), null, null, null, false, false)).isEqualTo(cint()); + assertThat(parse(" set < bigint > ", cluster(), null, null, null, false, false)) + .isEqualTo(set(bigint())); + assertThat(parse(" map < date , timeuuid > ", cluster(), null, null, null, false, false)) + .isEqualTo(map(date(), timeuuid())); + } + + @Test(groups = "short") + public void should_ignore_case() { + assertThat(parse("INT", cluster(), null, null, null, false, false)).isEqualTo(cint()); + assertThat(parse("SET", cluster(), null, null, null, false, false)) + .isEqualTo(set(bigint())); + assertThat( + parse("FROZEN>>", cluster(), null, null, null, false, false)) + .isEqualTo(map(date(), cluster().getMetadata().newTupleType(timeuuid()), true)); + } + + @Test(groups = "short") + public void should_parse_collection_types() { + assertThat(parse("list", 
cluster(), null, null, null, false, false)) + .isEqualTo(list(cint())); + assertThat(parse("set", cluster(), null, null, null, false, false)) + .isEqualTo(set(bigint())); + assertThat(parse("map", cluster(), null, null, null, false, false)) + .isEqualTo(map(date(), timeuuid())); + } + + @Test(groups = "short") + public void should_parse_frozen_collection_types() { + assertThat(parse("frozen>", cluster(), null, null, null, false, false)) + .isEqualTo(list(cint(), true)); + assertThat(parse("frozen>", cluster(), null, null, null, false, false)) + .isEqualTo(set(bigint(), true)); + assertThat(parse("frozen>", cluster(), null, null, null, false, false)) + .isEqualTo(map(date(), timeuuid(), true)); + } + + @Test(groups = "short") + public void should_parse_nested_collection_types() { + Metadata metadata = cluster().getMetadata(); + KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); + assertThat(parse("list>", cluster(), null, null, null, false, false)) + .isEqualTo(list(list(cint()))); + assertThat( + parse( + "set>>>", + cluster(), + null, + null, + null, + false, + false)) + .isEqualTo(set(list(map(bigint(), varchar(), true)))); + + UserType keyType = keyspaceMetadata.getUserType(quote("Incr,edibly\" EvilTy<>><<>><<>,frozen<\"A\">>", + cluster(), + keyspace, + keyspaceMetadata.userTypes, + null, + false, + false)) + .isEqualTo(map(keyType, valueType, false)); + } + + @Test(groups = "short") + public void should_parse_tuple_types() { + assertThat(parse("tuple>", cluster(), null, null, null, false, false)) + .isEqualTo(cluster().getMetadata().newTupleType(cint(), list(text()))); + } + + @Test(groups = "short") + public void should_parse_user_defined_type_when_definition_in_current_user_types() { + Metadata metadata = cluster().getMetadata(); + KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); + assertThat( + parse( + "frozen<\"A\">", + cluster(), + keyspace, + keyspaceMetadata.userTypes, + null, + false, + false)) + .isUserType(keyspace, "A"); + } + + @Test(groups = "short") + public void should_parse_user_defined_type_when_definition_in_old_user_types() { + Metadata metadata = cluster().getMetadata(); + KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(this.keyspace); + assertThat(parse("\"A\"", cluster(), keyspace, null, keyspaceMetadata.userTypes, false, false)) + .isUserType(keyspace, "A"); + } + + @Test(groups = "short") + public void should_parse_user_defined_type_to_shallow_type_if_requested() { + assertThat(parse("\"A\"", cluster(), keyspace, null, null, false, true)) + .isShallowUserType(keyspace, "A"); + } + + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TYPE %s.\"A\" (f1 int)", keyspace), + String.format( + "CREATE TYPE %s.\"Incr,edibly\"\" EvilTy<>><<>)", keyspace)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataTypeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/DataTypeIntegrationTest.java index b1e8a7f430d..01742647891 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataTypeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataTypeIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,469 +17,515 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.utils.CassandraVersion; -import com.google.common.collect.*; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - /** * The goal of this test is to cover the serialization and deserialization of datatypes. - *

- * It creates a table with a column of a given type, inserts a value and then tries to retrieve it.
- * There are 3 variants for the insert query: a raw string, a simple statement with a parameter
- * (protocol > v2 only) and a prepared statement.
- * This is repeated with a large number of datatypes.
+ *
+ * <p>
    It creates a table with a column of a given type, inserts a value and then tries to retrieve + * it. There are 3 variants for the insert query: a raw string, a simple statement with a parameter + * (protocol > v2 only) and a prepared statement. This is repeated with a large number of datatypes. */ public class DataTypeIntegrationTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(DataTypeIntegrationTest.class); - - private Map samples; - - private List tables; - - private VersionNumber cassandraVersion; - - enum StatementType {RAW_STRING, SIMPLE_WITH_PARAM, PREPARED} - - @Override - public void onTestContextInitialized() { - ProtocolVersion protocolVersion = ccm().getProtocolVersion(); - samples = PrimitiveTypeSamples.samples(protocolVersion); - tables = allTables(); - Host host = cluster().getMetadata().getAllHosts().iterator().next(); - cassandraVersion = host.getCassandraVersion().nextStable(); - List statements = Lists.newArrayList(); - for (TestTable table : tables) { - if (cassandraVersion.compareTo(table.minCassandraVersion) < 0) - logger.debug("Skipping table because it uses a feature not supported by Cassandra {}: {}", - cassandraVersion, table.createStatement); - else - statements.add(table.createStatement); - } - execute(statements); + private static final Logger logger = LoggerFactory.getLogger(DataTypeIntegrationTest.class); + + private Map samples; + + private List tables; + + private VersionNumber cassandraVersion; + + enum StatementType { + RAW_STRING, + SIMPLE_WITH_PARAM, + PREPARED + } + + @Override + public void onTestContextInitialized() { + ProtocolVersion protocolVersion = ccm().getProtocolVersion(); + samples = PrimitiveTypeSamples.samples(protocolVersion); + tables = allTables(); + Host host = cluster().getMetadata().getAllHosts().iterator().next(); + cassandraVersion = host.getCassandraVersion().nextStable(); + List statements = Lists.newArrayList(); + for (TestTable table : tables) { + if (cassandraVersion.compareTo(table.minCassandraVersion) < 0) + logger.debug( + "Skipping table because it uses a feature not supported by Cassandra {}: {}", + cassandraVersion, + table.createStatement); + else statements.add(table.createStatement); } - - @Test(groups = "long") - public void should_insert_and_retrieve_data_with_legacy_statements() { - should_insert_and_retrieve_data(StatementType.RAW_STRING); + execute(statements); + } + + @Test(groups = "long") + public void should_insert_and_retrieve_data_with_legacy_statements() { + should_insert_and_retrieve_data(StatementType.RAW_STRING); + } + + @Test(groups = "long") + public void should_insert_and_retrieve_data_with_prepared_statements() { + should_insert_and_retrieve_data(StatementType.PREPARED); + } + + @Test(groups = "long") + @CassandraVersion( + value = "2.0", + description = + "Uses parameterized simple statements, which are only available with protocol v2") + public void should_insert_and_retrieve_data_with_parameterized_simple_statements() { + should_insert_and_retrieve_data(StatementType.SIMPLE_WITH_PARAM); + } + + protected void should_insert_and_retrieve_data(StatementType statementType) { + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); + + for (TestTable table : tables) { + if (cassandraVersion.compareTo(table.minCassandraVersion) < 0) continue; + + TypeCodec codec = codecRegistry.codecFor(table.testColumnType); + switch 
(statementType) { + case RAW_STRING: + String formatValue = codec.format(table.sampleValue); + assertThat(formatValue).isNotNull(); + String query = table.insertStatement.replace("?", formatValue); + session().execute(query); + break; + case SIMPLE_WITH_PARAM: + SimpleStatement statement = new SimpleStatement(table.insertStatement, table.sampleValue); + checkGetValuesReturnsSerializedValue(protocolVersion, statement, table); + session().execute(statement); + break; + case PREPARED: + PreparedStatement ps = session().prepare(table.insertStatement); + BoundStatement bs = ps.bind(table.sampleValue); + checkGetterReturnsValue(bs, table); + session().execute(bs); + break; + } + + Row row = session().execute(table.selectStatement).one(); + Object queriedValue = codec.deserialize(row.getBytesUnsafe("v"), protocolVersion); + + // Since codec.deserialize will get the unboxed version for primitive check against expected + // unboxed value. + assertThat(queriedValue) + .as( + "Test failure on %s statement with table:%n%s;%n" + "insert statement:%n%s;%n", + statementType, table.createStatement, table.insertStatement) + .isEqualTo(table.expectedValue); + + // Since calling row.get* will return boxed version for primitives check against expected + // primitive value. + assertThat(getValue(row, table.testColumnType)) + .as( + "Test failure on %s statement with table:%n%s;%n" + "insert statement:%n%s;%n", + statementType, table.createStatement, table.insertStatement) + .isEqualTo(table.expectedPrimitiveValue); + + session().execute(table.truncateStatement); } - - @Test(groups = "long") - public void should_insert_and_retrieve_data_with_prepared_statements() { - should_insert_and_retrieve_data(StatementType.PREPARED); + } + + private void checkGetterReturnsValue(BoundStatement bs, TestTable table) { + // Driver will not serialize null references in a statement. + Object getterResult = getValue(bs, table.testColumnType); + assertThat(getterResult) + .as("Expected values to match for " + table.testColumnType) + .isEqualTo(table.expectedPrimitiveValue); + + // Ensure that bs.getObject() also returns the expected value. + assertThat(bs.getObject(0)) + .as("Expected values to match for " + table.testColumnType) + .isEqualTo(table.sampleValue); + assertThat(bs.getObject("v")) + .as("Expected values to match for " + table.testColumnType) + .isEqualTo(table.sampleValue); + } + + public void checkGetValuesReturnsSerializedValue( + ProtocolVersion protocolVersion, SimpleStatement statement, TestTable table) { + CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); + ByteBuffer[] values = statement.getValues(protocolVersion, codecRegistry); + assertThat(values.length).isEqualTo(1); + assertThat(values[0]) + .as("Value not serialized as expected for " + table.sampleValue) + .isEqualTo( + codecRegistry + .codecFor(table.testColumnType) + .serialize(table.sampleValue, protocolVersion)); + } + + /** Abstracts information about a table (corresponding to a given column type). 
*/ + static class TestTable { + private static final AtomicInteger counter = new AtomicInteger(); + private String tableName = "date_type_test" + counter.incrementAndGet(); + + final DataType testColumnType; + final Object sampleValue; + final Object expectedValue; + final Object expectedPrimitiveValue; + + final String createStatement; + final String insertStatement = String.format("INSERT INTO %s (k, v) VALUES (1, ?)", tableName); + final String selectStatement = String.format("SELECT v FROM %s WHERE k = 1", tableName); + final String truncateStatement = String.format("TRUNCATE %s", tableName); + + final VersionNumber minCassandraVersion; + + TestTable(DataType testColumnType, Object sampleValue, String minCassandraVersion) { + this(testColumnType, sampleValue, sampleValue, minCassandraVersion); } - @Test(groups = "long") - @CassandraVersion(value = "2.0", description = "Uses parameterized simple statements, which are only available with protocol v2") - public void should_insert_and_retrieve_data_with_parameterized_simple_statements() { - should_insert_and_retrieve_data(StatementType.SIMPLE_WITH_PARAM); + TestTable( + DataType testColumnType, + Object sampleValue, + Object expectedValue, + String minCassandraVersion) { + this(testColumnType, sampleValue, expectedValue, expectedValue, minCassandraVersion); } - protected void should_insert_and_retrieve_data(StatementType statementType) { - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); - - for (TestTable table : tables) { - if (cassandraVersion.compareTo(table.minCassandraVersion) < 0) - continue; - - TypeCodec codec = codecRegistry.codecFor(table.testColumnType); - switch (statementType) { - case RAW_STRING: - String formatValue = codec.format(table.sampleValue); - assertThat(formatValue).isNotNull(); - String query = table.insertStatement.replace("?", formatValue); - session().execute(query); - break; - case SIMPLE_WITH_PARAM: - SimpleStatement statement = new SimpleStatement(table.insertStatement, table.sampleValue); - checkGetValuesReturnsSerializedValue(protocolVersion, statement, table); - session().execute(statement); - break; - case PREPARED: - PreparedStatement ps = session().prepare(table.insertStatement); - BoundStatement bs = ps.bind(table.sampleValue); - checkGetterReturnsValue(bs, table); - session().execute(bs); - break; - } - - Row row = session().execute(table.selectStatement).one(); - Object queriedValue = codec.deserialize(row.getBytesUnsafe("v"), protocolVersion); - - // Since codec.deserialize will get the unboxed version for primitive check against expected unboxed value. - assertThat(queriedValue) - .as("Test failure on %s statement with table:%n%s;%n" + - "insert statement:%n%s;%n", - statementType, - table.createStatement, - table.insertStatement) - .isEqualTo(table.expectedValue); - - - // Since calling row.get* will return boxed version for primitives check against expected primitive value. 
- assertThat(getValue(row, table.testColumnType)) - .as("Test failure on %s statement with table:%n%s;%n" + - "insert statement:%n%s;%n", - statementType, - table.createStatement, - table.insertStatement) - .isEqualTo(table.expectedPrimitiveValue); - - - session().execute(table.truncateStatement); - } + TestTable( + DataType testColumnType, + Object sampleValue, + Object expectedValue, + Object expectedPrimitiveValue, + String minCassandraVersion) { + this.testColumnType = testColumnType; + this.sampleValue = sampleValue; + this.expectedValue = expectedValue; + this.expectedPrimitiveValue = expectedPrimitiveValue; + this.minCassandraVersion = VersionNumber.parse(minCassandraVersion); + + this.createStatement = + String.format("CREATE TABLE %s (k int PRIMARY KEY, v %s)", tableName, testColumnType); } - - private void checkGetterReturnsValue(BoundStatement bs, TestTable table) { - // Driver will not serialize null references in a statement. - Object getterResult = getValue(bs, table.testColumnType); - assertThat(getterResult).as("Expected values to match for " + table.testColumnType).isEqualTo(table.expectedPrimitiveValue); - - // Ensure that bs.getObject() also returns the expected value. - assertThat(bs.getObject(0)).as("Expected values to match for " + table.testColumnType).isEqualTo(table.sampleValue); - assertThat(bs.getObject("v")).as("Expected values to match for " + table.testColumnType).isEqualTo(table.sampleValue); - } - - public void checkGetValuesReturnsSerializedValue(ProtocolVersion protocolVersion, SimpleStatement statement, TestTable table) { - CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); - ByteBuffer[] values = statement.getValues(protocolVersion, codecRegistry); - assertThat(values.length).isEqualTo(1); - assertThat(values[0]) - .as("Value not serialized as expected for " + table.sampleValue) - .isEqualTo(codecRegistry.codecFor(table.testColumnType).serialize(table.sampleValue, protocolVersion)); - } - - /** - * Abstracts information about a table (corresponding to a given column type). 
- */ - static class TestTable { - private static final AtomicInteger counter = new AtomicInteger(); - private String tableName = "date_type_test" + counter.incrementAndGet(); - - final DataType testColumnType; - final Object sampleValue; - final Object expectedValue; - final Object expectedPrimitiveValue; - - final String createStatement; - final String insertStatement = String.format("INSERT INTO %s (k, v) VALUES (1, ?)", tableName); - final String selectStatement = String.format("SELECT v FROM %s WHERE k = 1", tableName); - final String truncateStatement = String.format("TRUNCATE %s", tableName); - - final VersionNumber minCassandraVersion; - - TestTable(DataType testColumnType, Object sampleValue, String minCassandraVersion) { - this(testColumnType, sampleValue, sampleValue, minCassandraVersion); - } - - TestTable(DataType testColumnType, Object sampleValue, Object expectedValue, String minCassandraVersion) { - this(testColumnType, sampleValue, expectedValue, expectedValue, minCassandraVersion); - } - - TestTable(DataType testColumnType, Object sampleValue, Object expectedValue, Object expectedPrimitiveValue, String minCassandraVersion) { - this.testColumnType = testColumnType; - this.sampleValue = sampleValue; - this.expectedValue = expectedValue; - this.expectedPrimitiveValue = expectedPrimitiveValue; - this.minCassandraVersion = VersionNumber.parse(minCassandraVersion); - - this.createStatement = String.format("CREATE TABLE %s (k int PRIMARY KEY, v %s)", tableName, testColumnType); - } + } + + private List allTables() { + List tables = Lists.newArrayList(); + + tables.addAll(tablesWithPrimitives()); + tables.addAll(tablesWithPrimitivesNull()); + tables.addAll(tablesWithCollectionsOfPrimitives()); + tables.addAll(tablesWithMapsOfPrimitives()); + tables.addAll(tablesWithNestedCollections()); + tables.addAll(tablesWithRandomlyGeneratedNestedCollections()); + + return ImmutableList.copyOf(tables); + } + + private List tablesWithPrimitives() { + List tables = Lists.newArrayList(); + for (Map.Entry entry : samples.entrySet()) + tables.add(new TestTable(entry.getKey(), entry.getValue(), "1.2.0")); + return tables; + } + + private List tablesWithPrimitivesNull() { + List tables = Lists.newArrayList(); + // Create a test table for each primitive type testing with null values. If the + // type maps to a java primitive type it's value will be the default one specified here instead + // of null. 
+ for (DataType dataType : TestUtils.allPrimitiveTypes(ccm().getProtocolVersion())) { + Object expectedPrimitiveValue = null; + switch (dataType.getName()) { + case BIGINT: + case TIME: + expectedPrimitiveValue = 0L; + break; + case DOUBLE: + expectedPrimitiveValue = 0.0; + break; + case FLOAT: + expectedPrimitiveValue = 0.0f; + break; + case INT: + expectedPrimitiveValue = 0; + break; + case SMALLINT: + expectedPrimitiveValue = (short) 0; + break; + case TINYINT: + expectedPrimitiveValue = (byte) 0; + break; + case BOOLEAN: + expectedPrimitiveValue = false; + break; + default: + // not a Java primitive type + continue; + } + + tables.add(new TestTable(dataType, null, null, expectedPrimitiveValue, "1.2.0")); } - - private List allTables() { - List tables = Lists.newArrayList(); - - tables.addAll(tablesWithPrimitives()); - tables.addAll(tablesWithPrimitivesNull()); - tables.addAll(tablesWithCollectionsOfPrimitives()); - tables.addAll(tablesWithMapsOfPrimitives()); - tables.addAll(tablesWithNestedCollections()); - tables.addAll(tablesWithRandomlyGeneratedNestedCollections()); - - return ImmutableList.copyOf(tables); + return tables; + } + + private List tablesWithCollectionsOfPrimitives() { + List tables = Lists.newArrayList(); + for (Map.Entry entry : samples.entrySet()) { + + DataType elementType = entry.getKey(); + Object elementSample = entry.getValue(); + + tables.add( + new TestTable( + DataType.list(elementType), + Lists.newArrayList(elementSample, elementSample), + "1.2.0")); + // Duration not supported in Set + if (elementType != DataType.duration()) + tables.add( + new TestTable(DataType.set(elementType), Sets.newHashSet(elementSample), "1.2.0")); } - - private List tablesWithPrimitives() { - List tables = Lists.newArrayList(); - for (Map.Entry entry : samples.entrySet()) - tables.add(new TestTable(entry.getKey(), entry.getValue(), "1.2.0")); - return tables; + return tables; + } + + private List tablesWithMapsOfPrimitives() { + List tables = Lists.newArrayList(); + for (Map.Entry keyEntry : samples.entrySet()) { + // Duration not supported as Map key + DataType keyType = keyEntry.getKey(); + if (keyType == DataType.duration()) continue; + + Object keySample = keyEntry.getValue(); + for (Map.Entry valueEntry : samples.entrySet()) { + DataType valueType = valueEntry.getKey(); + Object valueSample = valueEntry.getValue(); + + tables.add( + new TestTable( + DataType.map(keyType, valueType), + ImmutableMap.builder().put(keySample, valueSample).build(), + "1.2.0")); + } } - - private List tablesWithPrimitivesNull() { - List tables = Lists.newArrayList(); - // Create a test table for each primitive type testing with null values. If the - // type maps to a java primitive type it's value will be the default one specified here instead of null. 
- for (DataType dataType : TestUtils.allPrimitiveTypes(ccm().getProtocolVersion())) { - Object expectedPrimitiveValue = null; - switch (dataType.getName()) { - case BIGINT: - case TIME: - expectedPrimitiveValue = 0L; - break; - case DOUBLE: - expectedPrimitiveValue = 0.0; - break; - case FLOAT: - expectedPrimitiveValue = 0.0f; - break; - case INT: - expectedPrimitiveValue = 0; - break; - case SMALLINT: - expectedPrimitiveValue = (short) 0; - break; - case TINYINT: - expectedPrimitiveValue = (byte) 0; - break; - case BOOLEAN: - expectedPrimitiveValue = false; - break; - default: - // not a Java primitive type - continue; - } - - tables.add(new TestTable(dataType, null, null, expectedPrimitiveValue, "1.2.0")); - } - return tables; - + return tables; + } + + private Collection tablesWithNestedCollections() { + List tables = Lists.newArrayList(); + + // To avoid combinatorial explosion, only use int as the primitive type, and two levels of + // nesting. + // This yields collections like list>, map>, + // frozen>>, etc. + + // Types and samples for the inner collections like frozen> + Map childCollectionSamples = + ImmutableMap.builder() + .put(DataType.frozenList(DataType.cint()), Lists.newArrayList(1, 1)) + .put(DataType.frozenSet(DataType.cint()), Sets.newHashSet(1, 2)) + .put( + DataType.frozenMap(DataType.cint(), DataType.cint()), + ImmutableMap.builder().put(1, 2).put(3, 4).build()) + .build(); + + for (Map.Entry entry : childCollectionSamples.entrySet()) { + DataType elementType = entry.getKey(); + Object elementSample = entry.getValue(); + + tables.add( + new TestTable( + DataType.list(elementType), + Lists.newArrayList(elementSample, elementSample), + "2.1.3")); + tables.add(new TestTable(DataType.set(elementType), Sets.newHashSet(elementSample), "2.1.3")); + + for (Map.Entry valueEntry : childCollectionSamples.entrySet()) { + DataType valueType = valueEntry.getKey(); + Object valueSample = valueEntry.getValue(); + + tables.add( + new TestTable( + DataType.map(elementType, valueType), + ImmutableMap.builder().put(elementSample, valueSample).build(), + "2.1.3")); + } } - - private List tablesWithCollectionsOfPrimitives() { - List tables = Lists.newArrayList(); - for (Map.Entry entry : samples.entrySet()) { - - DataType elementType = entry.getKey(); - Object elementSample = entry.getValue(); - - tables.add(new TestTable(DataType.list(elementType), Lists.newArrayList(elementSample, elementSample), "1.2.0")); - // Duration not supported in Set - if (elementType != DataType.duration()) - tables.add(new TestTable(DataType.set(elementType), Sets.newHashSet(elementSample), "1.2.0")); - } - return tables; + return tables; + } + + private Collection tablesWithRandomlyGeneratedNestedCollections() { + List tables = Lists.newArrayList(); + + DataType nestedListType = buildNestedType(DataType.Name.LIST, 5); + DataType nestedSetType = buildNestedType(DataType.Name.SET, 5); + DataType nestedMapType = buildNestedType(DataType.Name.MAP, 5); + + tables.add(new TestTable(nestedListType, nestedObject(nestedListType), "2.1.3")); + tables.add(new TestTable(nestedSetType, nestedObject(nestedSetType), "2.1.3")); + tables.add(new TestTable(nestedMapType, nestedObject(nestedMapType), "2.1.3")); + return tables; + } + + /** Populate a nested collection based on the given type and it's arguments. */ + public Object nestedObject(DataType type) { + + int typeIdx = type.getTypeArguments().size() > 1 ? 
1 : 0; + DataType argument = type.getTypeArguments().get(typeIdx); + boolean isAtBottom = !(argument instanceof DataType.CollectionType); + + if (isAtBottom) { + switch (type.getName()) { + case LIST: + return Lists.newArrayList(1, 2, 3); + case SET: + return Sets.newHashSet(1, 2, 3); + case MAP: + Map map = Maps.newHashMap(); + map.put(1, 2); + map.put(3, 4); + map.put(5, 6); + return map; + } + } else { + switch (type.getName()) { + case LIST: + List l = Lists.newArrayListWithExpectedSize(2); + for (int i = 0; i < 5; i++) { + l.add(nestedObject(argument)); + } + return l; + case SET: + Set s = Sets.newHashSet(); + for (int i = 0; i < 5; i++) { + s.add(nestedObject(argument)); + } + return s; + case MAP: + Map map = Maps.newHashMap(); + for (int i = 0; i < 5; i++) { + map.put(i, nestedObject(argument)); + } + return map; + } } - - private List tablesWithMapsOfPrimitives() { - List tables = Lists.newArrayList(); - for (Map.Entry keyEntry : samples.entrySet()) { - // Duration not supported as Map key - DataType keyType = keyEntry.getKey(); - if (keyType == DataType.duration()) - continue; - - Object keySample = keyEntry.getValue(); - for (Map.Entry valueEntry : samples.entrySet()) { - DataType valueType = valueEntry.getKey(); - Object valueSample = valueEntry.getValue(); - - tables.add(new TestTable(DataType.map(keyType, valueType), - ImmutableMap.builder().put(keySample, valueSample).build(), - "1.2.0")); - } + return null; + } + + /** + * @param baseType The base type to use, one of SET, MAP, LIST. + * @param depth How many subcollections to generate. + * @return a DataType that is a nested collection with the given baseType with the given depth. + */ + public DataType buildNestedType(DataType.Name baseType, int depth) { + Random r = new Random(); + DataType t = null; + + for (int i = 1; i <= depth; i++) { + int chooser = r.nextInt(3); + if (t == null) { + if (chooser == 0) { + t = DataType.frozenList(DataType.cint()); + } else if (chooser == 1) { + t = DataType.frozenSet(DataType.cint()); + } else { + t = DataType.frozenMap(DataType.cint(), DataType.cint()); } - return tables; - } - - private Collection tablesWithNestedCollections() { - List tables = Lists.newArrayList(); - - // To avoid combinatorial explosion, only use int as the primitive type, and two levels of nesting. - // This yields collections like list>, map>, frozen>>, etc. 
- - // Types and samples for the inner collections like frozen> - Map childCollectionSamples = ImmutableMap.builder() - .put(DataType.frozenList(DataType.cint()), Lists.newArrayList(1, 1)) - .put(DataType.frozenSet(DataType.cint()), Sets.newHashSet(1, 2)) - .put(DataType.frozenMap(DataType.cint(), DataType.cint()), ImmutableMap.builder().put(1, 2).put(3, 4).build()) - .build(); - - for (Map.Entry entry : childCollectionSamples.entrySet()) { - DataType elementType = entry.getKey(); - Object elementSample = entry.getValue(); - - tables.add(new TestTable(DataType.list(elementType), Lists.newArrayList(elementSample, elementSample), "2.1.3")); - tables.add(new TestTable(DataType.set(elementType), Sets.newHashSet(elementSample), "2.1.3")); - - for (Map.Entry valueEntry : childCollectionSamples.entrySet()) { - DataType valueType = valueEntry.getKey(); - Object valueSample = valueEntry.getValue(); - - tables.add(new TestTable(DataType.map(elementType, valueType), - ImmutableMap.builder().put(elementSample, valueSample).build(), "2.1.3")); - } + } else if (i == depth) { + switch (baseType) { + case LIST: + return DataType.list(t); + case SET: + return DataType.set(t); + case MAP: + return DataType.map(DataType.cint(), t); } - return tables; - } - - private Collection tablesWithRandomlyGeneratedNestedCollections() { - List tables = Lists.newArrayList(); - - DataType nestedListType = buildNestedType(DataType.Name.LIST, 5); - DataType nestedSetType = buildNestedType(DataType.Name.SET, 5); - DataType nestedMapType = buildNestedType(DataType.Name.MAP, 5); - - tables.add(new TestTable(nestedListType, nestedObject(nestedListType), "2.1.3")); - tables.add(new TestTable(nestedSetType, nestedObject(nestedSetType), "2.1.3")); - tables.add(new TestTable(nestedMapType, nestedObject(nestedMapType), "2.1.3")); - return tables; - } - - /** - * Populate a nested collection based on the given type and it's arguments. - */ - public Object nestedObject(DataType type) { - - int typeIdx = type.getTypeArguments().size() > 1 ? 1 : 0; - DataType argument = type.getTypeArguments().get(typeIdx); - boolean isAtBottom = !(argument instanceof DataType.CollectionType); - - if (isAtBottom) { - switch (type.getName()) { - case LIST: - return Lists.newArrayList(1, 2, 3); - case SET: - return Sets.newHashSet(1, 2, 3); - case MAP: - Map map = Maps.newHashMap(); - map.put(1, 2); - map.put(3, 4); - map.put(5, 6); - return map; - } + } else { + if (chooser == 0) { + t = DataType.frozenList(t); + } else if (chooser == 1) { + t = DataType.frozenSet(t); } else { - switch (type.getName()) { - case LIST: - List l = Lists.newArrayListWithExpectedSize(2); - for (int i = 0; i < 5; i++) { - l.add(nestedObject(argument)); - } - return l; - case SET: - Set s = Sets.newHashSet(); - for (int i = 0; i < 5; i++) { - s.add(nestedObject(argument)); - } - return s; - case MAP: - Map map = Maps.newHashMap(); - for (int i = 0; i < 5; i++) { - map.put(i, nestedObject(argument)); - } - return map; - } + t = DataType.frozenMap(DataType.cint(), t); } - return null; + } } - - /** - * @param baseType The base type to use, one of SET, MAP, LIST. - * @param depth How many subcollections to generate. - * @return a DataType that is a nested collection with the given baseType with the - * given depth. 
- */ - public DataType buildNestedType(DataType.Name baseType, int depth) { - Random r = new Random(); - DataType t = null; - - for (int i = 1; i <= depth; i++) { - int chooser = r.nextInt(3); - if (t == null) { - if (chooser == 0) { - t = DataType.frozenList(DataType.cint()); - } else if (chooser == 1) { - t = DataType.frozenSet(DataType.cint()); - } else { - t = DataType.frozenMap(DataType.cint(), DataType.cint()); - } - } else if (i == depth) { - switch (baseType) { - case LIST: - return DataType.list(t); - case SET: - return DataType.set(t); - case MAP: - return DataType.map(DataType.cint(), t); - } - } else { - if (chooser == 0) { - t = DataType.frozenList(t); - } else if (chooser == 1) { - t = DataType.frozenSet(t); - } else { - t = DataType.frozenMap(DataType.cint(), t); - } - } - } + return null; + } + + private Object getValue(GettableByIndexData data, DataType dataType) { + // This is kind of lame, but better than testing all getters manually + CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); + switch (dataType.getName()) { + case ASCII: + return data.getString(0); + case BIGINT: + return data.getLong(0); + case BLOB: + return data.getBytes(0); + case BOOLEAN: + return data.getBool(0); + case DECIMAL: + return data.getDecimal(0); + case DOUBLE: + return data.getDouble(0); + case FLOAT: + return data.getFloat(0); + case INET: + return data.getInet(0); + case TINYINT: + return data.getByte(0); + case SMALLINT: + return data.getShort(0); + case INT: + return data.getInt(0); + case TEXT: + case VARCHAR: + return data.getString(0); + case TIMESTAMP: + return data.getTimestamp(0); + case DATE: + return data.getDate(0); + case TIME: + return data.getTime(0); + case UUID: + case TIMEUUID: + return data.getUUID(0); + case VARINT: + return data.getVarint(0); + case LIST: + return data.getList( + 0, codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType()); + case SET: + return data.getSet( + 0, codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType()); + case MAP: + return data.getMap( + 0, + codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType(), + codecRegistry.codecFor(dataType.getTypeArguments().get(1)).getJavaType()); + case DURATION: + return data.get(0, Duration.class); + case CUSTOM: + case COUNTER: + default: + fail("Unexpected type in bound statement test: " + dataType); return null; } - - private Object getValue(GettableByIndexData data, DataType dataType) { - // This is kind of lame, but better than testing all getters manually - CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); - switch (dataType.getName()) { - case ASCII: - return data.getString(0); - case BIGINT: - return data.getLong(0); - case BLOB: - return data.getBytes(0); - case BOOLEAN: - return data.getBool(0); - case DECIMAL: - return data.getDecimal(0); - case DOUBLE: - return data.getDouble(0); - case FLOAT: - return data.getFloat(0); - case INET: - return data.getInet(0); - case TINYINT: - return data.getByte(0); - case SMALLINT: - return data.getShort(0); - case INT: - return data.getInt(0); - case TEXT: - case VARCHAR: - return data.getString(0); - case TIMESTAMP: - return data.getTimestamp(0); - case DATE: - return data.getDate(0); - case TIME: - return data.getTime(0); - case UUID: - case TIMEUUID: - return data.getUUID(0); - case VARINT: - return data.getVarint(0); - case LIST: - return data.getList(0, codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType()); - case SET: - return 
data.getSet(0, codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType()); - case MAP: - return data.getMap(0, - codecRegistry.codecFor(dataType.getTypeArguments().get(0)).getJavaType(), - codecRegistry.codecFor(dataType.getTypeArguments().get(1)).getJavaType()); - case DURATION: - return data.get(0, Duration.class); - case CUSTOM: - case COUNTER: - default: - fail("Unexpected type in bound statement test: " + dataType); - return null; - } - } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DataTypeTest.java b/driver-core/src/test/java/com/datastax/driver/core/DataTypeTest.java index ee979474308..cdf3583fbf1 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DataTypeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DataTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,306 +17,396 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.google.common.collect.Lists.newArrayList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.utils.Bytes; -import org.testng.annotations.Test; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.google.common.collect.Lists.newArrayList; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.testng.annotations.Test; -/** - * DataType simple unit tests. - */ +/** DataType simple unit tests. 
*/ public class DataTypeTest { - CodecRegistry codecRegistry = new CodecRegistry(); + CodecRegistry codecRegistry = new CodecRegistry(); - ProtocolVersion protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; + ProtocolVersion protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; - static boolean exclude(DataType t) { - return t.getName() == DataType.Name.COUNTER || t.getName() == DataType.Name.DURATION; - } + static boolean exclude(DataType t) { + return t.getName() == DataType.Name.COUNTER || t.getName() == DataType.Name.DURATION; + } - /** - * A test value for a primitive data type - */ - static class TestValue { - /** - * The value as a Java object - */ - final Object javaObject; - /** - * A CQL string that should parse to the value - */ - final String cqlInputString; - /** - * How the value should be formatted in CQL - */ - final String cqlOutputString; - - TestValue(Object javaObject, String cqlInputString, String cqlOutputString) { - this.javaObject = javaObject; - this.cqlInputString = cqlInputString; - this.cqlOutputString = cqlOutputString; - } - } + /** A test value for a primitive data type */ + static class TestValue { + /** The value as a Java object */ + final Object javaObject; + /** A CQL string that should parse to the value */ + final String cqlInputString; + /** How the value should be formatted in CQL */ + final String cqlOutputString; - private static TestValue[] primitiveTestValues(DataType dt) { - switch (dt.getName()) { - case ASCII: - case TEXT: - case VARCHAR: - return new TestValue[]{ - new TestValue("foo", "'foo'", "'foo'"), - new TestValue("fo'o", "'fo''o'", "'fo''o'"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case BIGINT: - return new TestValue[]{ - new TestValue(42L, "42", "42"), - new TestValue(91294377723L, "91294377723", "91294377723"), - new TestValue(-133L, "-133", "-133"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case TIMESTAMP: - // input: single quotes are optional for long literals, mandatory for date patterns - return new TestValue[]{ - new TestValue(new Date(42L), "42", "42"), - new TestValue(new Date(91294377723L), "91294377723", "91294377723"), - new TestValue(new Date(-133L), "-133", "-133"), - new TestValue(new Date(784041330999L), "'1994-11-05T14:15:30.999+0100'", "784041330999"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case DATE: - // input: single quotes are optional for long literals, mandatory for date patterns - return new TestValue[]{ - new TestValue(LocalDate.fromDaysSinceEpoch(16071), "'2014-01-01'", "'2014-01-01'"), - new TestValue(LocalDate.fromDaysSinceEpoch(0), "'1970-01-01'", "'1970-01-01'"), - new TestValue(LocalDate.fromDaysSinceEpoch((int) (2147483648L - (1L << 31))), "'2147483648'", "'1970-01-01'"), - new TestValue(LocalDate.fromDaysSinceEpoch((int) (0 - (1L << 31))), "0", "'-5877641-06-23'"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case TIME: - // input: all literals must by enclosed in single quotes - return new TestValue[]{ - new TestValue(54012123450000L, "'54012123450000'", "'15:00:12.123450000'"), - new TestValue(0L, "'0'", "'00:00:00.000000000'"), - new TestValue(54012012345000L, "'15:00:12.012345000'", "'15:00:12.012345000'"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", 
"NULL")}; - case BLOB: - return new TestValue[]{ - new TestValue(Bytes.fromHexString("0x2450"), "0x2450", "0x2450"), - new TestValue(ByteBuffer.allocate(0), "0x", "0x"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case BOOLEAN: - return new TestValue[]{ - new TestValue(true, "true", "true"), - new TestValue(false, "false", "false"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case DECIMAL: - return new TestValue[]{ - new TestValue(new BigDecimal("1.23E+8"), "1.23E+8", "1.23E+8"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case DOUBLE: - return new TestValue[]{ - new TestValue(2.39324324, "2.39324324", "2.39324324"), - new TestValue(-12., "-12.0", "-12.0"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case FLOAT: - return new TestValue[]{ - new TestValue(2.39f, "2.39", "2.39"), - new TestValue(-12.f, "-12.0", "-12.0"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case INET: - try { - return new TestValue[]{ - new TestValue(InetAddress.getByName("128.2.12.3"), "'128.2.12.3'", "'128.2.12.3'"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - } catch (java.net.UnknownHostException e) { - throw new RuntimeException(); - } - case TINYINT: - return new TestValue[]{ - new TestValue((byte) -4, "-4", "-4"), - new TestValue((byte) 44, "44", "44"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case SMALLINT: - return new TestValue[]{ - new TestValue((short) -3, "-3", "-3"), - new TestValue((short) 43, "43", "43"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case INT: - return new TestValue[]{ - new TestValue(-2, "-2", "-2"), - new TestValue(42, "42", "42"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case TIMEUUID: - return new TestValue[]{ - new TestValue(UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"), "fe2b4360-28c6-11e2-81c1-0800200c9a66", "fe2b4360-28c6-11e2-81c1-0800200c9a66"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case UUID: - return new TestValue[]{ - new TestValue(UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"), "fe2b4360-28c6-11e2-81c1-0800200c9a66", "fe2b4360-28c6-11e2-81c1-0800200c9a66"), - new TestValue(UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00"), "067e6162-3b6f-4ae2-a171-2470b63dff00", "067e6162-3b6f-4ae2-a171-2470b63dff00"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - case VARINT: - return new TestValue[]{ - new TestValue(new BigInteger("12387290982347987032483422342432"), "12387290982347987032483422342432", "12387290982347987032483422342432"), - new TestValue(null, null, "NULL"), new TestValue(null, "null", "NULL"), new TestValue(null, "NULL", "NULL")}; - default: - throw new RuntimeException("Missing handling of " + dt); - } + TestValue(Object javaObject, String cqlInputString, String cqlOutputString) { + this.javaObject = javaObject; + this.cqlInputString = cqlInputString; + this.cqlOutputString = 
cqlOutputString; } - - @Test(groups = "unit") - public void parseNativeTest() { - for (DataType dt : DataType.allPrimitiveTypes()) { - if (exclude(dt)) - continue; - - for (TestValue value : primitiveTestValues(dt)) - assertThat(codecRegistry.codecFor(dt).parse(value.cqlInputString)) - .as("Parsing input %s to a %s", value.cqlInputString, dt) - .isEqualTo(value.javaObject); + } + + private static TestValue[] primitiveTestValues(DataType dt) { + switch (dt.getName()) { + case ASCII: + case TEXT: + case VARCHAR: + return new TestValue[] { + new TestValue("foo", "'foo'", "'foo'"), + new TestValue("fo'o", "'fo''o'", "'fo''o'"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case BIGINT: + return new TestValue[] { + new TestValue(42L, "42", "42"), + new TestValue(91294377723L, "91294377723", "91294377723"), + new TestValue(-133L, "-133", "-133"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case TIMESTAMP: + // input: single quotes are optional for long literals, mandatory for date patterns + return new TestValue[] { + new TestValue(new Date(42L), "42", "42"), + new TestValue(new Date(91294377723L), "91294377723", "91294377723"), + new TestValue(new Date(-133L), "-133", "-133"), + new TestValue(new Date(784041330999L), "'1994-11-05T14:15:30.999+0100'", "784041330999"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case DATE: + // input: single quotes are optional for long literals, mandatory for date patterns + return new TestValue[] { + new TestValue(LocalDate.fromDaysSinceEpoch(16071), "'2014-01-01'", "'2014-01-01'"), + new TestValue(LocalDate.fromDaysSinceEpoch(0), "'1970-01-01'", "'1970-01-01'"), + new TestValue( + LocalDate.fromDaysSinceEpoch((int) (2147483648L - (1L << 31))), + "'2147483648'", + "'1970-01-01'"), + new TestValue( + LocalDate.fromDaysSinceEpoch((int) (0 - (1L << 31))), "0", "'-5877641-06-23'"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case TIME: + // input: all literals must by enclosed in single quotes + return new TestValue[] { + new TestValue(54012123450000L, "'54012123450000'", "'15:00:12.123450000'"), + new TestValue(0L, "'0'", "'00:00:00.000000000'"), + new TestValue(54012012345000L, "'15:00:12.012345000'", "'15:00:12.012345000'"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case BLOB: + return new TestValue[] { + new TestValue(Bytes.fromHexString("0x2450"), "0x2450", "0x2450"), + new TestValue(ByteBuffer.allocate(0), "0x", "0x"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case BOOLEAN: + return new TestValue[] { + new TestValue(true, "true", "true"), + new TestValue(false, "false", "false"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case DECIMAL: + return new TestValue[] { + new TestValue(new BigDecimal("1.23E+8"), "1.23E+8", "1.23E+8"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case DOUBLE: + return new TestValue[] { + new TestValue(2.39324324, "2.39324324", "2.39324324"), + new TestValue(-12., "-12.0", "-12.0"), + new TestValue(null, null, "NULL"), + new 
TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case FLOAT: + return new TestValue[] { + new TestValue(2.39f, "2.39", "2.39"), + new TestValue(-12.f, "-12.0", "-12.0"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case INET: + try { + return new TestValue[] { + new TestValue(InetAddress.getByName("128.2.12.3"), "'128.2.12.3'", "'128.2.12.3'"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + } catch (java.net.UnknownHostException e) { + throw new RuntimeException(); } + case TINYINT: + return new TestValue[] { + new TestValue((byte) -4, "-4", "-4"), + new TestValue((byte) 44, "44", "44"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case SMALLINT: + return new TestValue[] { + new TestValue((short) -3, "-3", "-3"), + new TestValue((short) 43, "43", "43"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case INT: + return new TestValue[] { + new TestValue(-2, "-2", "-2"), + new TestValue(42, "42", "42"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case TIMEUUID: + return new TestValue[] { + new TestValue( + UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"), + "fe2b4360-28c6-11e2-81c1-0800200c9a66", + "fe2b4360-28c6-11e2-81c1-0800200c9a66"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case UUID: + return new TestValue[] { + new TestValue( + UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"), + "fe2b4360-28c6-11e2-81c1-0800200c9a66", + "fe2b4360-28c6-11e2-81c1-0800200c9a66"), + new TestValue( + UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00"), + "067e6162-3b6f-4ae2-a171-2470b63dff00", + "067e6162-3b6f-4ae2-a171-2470b63dff00"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + case VARINT: + return new TestValue[] { + new TestValue( + new BigInteger("12387290982347987032483422342432"), + "12387290982347987032483422342432", + "12387290982347987032483422342432"), + new TestValue(null, null, "NULL"), + new TestValue(null, "null", "NULL"), + new TestValue(null, "NULL", "NULL") + }; + default: + throw new RuntimeException("Missing handling of " + dt); } + } - @Test(groups = "unit") - public void formatNativeTest() { - for (DataType dt : DataType.allPrimitiveTypes()) { - if (exclude(dt)) - continue; + @Test(groups = "unit") + public void parseNativeTest() { + for (DataType dt : DataType.allPrimitiveTypes()) { + if (exclude(dt)) continue; - for (TestValue value : primitiveTestValues(dt)) - assertThat(codecRegistry.codecFor(dt).format(value.javaObject)) - .as("Formatting a %s expecting %s", dt, value.cqlOutputString) - .isEqualTo(value.cqlOutputString); - } + for (TestValue value : primitiveTestValues(dt)) + assertThat(codecRegistry.codecFor(dt).parse(value.cqlInputString)) + .as("Parsing input %s to a %s", value.cqlInputString, dt) + .isEqualTo(value.javaObject); } + } - @Test(groups = "unit") - public void parseFormatListTest() { - String toParse = "['Foo','Bar','Foo''bar']"; - List toFormat = Arrays.asList("Foo", "Bar", "Foo'bar"); - DataType dt = DataType.list(DataType.text()); - assertEquals(codecRegistry.codecFor(dt).parse(toParse), 
toFormat); - assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); - } + @Test(groups = "unit") + public void formatNativeTest() { + for (DataType dt : DataType.allPrimitiveTypes()) { + if (exclude(dt)) continue; - @SuppressWarnings("serial") - @Test(groups = "unit") - public void parseFormatSetTest() { - String toParse = "{'Foo','Bar','Foo''bar'}"; - Set toFormat = new LinkedHashSet() {{ + for (TestValue value : primitiveTestValues(dt)) + assertThat(codecRegistry.codecFor(dt).format(value.javaObject)) + .as("Formatting a %s expecting %s", dt, value.cqlOutputString) + .isEqualTo(value.cqlOutputString); + } + } + + @Test(groups = "unit") + public void parseFormatListTest() { + String toParse = "['Foo','Bar','Foo''bar']"; + List toFormat = Arrays.asList("Foo", "Bar", "Foo'bar"); + DataType dt = DataType.list(DataType.text()); + assertEquals(codecRegistry.codecFor(dt).parse(toParse), toFormat); + assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); + } + + @SuppressWarnings("serial") + @Test(groups = "unit") + public void parseFormatSetTest() { + String toParse = "{'Foo','Bar','Foo''bar'}"; + Set toFormat = + new LinkedHashSet() { + { add("Foo"); add("Bar"); add("Foo'bar"); - }}; - DataType dt = DataType.set(DataType.text()); - assertEquals(codecRegistry.codecFor(dt).parse(toParse), toFormat); - assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); - } - - @SuppressWarnings("serial") - @Test(groups = "unit") - public void parseFormatMapTest() { - String toParse = "{'Foo':3,'Bar':42,'Foo''bar':-24}"; - Map toFormat = new LinkedHashMap() {{ + } + }; + DataType dt = DataType.set(DataType.text()); + assertEquals(codecRegistry.codecFor(dt).parse(toParse), toFormat); + assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); + } + + @SuppressWarnings("serial") + @Test(groups = "unit") + public void parseFormatMapTest() { + String toParse = "{'Foo':3,'Bar':42,'Foo''bar':-24}"; + Map toFormat = + new LinkedHashMap() { + { put("Foo", 3); put("Bar", 42); put("Foo'bar", -24); - }}; - DataType dt = DataType.map(DataType.text(), DataType.cint()); - assertEquals(codecRegistry.codecFor(dt).parse(toParse), toFormat); - assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); - } - - @SuppressWarnings("serial") - @Test(groups = "unit") - public void parseFormatUDTTest() { - String toParse = "{t:'fo''o',i:3,\"L\":['a','b'],s:{3:{a:0x01}}}"; - - final UserType udt1 = new UserType("ks", "t", false, Arrays.asList(new UserType.Field("a", DataType.blob())), protocolVersion, codecRegistry); - UserType udt2 = new UserType("ks", "t", false, Arrays.asList( + } + }; + DataType dt = DataType.map(DataType.text(), DataType.cint()); + assertEquals(codecRegistry.codecFor(dt).parse(toParse), toFormat); + assertEquals(codecRegistry.codecFor(dt).format(toFormat), toParse); + } + + @SuppressWarnings("serial") + @Test(groups = "unit") + public void parseFormatUDTTest() { + String toParse = "{t:'fo''o',i:3,\"L\":['a','b'],s:{3:{a:0x01}}}"; + + final UserType udt1 = + new UserType( + "ks", + "t", + false, + Arrays.asList(new UserType.Field("a", DataType.blob())), + protocolVersion, + codecRegistry); + UserType udt2 = + new UserType( + "ks", + "t", + false, + Arrays.asList( new UserType.Field("t", DataType.text()), new UserType.Field("i", DataType.cint()), new UserType.Field("L", DataType.list(DataType.text())), - new UserType.Field("s", DataType.map(DataType.cint(), udt1)) - ), protocolVersion, codecRegistry); - - UDTValue toFormat = udt2.newValue(); - 
toFormat.setString("t", "fo'o"); - toFormat.setInt("i", 3); - toFormat.setList("\"L\"", Arrays.asList("a", "b")); - toFormat.setMap("s", new HashMap() {{ - put(3, udt1.newValue().setBytes("a", ByteBuffer.wrap(new byte[]{1}))); - }}); - - assertEquals(codecRegistry.codecFor(udt2).parse(toParse), toFormat); - assertEquals(codecRegistry.codecFor(udt2).format(toFormat), toParse); + new UserType.Field("s", DataType.map(DataType.cint(), udt1))), + protocolVersion, + codecRegistry); + + UDTValue toFormat = udt2.newValue(); + toFormat.setString("t", "fo'o"); + toFormat.setInt("i", 3); + toFormat.setList("\"L\"", Arrays.asList("a", "b")); + toFormat.setMap( + "s", + new HashMap() { + { + put(3, udt1.newValue().setBytes("a", ByteBuffer.wrap(new byte[] {1}))); + } + }); + + assertEquals(codecRegistry.codecFor(udt2).parse(toParse), toFormat); + assertEquals(codecRegistry.codecFor(udt2).format(toFormat), toParse); + } + + @SuppressWarnings("deprecation") + @Test(groups = "unit") + public void parseFormatTupleTest() { + + String toParse = "(1,'foo',1.0)"; + TupleType t = + new TupleType( + newArrayList(DataType.cint(), DataType.text(), DataType.cfloat()), + protocolVersion, + codecRegistry); + TupleValue toFormat = t.newValue(1, "foo", 1.0f); + + assertEquals(codecRegistry.codecFor(t).parse(toParse), toFormat); + assertEquals(codecRegistry.codecFor(t).format(toFormat), toParse); + } + + @Test(groups = "unit") + public void serializeDeserializeTest() { + for (ProtocolVersion v : ProtocolVersion.values()) serializeDeserializeTest(v); + } + + public void serializeDeserializeTest(ProtocolVersion version) { + + for (DataType dt : DataType.allPrimitiveTypes()) { + if (exclude(dt)) continue; + + Object value = TestUtils.getFixedValue(dt); + TypeCodec codec = codecRegistry.codecFor(dt); + assertEquals(codec.deserialize(codec.serialize(value, version), version), value); } - @SuppressWarnings("deprecation") - @Test(groups = "unit") - public void parseFormatTupleTest() { - - String toParse = "(1,'foo',1.0)"; - TupleType t = new TupleType(newArrayList(DataType.cint(), DataType.text(), DataType.cfloat()), protocolVersion, codecRegistry); - TupleValue toFormat = t.newValue(1, "foo", 1.0f); + TypeCodec codec = codecRegistry.codecFor(DataType.bigint()); - assertEquals(codecRegistry.codecFor(t).parse(toParse), toFormat); - assertEquals(codecRegistry.codecFor(t).format(toFormat), toParse); + try { + ByteBuffer badValue = ByteBuffer.allocate(4); + codec.deserialize(badValue, version); + fail("This should not have worked"); + } catch (InvalidTypeException e) { + /* That's what we want */ } + } - @Test(groups = "unit") - public void serializeDeserializeTest() { - for (ProtocolVersion v : ProtocolVersion.values()) - serializeDeserializeTest(v); - } + @Test(groups = "unit") + public void serializeDeserializeCollectionsTest() { + for (ProtocolVersion v : ProtocolVersion.values()) serializeDeserializeCollectionsTest(v); + } - public void serializeDeserializeTest(ProtocolVersion version) { + public void serializeDeserializeCollectionsTest(ProtocolVersion version) { - for (DataType dt : DataType.allPrimitiveTypes()) { - if (exclude(dt)) - continue; + List l = Arrays.asList("foo", "bar"); - Object value = TestUtils.getFixedValue(dt); - TypeCodec codec = codecRegistry.codecFor(dt); - assertEquals(codec.deserialize(codec.serialize(value, version), version), value); - } + DataType dt = DataType.list(DataType.text()); + TypeCodec> codec = codecRegistry.codecFor(dt); + assertEquals(codec.deserialize(codec.serialize(l, version), 
version), l); - TypeCodec codec = codecRegistry.codecFor(DataType.bigint()); - - try { - ByteBuffer badValue = ByteBuffer.allocate(4); - codec.deserialize(badValue, version); - fail("This should not have worked"); - } catch (InvalidTypeException e) { /* That's what we want */ } - } - - @Test(groups = "unit") - public void serializeDeserializeCollectionsTest() { - for (ProtocolVersion v : ProtocolVersion.values()) - serializeDeserializeCollectionsTest(v); - } - - public void serializeDeserializeCollectionsTest(ProtocolVersion version) { - - List l = Arrays.asList("foo", "bar"); - - DataType dt = DataType.list(DataType.text()); - TypeCodec> codec = codecRegistry.codecFor(dt); - assertEquals(codec.deserialize(codec.serialize(l, version), version), l); - - try { - DataType listOfBigint = DataType.list(DataType.bigint()); - codec = codecRegistry.codecFor(listOfBigint); - codec.serialize(l, version); - fail("This should not have worked"); - } catch (InvalidTypeException e) { /* That's what we want */ } + try { + DataType listOfBigint = DataType.list(DataType.bigint()); + codec = codecRegistry.codecFor(listOfBigint); + codec.serialize(l, version); + fail("This should not have worked"); + } catch (InvalidTypeException e) { + /* That's what we want */ } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterIntegrationTest.java index bb4fc8fd5f7..07415dd4c76 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,32 +17,32 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; + public class DelegatingClusterIntegrationTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_allow_subclass_to_delegate_to_other_instance() { - SimpleDelegatingCluster delegatingCluster = new SimpleDelegatingCluster(cluster()); + @Test(groups = "short") + public void should_allow_subclass_to_delegate_to_other_instance() { + SimpleDelegatingCluster delegatingCluster = new SimpleDelegatingCluster(cluster()); - ResultSet rs = delegatingCluster.connect().execute("select * from system.local"); + ResultSet rs = delegatingCluster.connect().execute("select * from system.local"); - assertThat(rs.all()).hasSize(1); - } + assertThat(rs.all()).hasSize(1); + } - static class SimpleDelegatingCluster extends DelegatingCluster { + static class SimpleDelegatingCluster extends DelegatingCluster { - private final Cluster delegate; + private final Cluster delegate; - public SimpleDelegatingCluster(Cluster delegate) { - this.delegate = delegate; - } + public SimpleDelegatingCluster(Cluster delegate) { + this.delegate = delegate; + } - @Override - protected Cluster delegate() { - return delegate; - } + @Override + protected Cluster delegate() { + return delegate; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterTest.java b/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterTest.java index 1a4481d32ed..430aa5953e9 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DelegatingClusterTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,70 +17,70 @@ */ package com.datastax.driver.core; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; + import com.datastax.driver.core.DelegatingClusterIntegrationTest.SimpleDelegatingCluster; import com.google.common.collect.ImmutableSet; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Set; import org.mockito.Mockito; import org.mockito.exceptions.verification.WantedButNotInvoked; import org.mockito.invocation.Invocation; import org.testng.annotations.Test; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.Set; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; - public class DelegatingClusterTest { - private static final Set NON_DELEGATED_METHODS = ImmutableSet.of("getClusterName"); + private static final Set NON_DELEGATED_METHODS = ImmutableSet.of("getClusterName"); - /** - * Checks that all methods of {@link DelegatingCluster} invoke their counterpart in {@link Cluster}. - * This protects us from forgetting to add a method to the former when it gets added to the latter. - *
<p/>
    - * Note that a much better, compile-time solution would be to make {@link Cluster} an interface, but that's a - * breaking change so it will have to wait until the next major version. - */ - @Test(groups = "unit") - public void should_call_delegate_methods() throws Exception { - Cluster delegate = mock(Cluster.class); - SimpleDelegatingCluster delegatingCluster = new SimpleDelegatingCluster(delegate); + /** + * Checks that all methods of {@link DelegatingCluster} invoke their counterpart in {@link + * Cluster}. This protects us from forgetting to add a method to the former when it gets added to + * the latter. + * + *
<p>
    Note that a much better, compile-time solution would be to make {@link Cluster} an + * interface, but that's a breaking change so it will have to wait until the next major version. + */ + @Test(groups = "unit") + public void should_call_delegate_methods() throws Exception { + Cluster delegate = mock(Cluster.class); + SimpleDelegatingCluster delegatingCluster = new SimpleDelegatingCluster(delegate); - for (Method method : Cluster.class.getMethods()) { - if ((method.getModifiers() & Modifier.STATIC) == Modifier.STATIC || - NON_DELEGATED_METHODS.contains(method.getName()) || - method.getDeclaringClass() == Object.class) { - continue; - } - // we can leave all parameters to null since we're invoking a mock - Object[] parameters = new Object[method.getParameterTypes().length]; - try { - method.invoke(delegatingCluster, parameters); - } catch (Exception ignored) { - } - verify(delegate, method, parameters); - reset(delegate); - } + for (Method method : Cluster.class.getMethods()) { + if ((method.getModifiers() & Modifier.STATIC) == Modifier.STATIC + || NON_DELEGATED_METHODS.contains(method.getName()) + || method.getDeclaringClass() == Object.class) { + continue; + } + // we can leave all parameters to null since we're invoking a mock + Object[] parameters = new Object[method.getParameterTypes().length]; + try { + method.invoke(delegatingCluster, parameters); + } catch (Exception ignored) { + } + verify(delegate, method, parameters); + reset(delegate); } + } - private static void verify(Object mock, Method expectedMethod, Object... expectedArguments) { - out: - for (Invocation invocation : Mockito.mockingDetails(mock).getInvocations()) { - if (invocation.getMethod().equals(expectedMethod)) { - Object[] actualArguments = invocation.getArguments(); - assert actualArguments.length == expectedArguments.length; // because it's the same method - for (int i = 0; i < actualArguments.length; i++) { - Object actual = actualArguments[i]; - Object expected = expectedArguments[i]; - boolean equal = (actual == null) ? expected == null : actual.equals(expected); - if (!equal) { - continue out; - } - } - invocation.markVerified(); - return; - } + private static void verify(Object mock, Method expectedMethod, Object... expectedArguments) { + out: + for (Invocation invocation : Mockito.mockingDetails(mock).getInvocations()) { + if (invocation.getMethod().equals(expectedMethod)) { + Object[] actualArguments = invocation.getArguments(); + assert actualArguments.length == expectedArguments.length; // because it's the same method + for (int i = 0; i < actualArguments.length; i++) { + Object actual = actualArguments[i]; + Object expected = expectedArguments[i]; + boolean equal = (actual == null) ? expected == null : actual.equals(expected); + if (!equal) { + continue out; + } } - throw new WantedButNotInvoked("Not delegated: " + expectedMethod.toString()); + invocation.markVerified(); + return; + } } -} \ No newline at end of file + throw new WantedButNotInvoked("Not delegated: " + expectedMethod.toString()); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/DirectCompressionTest.java b/driver-core/src/test/java/com/datastax/driver/core/DirectCompressionTest.java index d4aa82af679..ea4bd82ad38 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DirectCompressionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DirectCompressionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,28 +22,29 @@ public class DirectCompressionTest extends CompressionTest { - /** - * Validates that a session can be established using snappy compression and executes some queries that inserts and - * retrieves data using that session(). - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test(groups = "short") - public void should_function_with_snappy_compression() throws Exception { - compressionTest(ProtocolOptions.Compression.SNAPPY); - } + /** + * Validates that a session can be established using snappy compression and executes some queries + * that inserts and retrieves data using that session(). + * + * @test_category connection:compression + * @expected_result session established and queries made successfully using it. + */ + @Test(groups = "short") + public void should_function_with_snappy_compression() throws Exception { + skipTestWithCassandraVersionOrHigher("4.0.0", "snappy"); + compressionTest(ProtocolOptions.Compression.SNAPPY); + } - /** - * Validates that a session can be established using lz4 compression and executes some queries that inserts and - * retrieves data using that session(). - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_function_with_lz4_compression() throws Exception { - compressionTest(ProtocolOptions.Compression.LZ4); - } + /** + * Validates that a session can be established using lz4 compression and executes some queries + * that inserts and retrieves data using that session(). + * + * @test_category connection:compression + * @expected_result session established and queries made successfully using it. + */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_function_with_lz4_compression() throws Exception { + compressionTest(ProtocolOptions.Compression.LZ4); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DirectedGraphTest.java b/driver-core/src/test/java/com/datastax/driver/core/DirectedGraphTest.java index 65d713ccd55..9cf804fa83a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DirectedGraphTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DirectedGraphTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,81 +17,125 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.DriverInternalError; +import java.util.Comparator; +import java.util.List; import org.testng.annotations.Test; -import java.util.List; +public class DirectedGraphTest { -import static org.assertj.core.api.Assertions.assertThat; + private Comparator alphaComparator = + new Comparator() { -public class DirectedGraphTest { - @Test(groups = "unit") - public void should_sort_empty_graph() { - DirectedGraph g = new DirectedGraph(); - assertThat(g.topologicalSort()).isEmpty(); - } - - @Test(groups = "unit") - public void should_sort_graph_with_one_node() { - DirectedGraph g = new DirectedGraph("A"); - assertThat(g.topologicalSort()) - .containsExactly("A"); - } - - @Test(groups = "unit") - public void should_sort_complex_graph() { - // H G - // / \ /\ - // F | E - // \ / / - // D / - // / \/ - // B C - // | - // A - DirectedGraph g = new DirectedGraph("A", "B", "C", "D", "E", "F", "G", "H"); - g.addEdge("H", "F"); - g.addEdge("G", "E"); - g.addEdge("H", "D"); - g.addEdge("F", "D"); - g.addEdge("G", "D"); - g.addEdge("D", "C"); - g.addEdge("E", "C"); - g.addEdge("D", "B"); - g.addEdge("B", "A"); - - // Topological sort order should be : GH,FE,D,CB,A - // There's no guarantee on the order within the same level, so we use sublists: - List sorted = g.topologicalSort(); - assertThat(sorted.subList(0, 2)) - .contains("G", "H"); - assertThat(sorted.subList(2, 4)) - .contains("F", "E"); - assertThat(sorted.subList(4, 5)) - .contains("D"); - assertThat(sorted.subList(5, 7)) - .contains("C", "B"); - assertThat(sorted.subList(7, 8)) - .contains("A"); - } - - @Test(groups = "unit", expectedExceptions = DriverInternalError.class) - public void should_fail_to_sort_if_graph_has_a_cycle() { - DirectedGraph g = new DirectedGraph("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "B"); - - g.topologicalSort(); - } - - @Test(groups = "unit", expectedExceptions = DriverInternalError.class) - public void should_fail_to_sort_if_graph_is_a_cycle() { - DirectedGraph g = new DirectedGraph("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "A"); - - g.topologicalSort(); - } -} \ No newline at end of file + @Override + public int compare(String o1, String o2) { + return o1.compareTo(o2); + } + }; + + @Test(groups = "unit") + public void should_sort_empty_graph() { + DirectedGraph g = new DirectedGraph(alphaComparator); + assertThat(g.topologicalSort()).isEmpty(); + } + + @Test(groups = "unit") + public void should_sort_graph_with_one_node() { + DirectedGraph g = new DirectedGraph(alphaComparator, "A"); + assertThat(g.topologicalSort()).containsExactly("A"); + } + + @Test(groups = "unit") + public void should_sort_complex_graph() { + // H G + // / \ /\ + // F | E + // \ / / + // D / + // / \/ + 
// B C + // | + // A + DirectedGraph g = + new DirectedGraph(alphaComparator, "A", "B", "C", "D", "E", "F", "G", "H"); + g.addEdge("H", "F"); + g.addEdge("G", "E"); + g.addEdge("H", "D"); + g.addEdge("F", "D"); + g.addEdge("G", "D"); + g.addEdge("D", "C"); + g.addEdge("E", "C"); + g.addEdge("D", "B"); + g.addEdge("B", "A"); + + // Topological sort order should be : GH,E,F,D,BC,A + List sorted = g.topologicalSort(); + assertThat(sorted).containsExactly("G", "H", "E", "F", "D", "B", "C", "A"); + } + + @Test(groups = "unit") + public void should_sort_complex_custom_comparator() { + // Version of should_sort_complex_graph using a custom comparator based on ordering largest + // values first. + // This is counter to how hashmaps should usually behave, so this should help ensure that the + // comparator is + // being used. + Comparator highFirst = + new Comparator() { + @Override + public int compare(Integer o1, Integer o2) { + return o2 - o1; + } + }; + + // sort graph and use a alphaComparator that favors larger values ordered first. + // 7 6 + // / \ /\ + // 5 | 10 + // \ / / + // 9 / + // / \/ + // 1 2 + // | + // 0 + DirectedGraph g = new DirectedGraph(highFirst, 0, 1, 2, 9, 10, 5, 6, 7); + g.addEdge(7, 5); + g.addEdge(6, 10); + g.addEdge(7, 9); + g.addEdge(5, 9); + g.addEdge(6, 9); + g.addEdge(9, 2); + g.addEdge(10, 2); + g.addEdge(9, 1); + g.addEdge(1, 0); + + // Topological sort order should be : [7,6],[5],[10],[9],[2,1],[0] + // 5 comes before 10 even though they appear at the same depth. This happens because 5's (7) + // dependency + // is evaluated before 10's (6), so it is placed first. + List sorted = g.topologicalSort(); + assertThat(sorted).containsExactly(7, 6, 5, 10, 9, 2, 1, 0); + } + + @Test(groups = "unit", expectedExceptions = DriverInternalError.class) + public void should_fail_to_sort_if_graph_has_a_cycle() { + DirectedGraph g = new DirectedGraph(alphaComparator, "A", "B", "C"); + g.addEdge("A", "B"); + g.addEdge("B", "C"); + g.addEdge("C", "B"); + + g.topologicalSort(); + } + + @Test(groups = "unit", expectedExceptions = DriverInternalError.class) + public void should_fail_to_sort_if_graph_is_a_cycle() { + DirectedGraph g = new DirectedGraph(alphaComparator, "A", "B", "C"); + g.addEdge("A", "B"); + g.addEdge("B", "C"); + g.addEdge("C", "A"); + + g.topologicalSort(); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/DseCCMClusterTest.java b/driver-core/src/test/java/com/datastax/driver/core/DseCCMClusterTest.java index 7c0e6b423d1..5460ae48e9c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DseCCMClusterTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DseCCMClusterTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,76 +17,80 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static com.datastax.driver.core.CCMAccess.Workload.cassandra; +import static com.datastax.driver.core.CCMAccess.Workload.solr; +import static com.datastax.driver.core.CCMAccess.Workload.spark; -import static com.datastax.driver.core.CCMAccess.Workload.*; +import org.testng.annotations.Test; /** * A simple test to validate DSE setups. - *
<p/>
+ *
+ * <p>
+ *
 * <b>Running all tests against DSE</b>
- * <p/>
- * To run tests globally against DSE, set the system property {@code dse}
- * to {@code true}.
- * <p/>
- * When this flag is provided, it is assumed a DSE version is passed under
- * the system property {@code cassandra.version}.
- * A mapping for determining C* version from DSE version is described in {@link CCMBridge}.
- * <p/>
- * Example usages:
- * <p/>
- * DSE 4.8.3:
+ *
+ * <p>To run tests globally against DSE, set the system property {@code dse} to {@code true}.
+ *
+ * <p>When this flag is provided, it is assumed a DSE version is passed under the system property
+ * {@code cassandra.version}. A mapping for determining C* version from DSE version is described in
+ * {@link CCMBridge}.
+ *
+ * <p>Example usages:
+ *
+ * <p>DSE 4.8.3:
+ *
 * <pre>
 * -Ddse -Dcassandra.version=4.8.3
 * -Ddse=true -Dcassandra.version=4.8.3
 * </pre>
- * <p/>
- * Custom local install of DSE 5.0 (using {@code cassandra.directory} instead of {@code cassandra.version}):
+ *
+ * <p>Custom local install of DSE 5.0 (using {@code cassandra.directory} instead of {@code
+ * cassandra.version}):
+ *
 * <pre>
 * -Dcassandra.version=5.0 -Ddse -Dcassandra.directory=/path/to/dse
 * </pre>
- * <p/>
+ *
+ * <p>
+ *
 * <b>Running a specific test against DSE</b>
- * <p/>
- * Set the following properties on the test:
+ *
+ * <p>Set the following properties on the test:
+ *
 * <pre>{@code @CCMConfig(dse = true, version = "4.8.3")}</pre>
 *
 * <b>Supplying DSE credentials</b>
 *
- * Rather than adding system properties for DSE credentials,
- * DSE tests rely on a recent change in CCM to support providing
- * credentials via {@code $HOME/.ccm/.dse.ini}.
+ * Rather than adding system properties for DSE credentials, DSE tests rely on a recent change in
+ * CCM to support providing credentials via {@code $HOME/.ccm/.dse.ini}.
+ *
+ * <p>The contents of this file need to be formed in this way:
 *
- * The contents of this file need to be formed in this way:
 * <pre>
 * [dse_credentials]
 * dse_username = myusername
 * dse_password = mypassword
 * </pre>
- * <p/>
+ *
+ * <p>
+ *
 * <b>Other requirements</b>
- * <p/>
- * DSE requires your {@code PATH} variable to provide access
- * to super-user executables in {@code /usr/sbin}.
- * <p/>
- * A correct example is as follows: {@code /usr/bin:/usr/local/bin:/bin:/usr/sbin:$JAVA_HOME/bin:$PATH}.
+ *
+ * <p>DSE requires your {@code PATH} variable to provide access to super-user executables in {@code
+ * /usr/sbin}.
+ *
+ * <p>
    A correct example is as follows: {@code + * /usr/bin:/usr/local/bin:/bin:/usr/sbin:$JAVA_HOME/bin:$PATH}. */ @Test(enabled = false) @CCMConfig( - dse = true, - numberOfNodes = 3, - version = "4.8.3", - workloads = { - @CCMWorkload(solr), - @CCMWorkload({spark, solr}), - @CCMWorkload({cassandra, spark}) - } -) + dse = true, + numberOfNodes = 3, + version = "4.8.3", + workloads = {@CCMWorkload(solr), @CCMWorkload({spark, solr}), @CCMWorkload({cassandra, spark})}) public class DseCCMClusterTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_conenct_to_dse() throws InterruptedException { - - } - + @Test(groups = "short") + public void should_conenct_to_dse() throws InterruptedException {} } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DurationCodecTest.java b/driver-core/src/test/java/com/datastax/driver/core/DurationCodecTest.java index e57d9e83aea..181dd044084 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DurationCodecTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DurationCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,78 +17,83 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; - -import static com.datastax.driver.core.Duration.*; +import static com.datastax.driver.core.Duration.NANOS_PER_HOUR; +import static com.datastax.driver.core.Duration.NANOS_PER_MICRO; +import static com.datastax.driver.core.Duration.NANOS_PER_MILLI; +import static com.datastax.driver.core.Duration.NANOS_PER_MINUTE; +import static com.datastax.driver.core.Duration.NANOS_PER_SECOND; import static com.datastax.driver.core.ProtocolVersion.V4; import static org.assertj.core.api.Assertions.assertThat; -public class DurationCodecTest { +import java.nio.ByteBuffer; +import org.testng.annotations.Test; - @Test(groups = "unit") - public void testFromStringWithStandardPattern() { - assertCodec("1y2mo", Duration.newInstance(14, 0, 0)); - assertCodec("-1y2mo", Duration.newInstance(-14, 0, 0)); - assertCodec("1Y2MO", Duration.newInstance(14, 0, 0)); - assertCodec("2w", Duration.newInstance(0, 14, 0)); - assertCodec("2d10h", Duration.newInstance(0, 2, 10 * NANOS_PER_HOUR)); - assertCodec("2d", Duration.newInstance(0, 2, 0)); - assertCodec("30h", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertCodec("30h20m", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertCodec("20m", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertCodec("56s", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertCodec("567ms", Duration.newInstance(0, 0, 567 * NANOS_PER_MILLI)); - 
assertCodec("1950us", Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); - assertCodec("1950µs", Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); - assertCodec("1950000ns", Duration.newInstance(0, 0, 1950000)); - assertCodec("1950000NS", Duration.newInstance(0, 0, 1950000)); - assertCodec("-1950000ns", Duration.newInstance(0, 0, -1950000)); - assertCodec("1y3mo2h10m", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } +public class DurationCodecTest { - @Test(groups = "unit") - public void testFromStringWithIso8601Pattern() { - assertCodec("P1Y2D", Duration.newInstance(12, 2, 0)); - assertCodec("P1Y2M", Duration.newInstance(14, 0, 0)); - assertCodec("P2W", Duration.newInstance(0, 14, 0)); - assertCodec("P1YT2H", Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); - assertCodec("-P1Y2M", Duration.newInstance(-14, 0, 0)); - assertCodec("P2D", Duration.newInstance(0, 2, 0)); - assertCodec("PT30H", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertCodec("PT30H20M", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertCodec("PT20M", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertCodec("PT56S", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertCodec("P1Y3MT2H10M", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } + @Test(groups = "unit") + public void testFromStringWithStandardPattern() { + assertCodec("1y2mo", Duration.newInstance(14, 0, 0)); + assertCodec("-1y2mo", Duration.newInstance(-14, 0, 0)); + assertCodec("1Y2MO", Duration.newInstance(14, 0, 0)); + assertCodec("2w", Duration.newInstance(0, 14, 0)); + assertCodec("2d10h", Duration.newInstance(0, 2, 10 * NANOS_PER_HOUR)); + assertCodec("2d", Duration.newInstance(0, 2, 0)); + assertCodec("30h", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertCodec("30h20m", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertCodec("20m", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertCodec("56s", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertCodec("567ms", Duration.newInstance(0, 0, 567 * NANOS_PER_MILLI)); + assertCodec("1950us", Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); + assertCodec("1950µs", Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); + assertCodec("1950000ns", Duration.newInstance(0, 0, 1950000)); + assertCodec("1950000NS", Duration.newInstance(0, 0, 1950000)); + assertCodec("-1950000ns", Duration.newInstance(0, 0, -1950000)); + assertCodec("1y3mo2h10m", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } - @Test(groups = "unit") - public void testFromStringWithIso8601AlternativePattern() { - assertCodec("P0001-00-02T00:00:00", Duration.newInstance(12, 2, 0)); - assertCodec("P0001-02-00T00:00:00", Duration.newInstance(14, 0, 0)); - assertCodec("P0001-00-00T02:00:00", Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); - assertCodec("-P0001-02-00T00:00:00", Duration.newInstance(-14, 0, 0)); - assertCodec("P0000-00-02T00:00:00", Duration.newInstance(0, 2, 0)); - assertCodec("P0000-00-00T30:00:00", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertCodec("P0000-00-00T30:20:00", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertCodec("P0000-00-00T00:20:00", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertCodec("P0000-00-00T00:00:56", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertCodec("P0001-03-00T02:10:00", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } + @Test(groups = "unit") + public void 
testFromStringWithIso8601Pattern() { + assertCodec("P1Y2D", Duration.newInstance(12, 2, 0)); + assertCodec("P1Y2M", Duration.newInstance(14, 0, 0)); + assertCodec("P2W", Duration.newInstance(0, 14, 0)); + assertCodec("P1YT2H", Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); + assertCodec("-P1Y2M", Duration.newInstance(-14, 0, 0)); + assertCodec("P2D", Duration.newInstance(0, 2, 0)); + assertCodec("PT30H", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertCodec( + "PT30H20M", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertCodec("PT20M", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertCodec("PT56S", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertCodec("P1Y3MT2H10M", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } - private void assertCodec(String input, Duration expected) { - // serialize + deserialize - ByteBuffer bytes = TypeCodec.duration().serialize(Duration.from(input), V4); - Duration actual = TypeCodec.duration().deserialize(bytes, V4); - assertThat(actual).isEqualTo(expected); - // format + parse - String format = TypeCodec.duration().format(Duration.from(input)); - actual = TypeCodec.duration().parse(format); - assertThat(actual).isEqualTo(expected); - // parse alone - actual = TypeCodec.duration().parse(input); - assertThat(actual).isEqualTo(expected); - } + @Test(groups = "unit") + public void testFromStringWithIso8601AlternativePattern() { + assertCodec("P0001-00-02T00:00:00", Duration.newInstance(12, 2, 0)); + assertCodec("P0001-02-00T00:00:00", Duration.newInstance(14, 0, 0)); + assertCodec("P0001-00-00T02:00:00", Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); + assertCodec("-P0001-02-00T00:00:00", Duration.newInstance(-14, 0, 0)); + assertCodec("P0000-00-02T00:00:00", Duration.newInstance(0, 2, 0)); + assertCodec("P0000-00-00T30:00:00", Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertCodec( + "P0000-00-00T30:20:00", + Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertCodec("P0000-00-00T00:20:00", Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertCodec("P0000-00-00T00:00:56", Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertCodec("P0001-03-00T02:10:00", Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } + private void assertCodec(String input, Duration expected) { + // serialize + deserialize + ByteBuffer bytes = TypeCodec.duration().serialize(Duration.from(input), V4); + Duration actual = TypeCodec.duration().deserialize(bytes, V4); + assertThat(actual).isEqualTo(expected); + // format + parse + String format = TypeCodec.duration().format(Duration.from(input)); + actual = TypeCodec.duration().parse(format); + assertThat(actual).isEqualTo(expected); + // parse alone + actual = TypeCodec.duration().parse(input); + assertThat(actual).isEqualTo(expected); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DurationIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/DurationIntegrationTest.java index 3c8a01ba20b..16f6a20e15c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DurationIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DurationIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,97 +17,95 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; +import java.util.UUID; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.util.UUID; - -import static com.datastax.driver.core.Assertions.assertThat; - @CassandraVersion("3.10") public class DurationIntegrationTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test_duration (pk uuid PRIMARY KEY, c1 duration)"); - } - - @DataProvider - Object[][] durations() { - return new Object[][]{ - {"1y2mo"}, - {"-1y2mo"}, - {"1Y2MO"}, - {"2w"}, - {"2d10h"}, - {"2d"}, - {"30h"}, - {"30h20m"}, - {"20m"}, - {"56s"}, - {"567ms"}, - {"1950us"}, - {"1950µs"}, - {"1950000ns"}, - {"1950000NS"}, - {"-1950000ns"}, - {"1y3mo2h10m"}, - {"P1Y2D"}, - {"P1Y2M"}, - {"P2W"}, - {"P1YT2H"}, - {"-P1Y2M"}, - {"P2D"}, - {"PT30H"}, - {"PT30H20M"}, - {"PT20M"}, - {"PT56S"}, - {"P1Y3MT2H10M"}, - {"P0001-00-02T00:00:00"}, - {"P0001-02-00T00:00:00"}, - {"P0001-00-00T02:00:00"}, - {"-P0001-02-00T00:00:00"}, - {"P0000-00-02T00:00:00"}, - {"P0000-00-00T30:00:00"}, - {"P0000-00-00T30:20:00"}, - {"P0000-00-00T00:20:00"}, - {"P0000-00-00T00:00:56"}, - {"P0001-03-00T02:10:00"} - }; - } - - /** - * Validates that columns using the duration type are properly handled by the driver when used as a parameter - * and retrieved in a row result for a variety of sample inputs. 
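The javadoc above (kept, reformatted, in the added lines that follow) describes round-tripping CQL {@code duration} values through the driver. As a minimal sketch of the same API the test exercises, assuming a {@code Session} bound to a keyspace that contains the {@code test_duration} table created in {@code onTestContextInitialized()}:

```java
// Sketch only: mirrors the insert/read statements used by the test below.
UUID id = UUID.randomUUID();
Duration fifteenMonths = Duration.from("1y3mo2h10m"); // standard (non-ISO) duration syntax
session.execute("INSERT INTO test_duration (pk, c1) VALUES (?, ?)", id, fifteenMonths);
Row row = session.execute("SELECT c1 FROM test_duration WHERE pk = ?", id).one();
Duration readBack = row.get("c1", Duration.class); // equal to fifteenMonths
```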
- * - * @jira_ticket JAVA-1347 - * @test_category metadata - */ - @Test(groups = "short", dataProvider = "durations") - public void should_serialize_and_deserialize_durations(String durationStr) { - // read and write - UUID id = UUID.randomUUID(); - Duration expected = Duration.from(durationStr); - session().execute("INSERT INTO test_duration (pk, c1) VALUES (?, ?)", id, expected); - Row row = session().execute("SELECT c1 from test_duration WHERE pk = ?", id).one(); - Duration actual = row.get("c1", Duration.class); - assertThat(actual).isEqualTo(expected); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test_duration (pk uuid PRIMARY KEY, c1 duration)"); + } + @DataProvider + Object[][] durations() { + return new Object[][] { + {"1y2mo"}, + {"-1y2mo"}, + {"1Y2MO"}, + {"2w"}, + {"2d10h"}, + {"2d"}, + {"30h"}, + {"30h20m"}, + {"20m"}, + {"56s"}, + {"567ms"}, + {"1950us"}, + {"1950µs"}, + {"1950000ns"}, + {"1950000NS"}, + {"-1950000ns"}, + {"1y3mo2h10m"}, + {"P1Y2D"}, + {"P1Y2M"}, + {"P2W"}, + {"P1YT2H"}, + {"-P1Y2M"}, + {"P2D"}, + {"PT30H"}, + {"PT30H20M"}, + {"PT20M"}, + {"PT56S"}, + {"P1Y3MT2H10M"}, + {"P0001-00-02T00:00:00"}, + {"P0001-02-00T00:00:00"}, + {"P0001-00-00T02:00:00"}, + {"-P0001-02-00T00:00:00"}, + {"P0000-00-02T00:00:00"}, + {"P0000-00-00T30:00:00"}, + {"P0000-00-00T30:20:00"}, + {"P0000-00-00T00:20:00"}, + {"P0000-00-00T00:00:56"}, + {"P0001-03-00T02:10:00"} + }; + } - /** - * Validates that columns using the duration type are properly represented in {@link TableMetadata}. - * - * @jira_ticket JAVA-1347 - * @test_category metadata - */ - @Test(groups = "short") - public void should_parse_column_metadata() { - // column metadata - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test_duration"); - assertThat(table.getColumn("c1")).hasType(DataType.duration()); - assertThat(table.asCQLQuery()).contains("c1 duration"); - } + /** + * Validates that columns using the duration type are properly handled by the driver when used as + * a parameter and retrieved in a row result for a variety of sample inputs. + * + * @jira_ticket JAVA-1347 + * @test_category metadata + */ + @Test(groups = "short", dataProvider = "durations") + public void should_serialize_and_deserialize_durations(String durationStr) { + // read and write + UUID id = UUID.randomUUID(); + Duration expected = Duration.from(durationStr); + session().execute("INSERT INTO test_duration (pk, c1) VALUES (?, ?)", id, expected); + Row row = session().execute("SELECT c1 from test_duration WHERE pk = ?", id).one(); + Duration actual = row.get("c1", Duration.class); + assertThat(actual).isEqualTo(expected); + } + /** + * Validates that columns using the duration type are properly represented in {@link + * TableMetadata}. 
+ * + * @jira_ticket JAVA-1347 + * @test_category metadata + */ + @Test(groups = "short") + public void should_parse_column_metadata() { + // column metadata + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("test_duration"); + assertThat(table.getColumn("c1")).hasType(DataType.duration()); + assertThat(table.asCQLQuery()).contains("c1 duration"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/DurationTest.java b/driver-core/src/test/java/com/datastax/driver/core/DurationTest.java index 558fe8b5f24..d5649483612 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/DurationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/DurationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,85 +17,103 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - -import static com.datastax.driver.core.Duration.*; +import static com.datastax.driver.core.Duration.NANOS_PER_HOUR; +import static com.datastax.driver.core.Duration.NANOS_PER_MICRO; +import static com.datastax.driver.core.Duration.NANOS_PER_MILLI; +import static com.datastax.driver.core.Duration.NANOS_PER_MINUTE; +import static com.datastax.driver.core.Duration.NANOS_PER_SECOND; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; +import org.testng.annotations.Test; + public class DurationTest { - @Test(groups = "unit") - public void testFromStringWithStandardPattern() { - assertThat(Duration.from("1y2mo")).isEqualTo(Duration.newInstance(14, 0, 0)); - assertThat(Duration.from("-1y2mo")).isEqualTo(Duration.newInstance(-14, 0, 0)); - assertThat(Duration.from("1Y2MO")).isEqualTo(Duration.newInstance(14, 0, 0)); - assertThat(Duration.from("2w")).isEqualTo(Duration.newInstance(0, 14, 0)); - assertThat(Duration.from("2d10h")).isEqualTo(Duration.newInstance(0, 2, 10 * NANOS_PER_HOUR)); - assertThat(Duration.from("2d")).isEqualTo(Duration.newInstance(0, 2, 0)); - assertThat(Duration.from("30h")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertThat(Duration.from("30h20m")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("20m")).isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("56s")).isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertThat(Duration.from("567ms")).isEqualTo(Duration.newInstance(0, 0, 567 * NANOS_PER_MILLI)); - assertThat(Duration.from("1950us")).isEqualTo(Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); - assertThat(Duration.from("1950µs")).isEqualTo(Duration.newInstance(0, 0, 1950 * 
NANOS_PER_MICRO)); - assertThat(Duration.from("1950000ns")).isEqualTo(Duration.newInstance(0, 0, 1950000)); - assertThat(Duration.from("1950000NS")).isEqualTo(Duration.newInstance(0, 0, 1950000)); - assertThat(Duration.from("-1950000ns")).isEqualTo(Duration.newInstance(0, 0, -1950000)); - assertThat(Duration.from("1y3mo2h10m")).isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } + @Test(groups = "unit") + public void testFromStringWithStandardPattern() { + assertThat(Duration.from("1y2mo")).isEqualTo(Duration.newInstance(14, 0, 0)); + assertThat(Duration.from("-1y2mo")).isEqualTo(Duration.newInstance(-14, 0, 0)); + assertThat(Duration.from("1Y2MO")).isEqualTo(Duration.newInstance(14, 0, 0)); + assertThat(Duration.from("2w")).isEqualTo(Duration.newInstance(0, 14, 0)); + assertThat(Duration.from("2d10h")).isEqualTo(Duration.newInstance(0, 2, 10 * NANOS_PER_HOUR)); + assertThat(Duration.from("2d")).isEqualTo(Duration.newInstance(0, 2, 0)); + assertThat(Duration.from("30h")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertThat(Duration.from("30h20m")) + .isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("20m")).isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("56s")).isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertThat(Duration.from("567ms")).isEqualTo(Duration.newInstance(0, 0, 567 * NANOS_PER_MILLI)); + assertThat(Duration.from("1950us")) + .isEqualTo(Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); + assertThat(Duration.from("1950µs")) + .isEqualTo(Duration.newInstance(0, 0, 1950 * NANOS_PER_MICRO)); + assertThat(Duration.from("1950000ns")).isEqualTo(Duration.newInstance(0, 0, 1950000)); + assertThat(Duration.from("1950000NS")).isEqualTo(Duration.newInstance(0, 0, 1950000)); + assertThat(Duration.from("-1950000ns")).isEqualTo(Duration.newInstance(0, 0, -1950000)); + assertThat(Duration.from("1y3mo2h10m")) + .isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } - @Test(groups = "unit") - public void testFromStringWithIso8601Pattern() { - assertThat(Duration.from("P1Y2D")).isEqualTo(Duration.newInstance(12, 2, 0)); - assertThat(Duration.from("P1Y2M")).isEqualTo(Duration.newInstance(14, 0, 0)); - assertThat(Duration.from("P2W")).isEqualTo(Duration.newInstance(0, 14, 0)); - assertThat(Duration.from("P1YT2H")).isEqualTo(Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); - assertThat(Duration.from("-P1Y2M")).isEqualTo(Duration.newInstance(-14, 0, 0)); - assertThat(Duration.from("P2D")).isEqualTo(Duration.newInstance(0, 2, 0)); - assertThat(Duration.from("PT30H")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertThat(Duration.from("PT30H20M")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("PT20M")).isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("PT56S")).isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertThat(Duration.from("P1Y3MT2H10M")).isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } + @Test(groups = "unit") + public void testFromStringWithIso8601Pattern() { + assertThat(Duration.from("P1Y2D")).isEqualTo(Duration.newInstance(12, 2, 0)); + assertThat(Duration.from("P1Y2M")).isEqualTo(Duration.newInstance(14, 0, 0)); + assertThat(Duration.from("P2W")).isEqualTo(Duration.newInstance(0, 14, 0)); + 
assertThat(Duration.from("P1YT2H")).isEqualTo(Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); + assertThat(Duration.from("-P1Y2M")).isEqualTo(Duration.newInstance(-14, 0, 0)); + assertThat(Duration.from("P2D")).isEqualTo(Duration.newInstance(0, 2, 0)); + assertThat(Duration.from("PT30H")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertThat(Duration.from("PT30H20M")) + .isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("PT20M")).isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("PT56S")).isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertThat(Duration.from("P1Y3MT2H10M")) + .isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } - @Test(groups = "unit") - public void testFromStringWithIso8601AlternativePattern() { - assertThat(Duration.from("P0001-00-02T00:00:00")).isEqualTo(Duration.newInstance(12, 2, 0)); - assertThat(Duration.from("P0001-02-00T00:00:00")).isEqualTo(Duration.newInstance(14, 0, 0)); - assertThat(Duration.from("P0001-00-00T02:00:00")).isEqualTo(Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); - assertThat(Duration.from("-P0001-02-00T00:00:00")).isEqualTo(Duration.newInstance(-14, 0, 0)); - assertThat(Duration.from("P0000-00-02T00:00:00")).isEqualTo(Duration.newInstance(0, 2, 0)); - assertThat(Duration.from("P0000-00-00T30:00:00")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); - assertThat(Duration.from("P0000-00-00T30:20:00")).isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("P0000-00-00T00:20:00")).isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); - assertThat(Duration.from("P0000-00-00T00:00:56")).isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); - assertThat(Duration.from("P0001-03-00T02:10:00")).isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); - } + @Test(groups = "unit") + public void testFromStringWithIso8601AlternativePattern() { + assertThat(Duration.from("P0001-00-02T00:00:00")).isEqualTo(Duration.newInstance(12, 2, 0)); + assertThat(Duration.from("P0001-02-00T00:00:00")).isEqualTo(Duration.newInstance(14, 0, 0)); + assertThat(Duration.from("P0001-00-00T02:00:00")) + .isEqualTo(Duration.newInstance(12, 0, 2 * NANOS_PER_HOUR)); + assertThat(Duration.from("-P0001-02-00T00:00:00")).isEqualTo(Duration.newInstance(-14, 0, 0)); + assertThat(Duration.from("P0000-00-02T00:00:00")).isEqualTo(Duration.newInstance(0, 2, 0)); + assertThat(Duration.from("P0000-00-00T30:00:00")) + .isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR)); + assertThat(Duration.from("P0000-00-00T30:20:00")) + .isEqualTo(Duration.newInstance(0, 0, 30 * NANOS_PER_HOUR + 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("P0000-00-00T00:20:00")) + .isEqualTo(Duration.newInstance(0, 0, 20 * NANOS_PER_MINUTE)); + assertThat(Duration.from("P0000-00-00T00:00:56")) + .isEqualTo(Duration.newInstance(0, 0, 56 * NANOS_PER_SECOND)); + assertThat(Duration.from("P0001-03-00T02:10:00")) + .isEqualTo(Duration.newInstance(15, 0, 130 * NANOS_PER_MINUTE)); + } - @Test(groups = "unit") - public void testInvalidDurations() { - assertInvalidDuration(Long.MAX_VALUE + "d", "Invalid duration. 
The total number of days must be less or equal to 2147483647"); - assertInvalidDuration("2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("-2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("12.5s", "Unable to convert '12.5s' to a duration"); - assertInvalidDuration("2m12.5s", "Unable to convert '2m12.5s' to a duration"); - assertInvalidDuration("2m-12s", "Unable to convert '2m-12s' to a duration"); - assertInvalidDuration("12s3s", "Invalid duration. The seconds are specified multiple times"); - assertInvalidDuration("12s3m", "Invalid duration. The seconds should be after minutes"); - assertInvalidDuration("1Y3M4D", "Invalid duration. The minutes should be after days"); - assertInvalidDuration("P2Y3W", "Unable to convert 'P2Y3W' to a duration"); - assertInvalidDuration("P0002-00-20", "Unable to convert 'P0002-00-20' to a duration"); - } + @Test(groups = "unit") + public void testInvalidDurations() { + assertInvalidDuration( + Long.MAX_VALUE + "d", + "Invalid duration. The total number of days must be less or equal to 2147483647"); + assertInvalidDuration("2µ", "Unable to convert '2µ' to a duration"); + assertInvalidDuration("-2µ", "Unable to convert '2µ' to a duration"); + assertInvalidDuration("12.5s", "Unable to convert '12.5s' to a duration"); + assertInvalidDuration("2m12.5s", "Unable to convert '2m12.5s' to a duration"); + assertInvalidDuration("2m-12s", "Unable to convert '2m-12s' to a duration"); + assertInvalidDuration("12s3s", "Invalid duration. The seconds are specified multiple times"); + assertInvalidDuration("12s3m", "Invalid duration. The seconds should be after minutes"); + assertInvalidDuration("1Y3M4D", "Invalid duration. The minutes should be after days"); + assertInvalidDuration("P2Y3W", "Unable to convert 'P2Y3W' to a duration"); + assertInvalidDuration("P0002-00-20", "Unable to convert 'P0002-00-20' to a duration"); + } - public void assertInvalidDuration(String duration, String expectedErrorMessage) { - try { - Duration.from(duration); - fail("Expected RuntimeException"); - } catch (RuntimeException e) { - assertThat(e.getMessage()).isEqualTo(expectedErrorMessage); - } + public void assertInvalidDuration(String duration, String expectedErrorMessage) { + try { + Duration.from(duration); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + assertThat(e.getMessage()).isEqualTo(expectedErrorMessage); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/EndPoints.java b/driver-core/src/test/java/com/datastax/driver/core/EndPoints.java new file mode 100644 index 00000000000..2154d1884f1 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/EndPoints.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import java.net.InetSocketAddress; + +/** + * Utility class to create endpoints in tests. This serves mainly as a hook to access + * package-private classes. + */ +public class EndPoints { + + public static EndPoint forAddress(InetSocketAddress address) { + return new TranslatedAddressEndPoint(address); + } + + public static EndPoint forAddress(String host, int port) { + return forAddress(new InetSocketAddress(host, port)); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java index 327ae7d1b51..0d4e81be899 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,144 +17,141 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.TestUtils.ipOfNode; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; import java.util.Collection; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.ipOfNode; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; -import static org.mockito.Mockito.*; +import org.testng.annotations.Test; @CreateCCM(PER_METHOD) public class EventDebouncerIntegrationTest extends CCMTestsSupport { - /** - * Tests that DOWN, UP, REMOVE or ADD events will not be delivered to - * load balancing policy nor host state listeners - * before the cluster is fully initialized. 
- * - * @throws InterruptedException - * @jira_ticket JAVA-784 - * @since 2.0.11 - */ - @CCMConfig(numberOfNodes = 3, createCluster = false, dirtiesContext = true) - @Test(groups = "long") - public void should_wait_until_load_balancing_policy_is_fully_initialized() throws InterruptedException { - TestLoadBalancingPolicy policy = new TestLoadBalancingPolicy(); - final Cluster cluster = register(createClusterBuilderNoDebouncing() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(policy).build()); - new Thread() { - @Override - public void run() { - cluster.init(); - } - }.start(); - // stop cluster initialization in the middle of LBP initialization - policy.stop(); - // generate a DOWN event - will not be delivered immediately - // because the debouncers are not started - // note: a graceful stop notify other nodes which send a topology change event to the driver right away - // while forceStop kills the node so other nodes take much more time to detect node failure - ccm().stop(3); - ccm().waitForDown(3); - // finish cluster initialization and deliver the DOWN event - policy.proceed(); - assertThat(policy.onDownCalledBeforeInit).isFalse(); - assertThat(policy.onDownCalled()).isTrue(); - assertThat(policy.hosts).doesNotContain(TestUtils.findHost(cluster, 3)); + /** + * Tests that DOWN, UP, REMOVE or ADD events will not be delivered to load balancing policy nor + * host state listeners before the cluster is fully initialized. + * + * @throws InterruptedException + * @jira_ticket JAVA-784 + * @since 2.0.11 + */ + @CCMConfig(numberOfNodes = 3, createCluster = false, dirtiesContext = true) + @Test(groups = "long") + public void should_wait_until_load_balancing_policy_is_fully_initialized() + throws InterruptedException { + TestLoadBalancingPolicy policy = new TestLoadBalancingPolicy(); + final Cluster cluster = + register(createClusterBuilderNoDebouncing().withLoadBalancingPolicy(policy).build()); + new Thread() { + @Override + public void run() { + cluster.init(); + } + }.start(); + // stop cluster initialization in the middle of LBP initialization + policy.stop(); + // generate a DOWN event - will not be delivered immediately + // because the debouncers are not started + // note: a graceful stop notify other nodes which send a topology change event to the driver + // right away + // while forceStop kills the node so other nodes take much more time to detect node failure + ccm().stop(3); + ccm().waitForDown(3); + // finish cluster initialization and deliver the DOWN event + policy.proceed(); + assertThat(policy.onDownCalledBeforeInit).isFalse(); + assertThat(policy.onDownCalled()).isTrue(); + assertThat(policy.hosts).doesNotContain(TestUtils.findHost(cluster, 3)); + } + + /** + * Tests that settings for a debouncer can be modified dynamically without requiring the cluster + * to be restarted. + * + * @throws InterruptedException + * @jira_ticket JAVA-1192 + */ + @CCMConfig(numberOfNodes = 1) + @Test(groups = "short") + public void should_change_debouncer_settings_dynamically() throws InterruptedException { + // Create a spy of the Cluster's control connection and replace it with the spy. 
+ ControlConnection controlConnection = spy(cluster().manager.controlConnection); + cluster().manager.controlConnection = controlConnection; + for (int i = 0; i < 10; i++) { + cluster().manager.submitNodeListRefresh(); + Thread.sleep(100); } + // all requests should be coalesced into a single one + verify(controlConnection, timeout(10000)).refreshNodeListAndTokenMap(); + reset(controlConnection); + // disable debouncing + cluster().getConfiguration().getQueryOptions().setRefreshNodeListIntervalMillis(0); + for (int i = 0; i < 10; i++) { + cluster().manager.submitNodeListRefresh(); + Thread.sleep(100); + } + // each request should have been handled separately + verify(controlConnection, timeout(10000).times(10)).refreshNodeListAndTokenMap(); + } + + private class TestLoadBalancingPolicy extends SortingLoadBalancingPolicy { + + CyclicBarrier stop = new CyclicBarrier(2); + + CyclicBarrier proceed = new CyclicBarrier(2); + + CountDownLatch onDownCalled = new CountDownLatch(1); + + volatile boolean init = false; - /** - * Tests that settings for a debouncer can be modified dynamically - * without requiring the cluster to be restarted. - * - * @throws InterruptedException - * @jira_ticket JAVA-1192 - */ - @CCMConfig(numberOfNodes = 1) - @Test(groups = "short") - public void should_change_debouncer_settings_dynamically() throws InterruptedException { - // Create a spy of the Cluster's control connection and replace it with the spy. - ControlConnection controlConnection = spy(cluster().manager.controlConnection); - cluster().manager.controlConnection = controlConnection; - for (int i = 0; i < 10; i++) { - cluster().manager.submitNodeListRefresh(); - Thread.sleep(100); - } - // all requests should be coalesced into a single one - verify(controlConnection, timeout(10000)).refreshNodeListAndTokenMap(); - reset(controlConnection); - // disable debouncing - cluster().getConfiguration().getQueryOptions() - .setRefreshNodeListIntervalMillis(0); - for (int i = 0; i < 10; i++) { - cluster().manager.submitNodeListRefresh(); - Thread.sleep(100); - } - // each request should have been handled separately - verify(controlConnection, timeout(10000).times(10)).refreshNodeListAndTokenMap(); + volatile boolean onDownCalledBeforeInit = false; + + @Override + public void init(Cluster cluster, Collection hosts) { + try { + stop.await(1, TimeUnit.MINUTES); + proceed.await(1, TimeUnit.MINUTES); + } catch (Exception e) { + fail(e.getMessage()); + } + super.init(cluster, hosts); + init = true; } - private class TestLoadBalancingPolicy extends SortingLoadBalancingPolicy { - - CyclicBarrier stop = new CyclicBarrier(2); - - CyclicBarrier proceed = new CyclicBarrier(2); - - CountDownLatch onDownCalled = new CountDownLatch(1); - - volatile boolean init = false; - - volatile boolean onDownCalledBeforeInit = false; - - @Override - public void init(Cluster cluster, Collection hosts) { - try { - stop.await(1, TimeUnit.MINUTES); - proceed.await(1, TimeUnit.MINUTES); - } catch (Exception e) { - fail(e.getMessage()); - } - super.init(cluster, hosts); - init = true; - } - - @Override - public void onDown(Host host) { - if (!init) - onDownCalledBeforeInit = true; - super.onDown(host); - if (host.getAddress().toString().contains(ipOfNode(3))) - onDownCalled.countDown(); - } - - void stop() throws InterruptedException { - try { - stop.await(1, TimeUnit.MINUTES); - } catch (Exception e) { - fail(e.getMessage()); - } - } - - void proceed() throws InterruptedException { - try { - proceed.await(1, TimeUnit.MINUTES); - } catch (Exception e) { 
- fail(e.getMessage()); - } - } - - boolean onDownCalled() throws InterruptedException { - return onDownCalled.await(1, TimeUnit.MINUTES); - } + @Override + public void onDown(Host host) { + if (!init) onDownCalledBeforeInit = true; + super.onDown(host); + if (host.getEndPoint().resolve().getAddress().toString().contains(ipOfNode(3))) + onDownCalled.countDown(); + } + void stop() throws InterruptedException { + try { + stop.await(1, TimeUnit.MINUTES); + } catch (Exception e) { + fail(e.getMessage()); + } } + void proceed() throws InterruptedException { + try { + proceed.await(1, TimeUnit.MINUTES); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + boolean onDownCalled() throws InterruptedException { + return onDownCalled.await(1, TimeUnit.MINUTES); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerTest.java index b6cd5cd07fe..f45ed0a8c47 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +17,14 @@ */ package com.datastax.driver.core; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.EventDebouncer.DeliveryCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -33,313 +34,322 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.MINUTES; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; public class EventDebouncerTest { - private ScheduledExecutorService executor; - - private MockDeliveryCallback callback; - - @BeforeMethod(groups = "unit") - public void 
setup() { - executor = Executors.newScheduledThreadPool(1); - callback = new MockDeliveryCallback(); - } - - @AfterMethod(groups = "unit") - public void tearDown() { - executor.shutdownNow(); - } - - @Test(groups = "unit") - public void should_deliver_single_event() throws InterruptedException { - EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { + private ScheduledExecutorService executor; + + private MockDeliveryCallback callback; + + @BeforeMethod(groups = "unit") + public void setup() { + executor = Executors.newScheduledThreadPool(1); + callback = new MockDeliveryCallback(); + } + + @AfterMethod(groups = "unit") + public void tearDown() { + executor.shutdownNow(); + } + + @Test(groups = "unit") + public void should_deliver_single_event() throws InterruptedException { + EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 10; + } + + @Override + long delayMs() { + return 50; + } + }; + debouncer.start(); + MockEvent event = new MockEvent(0); + debouncer.eventReceived(event); + callback.awaitEvents(1); + assertThat(callback.getEvents()).containsOnly(event); + } + + @Test(groups = "unit") + public void should_log_and_drop_events_on_overflow() throws InterruptedException { + MemoryAppender logs = new MemoryAppender(); + Logger logger = Logger.getLogger(EventDebouncer.class); + Level originalLoggerLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(logs); + try { + EventDebouncer debouncer = + new EventDebouncer("test", executor, callback, 10) { @Override int maxPendingEvents() { - return 10; + return 100; } @Override long delayMs() { - return 50; + return 15; } - }; - debouncer.start(); - MockEvent event = new MockEvent(0); + }; + debouncer.start(); + List events = new ArrayList(); + for (int i = 0; i < 14; i++) { + MockEvent event = new MockEvent(i); + events.add(event); debouncer.eventReceived(event); - callback.awaitEvents(1); - assertThat(callback.getEvents()).containsOnly(event); + } + // Only 10 events should have been handled. + callback.awaitEvents(10); + assertThat(callback.getEvents()).isEqualTo(events.subList(0, 10)); + // Debouncer warning should have been logged, but only once. + assertThat(logs.get()) + .containsOnlyOnce("test debouncer enqueued more than 10 events, rejecting new events."); + } finally { + logger.removeAppender(logs); + logger.setLevel(originalLoggerLevel); } - - @Test(groups = "unit") - public void should_log_and_drop_events_on_overflow() throws InterruptedException { - MemoryAppender logs = new MemoryAppender(); - Logger logger = Logger.getLogger(EventDebouncer.class); - Level originalLoggerLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(logs); - try { - EventDebouncer debouncer = new EventDebouncer("test", executor, callback, 10) { - @Override - int maxPendingEvents() { - return 100; - } - - @Override - long delayMs() { - return 15; - } - }; - debouncer.start(); - List events = new ArrayList(); - for (int i = 0; i < 14; i++) { - MockEvent event = new MockEvent(i); - events.add(event); - debouncer.eventReceived(event); - } - // Only 10 events should have been handled. - callback.awaitEvents(10); - assertThat(callback.getEvents()).isEqualTo(events.subList(0, 10)); - // Debouncer warning should have been logged, but only once. 
- assertThat(logs.get()).containsOnlyOnce("test debouncer enqueued more than 10 events, rejecting new events."); - } finally { - logger.removeAppender(logs); - logger.setLevel(originalLoggerLevel); - } - } - - @Test(groups = "unit") - public void should_deliver_n_events_in_order() throws InterruptedException { - EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 10; - } - - @Override - long delayMs() { - return 50; - } + } + + @Test(groups = "unit") + public void should_deliver_n_events_in_order() throws InterruptedException { + EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 10; + } + + @Override + long delayMs() { + return 50; + } }; - debouncer.start(); - List events = new ArrayList(); - for (int i = 0; i < 50; i++) { - MockEvent event = new MockEvent(i); - events.add(event); - debouncer.eventReceived(event); - } - callback.awaitEvents(50); - assertThat(callback.getEvents()).isEqualTo(events); + debouncer.start(); + List events = new ArrayList(); + for (int i = 0; i < 50; i++) { + MockEvent event = new MockEvent(i); + events.add(event); + debouncer.eventReceived(event); } - - @Test(groups = "unit") - public void should_deliver_n_events_in_order_even_if_queue_full() throws InterruptedException { - EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 10; - } - - @Override - long delayMs() { - return 1; - } + callback.awaitEvents(50); + assertThat(callback.getEvents()).isEqualTo(events); + } + + @Test(groups = "unit") + public void should_deliver_n_events_in_order_even_if_queue_full() throws InterruptedException { + EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 10; + } + + @Override + long delayMs() { + return 1; + } }; - debouncer.start(); - List events = new ArrayList(); - for (int i = 0; i < 50; i++) { - MockEvent event = new MockEvent(i); - events.add(event); - debouncer.eventReceived(event); - } - callback.awaitEvents(50); - assertThat(callback.getEvents()).isEqualTo(events); + debouncer.start(); + List events = new ArrayList(); + for (int i = 0; i < 50; i++) { + MockEvent event = new MockEvent(i); + events.add(event); + debouncer.eventReceived(event); } - - @Test(groups = "unit") - public void should_accumulate_events_if_not_ready() throws InterruptedException { - EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 10; - } - - @Override - long delayMs() { - return 50; - } + callback.awaitEvents(50); + assertThat(callback.getEvents()).isEqualTo(events); + } + + @Test(groups = "unit") + public void should_accumulate_events_if_not_ready() throws InterruptedException { + EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 10; + } + + @Override + long delayMs() { + return 50; + } }; - List events = new ArrayList(); - for (int i = 0; i < 50; i++) { - MockEvent event = new MockEvent(i); - events.add(event); - debouncer.eventReceived(event); - } - // simulate late start - debouncer.start(); - callback.awaitEvents(50); - assertThat(callback.getEvents()).hasSize(50); - assertThat(callback.getEvents()).isEqualTo(events); + List events = new ArrayList(); + for (int i = 0; i < 50; i++) { + MockEvent event = new MockEvent(i); + events.add(event); + 
debouncer.eventReceived(event); } - - @Test(groups = "unit") - public void should_accumulate_all_events_until_start() throws InterruptedException { - final EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 10; - } - - @Override - long delayMs() { - return 25; - } + // simulate late start + debouncer.start(); + callback.awaitEvents(50); + assertThat(callback.getEvents()).hasSize(50); + assertThat(callback.getEvents()).isEqualTo(events); + } + + @Test(groups = "unit") + public void should_accumulate_all_events_until_start() throws InterruptedException { + final EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 10; + } + + @Override + long delayMs() { + return 25; + } }; - final List events = new ArrayList(); - - for (int i = 0; i < 50; i++) { - MockEvent event = new MockEvent(i); - events.add(event); - debouncer.eventReceived(event); - } - - debouncer.start(); + final List events = new ArrayList(); - callback.awaitEvents(50); - assertThat(callback.getEvents()).isEqualTo(events); + for (int i = 0; i < 50; i++) { + MockEvent event = new MockEvent(i); + events.add(event); + debouncer.eventReceived(event); } - @Test(groups = "unit") - public void should_reset_timer_if_n_events_received_within_same_window() throws InterruptedException { - final EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 50; - } - - @Override - long delayMs() { - return 50; - } + debouncer.start(); + + callback.awaitEvents(50); + assertThat(callback.getEvents()).isEqualTo(events); + } + + @Test(groups = "unit") + public void should_reset_timer_if_n_events_received_within_same_window() + throws InterruptedException { + final EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + return 50; + } + + @Override + long delayMs() { + return 50; + } }; - debouncer.start(); - final CountDownLatch latch = new CountDownLatch(50); - ScheduledExecutorService pool = Executors.newScheduledThreadPool(1); - pool.scheduleAtFixedRate(new Runnable() { - @Override - public void run() { - if (latch.getCount() > 0) { - MockEvent event = new MockEvent(0); - debouncer.eventReceived(event); - latch.countDown(); - } - } - }, 0, 5, MILLISECONDS); - latch.await(); - pool.shutdownNow(); - callback.awaitEvents(50); - assertThat(callback.getEvents()).hasSize(50); - } - - @Test(groups = "unit") - public void should_stop_receiving_events() throws InterruptedException { - final EventDebouncer debouncer = new EventDebouncer("test", executor, callback) { - @Override - int maxPendingEvents() { - return 10; - } - - @Override - long delayMs() { - return 50; + debouncer.start(); + final CountDownLatch latch = new CountDownLatch(50); + ScheduledExecutorService pool = Executors.newScheduledThreadPool(1); + pool.scheduleAtFixedRate( + new Runnable() { + @Override + public void run() { + if (latch.getCount() > 0) { + MockEvent event = new MockEvent(0); + debouncer.eventReceived(event); + latch.countDown(); } + } + }, + 0, + 5, + MILLISECONDS); + latch.await(); + pool.shutdownNow(); + callback.awaitEvents(50); + assertThat(callback.getEvents()).hasSize(50); + } + + @Test(groups = "unit") + public void should_stop_receiving_events() throws InterruptedException { + final EventDebouncer debouncer = + new EventDebouncer("test", executor, callback) { + @Override + int maxPendingEvents() { + 
return 10; + } + + @Override + long delayMs() { + return 50; + } }; - debouncer.start(); - for (int i = 0; i < 50; i++) { - MockEvent event = new MockEvent(i); - debouncer.eventReceived(event); - } - callback.awaitEvents(50); - debouncer.stop(); - MockEvent event = new MockEvent(0); - debouncer.eventReceived(event); - assertThat(callback.getEvents()).hasSize(50); + debouncer.start(); + for (int i = 0; i < 50; i++) { + MockEvent event = new MockEvent(i); + debouncer.eventReceived(event); + } + callback.awaitEvents(50); + debouncer.stop(); + MockEvent event = new MockEvent(0); + debouncer.eventReceived(event); + assertThat(callback.getEvents()).hasSize(50); + } + + private static class MockDeliveryCallback implements DeliveryCallback { + + final List events = new CopyOnWriteArrayList(); + + final Lock lock = new ReentrantLock(); + + final Condition cond = lock.newCondition(); + + @Override + public ListenableFuture deliver(List events) { + lock.lock(); + try { + this.events.addAll(events); + cond.signal(); + } finally { + lock.unlock(); + } + return Futures.immediateFuture(null); } - private static class MockDeliveryCallback implements DeliveryCallback { - - final List events = new CopyOnWriteArrayList(); - - final Lock lock = new ReentrantLock(); - - final Condition cond = lock.newCondition(); - - @Override - public ListenableFuture deliver(List events) { - lock.lock(); - try { - this.events.addAll(events); - cond.signal(); - } finally { - lock.unlock(); - } - return Futures.immediateFuture(null); - } - - void awaitEvents(int expected) throws InterruptedException { - long nanos = MINUTES.toNanos(5); - lock.lock(); - try { - while (events.size() < expected) { - if (nanos <= 0L) - fail("Timed out waiting for events"); - nanos = cond.awaitNanos(nanos); - } - } finally { - lock.unlock(); - } - } - - public List getEvents() { - return events; + void awaitEvents(int expected) throws InterruptedException { + long nanos = MINUTES.toNanos(5); + lock.lock(); + try { + while (events.size() < expected) { + if (nanos <= 0L) fail("Timed out waiting for events"); + nanos = cond.awaitNanos(nanos); } - + } finally { + lock.unlock(); + } } - private class MockEvent { + public List getEvents() { + return events; + } + } - private final int i; + private class MockEvent { - private MockEvent(int i) { - this.i = i; - } + private final int i; - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; + private MockEvent(int i) { + this.i = i; + } - MockEvent mockEvent = (MockEvent) o; + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - return i == mockEvent.i; + MockEvent mockEvent = (MockEvent) o; - } + return i == mockEvent.i; + } - @Override - public int hashCode() { - return i; - } + @Override + public int hashCode() { + return i; + } - @Override - public String toString() { - return "MockEvent" + i; - } + @Override + public String toString() { + return "MockEvent" + i; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ExportAsStringTest.java b/driver-core/src/test/java/com/datastax/driver/core/ExportAsStringTest.java new file mode 100644 index 00000000000..934bfba791f --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/ExportAsStringTest.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.driver.core.schemabuilder.SchemaBuilder; +import com.datastax.driver.core.utils.CassandraVersion; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.Closer; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.StringWriter; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; + +@CassandraVersion("2.0") +@CCMConfig(config = "enable_user_defined_functions:true") +public class ExportAsStringTest extends CCMTestsSupport { + + private static final Logger logger = LoggerFactory.getLogger(ExportAsStringTest.class); + + /** + * Creates a keyspace using a variety of features and ensures {@link + * KeyspaceMetadata#exportAsString()} contains the expected data in the expected order. This is + * not exhaustive, but covers quite a bit of different scenarios (materialized views, aggregates, + * functions, nested UDTs, etc.). + * + *

    The test also verifies that the generated schema is the same whether the keyspace and its + * schema was created during the lifecycle of the cluster or before connecting. + * + *

    Note that this test might be fragile in the future if default option values change in + * cassandra. In order to deal with new features, we create a schema for each tested C* version, + * and if one is not present the test is failed. + */ + @Test(groups = "short") + public void should_create_schema_and_ensure_exported_cql_is_as_expected() { + String keyspace = "complex_ks"; + Map replicationOptions = + ImmutableMap.of("class", "SimpleStrategy", "replication_factor", 1); + + // create keyspace + session() + .execute(SchemaBuilder.createKeyspace(keyspace).with().replication(replicationOptions)); + + // create session from this keyspace. + Session session = cluster().connect(keyspace); + + KeyspaceMetadata ks; + + // udts require 2.1+ + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.1")) >= 0) { + // Usertype 'ztype' with two columns. Given name to ensure that even though it has an + // alphabetically + // later name, it shows up before other user types ('ctype') that depend on it. + session.execute( + SchemaBuilder.createType("ztype") + .addColumn("c", DataType.text()) + .addColumn("a", DataType.cint())); + + // Usertype 'xtype' with two columns. At same level as 'ztype' since both are depended on by + // ctype, should + // show up before 'ztype' because it's alphabetically before, even though it was created + // after. + session.execute(SchemaBuilder.createType("xtype").addColumn("d", DataType.text())); + + ks = cluster().getMetadata().getKeyspace(keyspace); + + // Usertype 'ctype' which depends on both ztype and xtype, therefore ztype and xtype should + // show up earlier. + session.execute( + SchemaBuilder.createType("ctype") + .addColumn("\"Z\"", ks.getUserType("ztype").copy(true)) + .addColumn("x", ks.getUserType("xtype").copy(true))); + + // Usertype 'btype' which has no dependencies, should show up before 'xtype' and 'ztype' since + // it's + // alphabetically before. + session.execute(SchemaBuilder.createType("btype").addColumn("a", DataType.text())); + + // Refetch keyspace for < 3.0 schema this is required as a new keyspace metadata reference may + // be created. + ks = cluster().getMetadata().getKeyspace(keyspace); + + // Usertype 'atype' which depends on 'ctype', so should show up after 'ctype', 'xtype' and + // 'ztype'. + session.execute( + SchemaBuilder.createType("atype").addColumn("c", ks.getUserType("ctype").copy(true))); + + // A simple table with a udt column and LCS compaction strategy. + session.execute( + SchemaBuilder.createTable("ztable") + .addPartitionKey("zkey", DataType.text()) + .addColumn("a", ks.getUserType("atype").copy(true)) + .withOptions() + .compactionOptions(SchemaBuilder.leveledStrategy().ssTableSizeInMB(95))); + } else { + // A simple table with LCS compaction strategy. + session.execute( + SchemaBuilder.createTable("ztable") + .addPartitionKey("zkey", DataType.text()) + .addColumn("a", DataType.cint()) + .withOptions() + .compactionOptions(SchemaBuilder.leveledStrategy().ssTableSizeInMB(95))); + } + + // date type requries 2.2+ + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) { + // A table that will have materialized views (copied from mv docs) + session.execute( + SchemaBuilder.createTable("cyclist_mv") + .addPartitionKey("cid", DataType.uuid()) + .addColumn("name", DataType.text()) + .addColumn("age", DataType.cint()) + .addColumn("birthday", DataType.date()) + .addColumn("country", DataType.text())); + + // index on table with view, index should be printed first. 
+ session.execute( + SchemaBuilder.createIndex("cyclist_by_country") + .onTable("cyclist_mv") + .andColumn("country")); + + // materialized views require 3.0+ + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.0")) >= 0) { + // A materialized view for cyclist_mv, reverse clustering. created first to ensure creation + // order does not + // matter, alphabetical does. + session.execute( + "CREATE MATERIALIZED VIEW cyclist_by_r_age " + + "AS SELECT cid, age, birthday, name, country " + + "FROM cyclist_mv " + + "WHERE age IS NOT NULL AND cid IS NOT NULL " + + "PRIMARY KEY (age, cid) " + + "WITH CLUSTERING ORDER BY (cid DESC)"); + + // A materialized view for cyclist_mv, select * + session.execute( + "CREATE MATERIALIZED VIEW cyclist_by_a_age " + + "AS SELECT * " + + "FROM cyclist_mv " + + "WHERE age IS NOT NULL AND cid IS NOT NULL " + + "PRIMARY KEY (age, cid)"); + + // A materialized view for cyclist_mv, select columns + session.execute( + "CREATE MATERIALIZED VIEW cyclist_by_age " + + "AS SELECT cid, age, birthday, name, country " + + "FROM cyclist_mv " + + "WHERE age IS NOT NULL AND cid IS NOT NULL " + + "PRIMARY KEY (age, cid) WITH comment = 'simple view'"); + } + } + + // A table with a secondary index, taken from documentation on secondary index. + session.execute( + SchemaBuilder.createTable("rank_by_year_and_name") + .addPartitionKey("race_year", DataType.cint()) + .addPartitionKey("race_name", DataType.text()) + .addClusteringColumn("rank", DataType.cint()) + .addColumn("cyclist_name", DataType.text())); + + session.execute( + SchemaBuilder.createIndex("ryear").onTable("rank_by_year_and_name").andColumn("race_year")); + + session.execute( + SchemaBuilder.createIndex("rrank").onTable("rank_by_year_and_name").andColumn("rank")); + + // udfs and udas require 2.22+ + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) { + // UDFs + session.execute( + "CREATE OR REPLACE FUNCTION avgState ( state tuple, val int ) CALLED ON NULL INPUT RETURNS tuple LANGUAGE java AS \n" + + " 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;';"); + session.execute( + "CREATE OR REPLACE FUNCTION avgFinal ( state tuple ) CALLED ON NULL INPUT RETURNS double LANGUAGE java AS \n" + + " 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);';"); + + // UDAs + session.execute( + "CREATE AGGREGATE IF NOT EXISTS mean ( int ) \n" + + "SFUNC avgState STYPE tuple FINALFUNC avgFinal INITCOND (0,0);"); + session.execute( + "CREATE AGGREGATE IF NOT EXISTS average ( int ) \n" + + "SFUNC avgState STYPE tuple FINALFUNC avgFinal INITCOND (0,0);"); + } + + ks = cluster().getMetadata().getKeyspace(keyspace); + + // validate that the exported schema matches what was expected exactly. + assertThat(ks.exportAsString().trim()).isEqualTo(getExpectedCqlString()); + + // Also validate that when you create a Cluster with schema already created that the exported + // string + // is the same. 
+ Cluster newCluster = + this.createClusterBuilderNoDebouncing() + .addContactPointsWithPorts(this.getContactPointsWithPorts()) + .build(); + try { + newCluster.init(); + ks = newCluster.getMetadata().getKeyspace(keyspace); + assertThat(ks.exportAsString().trim()).isEqualTo(getExpectedCqlString()); + } finally { + newCluster.close(); + } + } + + private String getExpectedCqlString() { + VersionNumber cassandraVersion = ccm().getCassandraVersion(); + VersionNumber dseVersion = ccm().getDSEVersion(); + String majorMinor; + if (dseVersion != null && dseVersion.getMajor() == 6 && dseVersion.getMinor() < 8) { + // DSE 6.0 and 6.7 report C* 4.0 but in reality it is C* 3.11 + majorMinor = "3.11"; + } else { + majorMinor = cassandraVersion.getMajor() + "." + cassandraVersion.getMinor(); + } + String resourceName = "/export_as_string_test_" + majorMinor + ".cql"; + + Closer closer = Closer.create(); + try { + InputStream is = ExportAsStringTest.class.getResourceAsStream(resourceName); + assertThat(is) + .as( + "No reference script for this version (was looking for src/test/resources" + + resourceName + + ")") + .isNotNull(); + closer.register(is); + + BufferedReader in = new BufferedReader(new InputStreamReader(is)); + StringWriter out = new StringWriter(); + + String line; + while ((line = in.readLine()) != null) { + + String trimmedLine = line.trim(); + if (trimmedLine.startsWith("/*") || trimmedLine.startsWith("*")) continue; + out.write(line); + out.write(System.getProperty("line.separator")); + } + return out.toString().trim(); + } catch (IOException e) { + logger.warn("Failure to read {}", resourceName, e); + fail("Unable to read " + resourceName + " is it defined?"); + } finally { + try { + closer.close(); + } catch (IOException e) { // no op + logger.warn("Failure closing streams", e); + } + } + return ""; + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/ExtendedPeerCheckDisabledTest.java b/driver-core/src/test/java/com/datastax/driver/core/ExtendedPeerCheckDisabledTest.java index bac2ada7564..b4b8c7ed4f7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ExtendedPeerCheckDisabledTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ExtendedPeerCheckDisabledTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,22 +17,30 @@ */ package com.datastax.driver.core; +import org.testng.SkipException; import org.testng.annotations.Test; public class ExtendedPeerCheckDisabledTest { - /** - * Validates that if the com.datastax.driver.EXTENDED_PEER_CHECK system property is set to false that a peer - * with null values for host_id, data_center, rack, tokens is not ignored. 
- * - * @test_category host:metadata - * @jira_ticket JAVA-852 - * @since 2.1.10 - */ - @Test(groups = "isolated", dataProvider = "disallowedNullColumnsInPeerData", dataProviderClass = ControlConnectionTest.class) - @CCMConfig(createCcm = false) - public void should_use_peer_if_extended_peer_check_is_disabled(String columns) { - System.setProperty("com.datastax.driver.EXTENDED_PEER_CHECK", "false"); - ControlConnectionTest.run_with_null_peer_info(columns, true); + /** + * Validates that if the com.datastax.driver.EXTENDED_PEER_CHECK system property is set to false + * that a peer with null values for host_id, data_center, rack, tokens is not ignored. + * + * @test_category host:metadata + * @jira_ticket JAVA-852 + * @since 2.1.10 + */ + @Test( + groups = "isolated", + dataProvider = "disallowedNullColumnsInPeerData", + dataProviderClass = ControlConnectionTest.class) + @CCMConfig(createCcm = false) + public void should_use_peer_if_extended_peer_check_is_disabled( + String columns, boolean withPeersV2, boolean requiresExtendedPeerCheck) { + System.setProperty("com.datastax.driver.EXTENDED_PEER_CHECK", "false"); + if (!requiresExtendedPeerCheck) { + throw new SkipException("Absence of column does not require extended peer check, skipping"); } + ControlConnectionTest.run_with_null_peer_info(columns, true, withPeersV2); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/FakeHost.java b/driver-core/src/test/java/com/datastax/driver/core/FakeHost.java index 1629edd5d22..4dd70a541a8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/FakeHost.java +++ b/driver-core/src/test/java/com/datastax/driver/core/FakeHost.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.fail; + import java.io.IOException; import java.net.InetAddress; import java.net.ServerSocket; @@ -23,77 +27,72 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.assertj.core.api.Assertions.fail; - -/** - * Fake Cassandra host that will cause a given error when the driver tries to connect to it. - */ +/** Fake Cassandra host that will cause a given error when the driver tries to connect to it. 
*/ public class FakeHost { - public enum Behavior {THROWING_CONNECT_TIMEOUTS, THROWING_OPERATION_TIMEOUTS} + public enum Behavior { + THROWING_CONNECT_TIMEOUTS, + THROWING_OPERATION_TIMEOUTS + } - final String address; - private final int port; - private final Behavior behavior; - private final ExecutorService executor; + final String address; + private final int port; + private final Behavior behavior; + private final ExecutorService executor; - FakeHost(String address, int port, Behavior behavior) { - this.address = address; - this.port = port; - this.behavior = behavior; - this.executor = Executors.newSingleThreadExecutor(); - } + FakeHost(String address, int port, Behavior behavior) { + this.address = address; + this.port = port; + this.behavior = behavior; + this.executor = Executors.newSingleThreadExecutor(); + } - public void start() { - executor.execute(new AcceptClientAndWait(address, port, behavior)); - } - - public void stop() { - executor.shutdownNow(); - } + public void start() { + executor.execute(new AcceptClientAndWait(address, port, behavior)); + } - private static class AcceptClientAndWait implements Runnable { + public void stop() { + executor.shutdownNow(); + } - private final String address; - private final int port; - private final Behavior behavior; + private static class AcceptClientAndWait implements Runnable { - public AcceptClientAndWait(String address, int port, Behavior behavior) { - this.address = address; - this.port = port; - this.behavior = behavior; - } + private final String address; + private final int port; + private final Behavior behavior; - @Override - public void run() { - ServerSocket server = null; - Socket client = null; - try { - InetAddress bindAddress = InetAddress.getByName(address); - int backlog = (behavior == Behavior.THROWING_CONNECT_TIMEOUTS) - ? 1 - : -1; // default - server = new ServerSocket(port, backlog, bindAddress); + public AcceptClientAndWait(String address, int port, Behavior behavior) { + this.address = address; + this.port = port; + this.behavior = behavior; + } - if (behavior == Behavior.THROWING_CONNECT_TIMEOUTS) { - // fill backlog queue - client = new Socket(); - client.connect(server.getLocalSocketAddress()); - } - TimeUnit.MINUTES.sleep(10); - fail("Mock host wasn't expected to live more than 10 minutes"); - } catch (IOException e) { - fail("Unexpected I/O exception", e); - } catch (InterruptedException e) { - // interruption is the expected way to stop this runnable, exit - try { - if (client != null) - client.close(); - server.close(); - } catch (IOException e1) { - fail("Unexpected error while closing sockets", e); - } + @Override + public void run() { + ServerSocket server = null; + Socket client = null; + try { + InetAddress bindAddress = InetAddress.getByName(address); + int backlog = (behavior == Behavior.THROWING_CONNECT_TIMEOUTS) ? 
1 : -1; // default + server = new ServerSocket(port, backlog, bindAddress); - } + if (behavior == Behavior.THROWING_CONNECT_TIMEOUTS) { + // fill backlog queue + client = new Socket(); + client.connect(server.getLocalSocketAddress()); + } + TimeUnit.MINUTES.sleep(10); + fail("Mock host wasn't expected to live more than 10 minutes"); + } catch (IOException e) { + fail("Unexpected I/O exception", e); + } catch (InterruptedException e) { + // interruption is the expected way to stop this runnable, exit + try { + if (client != null) client.close(); + server.close(); + } catch (IOException e1) { + fail("Unexpected error while closing sockets", e); } + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/FetchingTest.java b/driver-core/src/test/java/com/datastax/driver/core/FetchingTest.java index 2a9e59738a8..78a86126e22 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/FetchingTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/FetchingTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,48 +17,51 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import org.testng.annotations.Test; -import static org.testng.Assert.*; - -/** - * Test ResultSet paging correct behavior. - */ +/** Test ResultSet paging correct behavior. */ public class FetchingTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); + } + + @Test(groups = "short") + public void simplePagingTest() { + try { + // Insert data + String key = "paging_test"; + for (int i = 0; i < 100; i++) + session().execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", key, i)); + + SimpleStatement st = + new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", key)); + st.setFetchSize(5); // Ridiculously small fetch size for testing purpose. Don't do at home. 
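+      // With fetchSize=5 the driver retrieves rows in pages of 5, so
+      // getAvailableWithoutFetching() counts down from 5 within each page,
+      // which is what the assertions in the loop below check (5 - (i % 5)).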
+ ResultSet rs = session().execute(st); + + assertFalse(rs.isFullyFetched()); + + for (int i = 0; i < 100; i++) { + // isExhausted makes sure we do fetch if needed + assertFalse(rs.isExhausted()); + assertEquals(rs.getAvailableWithoutFetching(), 5 - (i % 5)); + assertEquals(rs.one().getInt(0), i); + } + + assertTrue(rs.isExhausted()); + assertTrue(rs.isFullyFetched()); - @Test(groups = "short") - public void simplePagingTest() { - try { - // Insert data - String key = "paging_test"; - for (int i = 0; i < 100; i++) - session().execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", key, i)); - - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", key)); - st.setFetchSize(5); // Ridiculously small fetch size for testing purpose. Don't do at home. - ResultSet rs = session().execute(st); - - assertFalse(rs.isFullyFetched()); - - for (int i = 0; i < 100; i++) { - // isExhausted makes sure we do fetch if needed - assertFalse(rs.isExhausted()); - assertEquals(rs.getAvailableWithoutFetching(), 5 - (i % 5)); - assertEquals(rs.one().getInt(0), i); - } - - assertTrue(rs.isExhausted()); - assertTrue(rs.isFullyFetched()); - - } catch (UnsupportedFeatureException e) { - // This is expected when testing the protocol v1 - assertEquals(cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1); - } + } catch (UnsupportedFeatureException e) { + // This is expected when testing the protocol v1 + assertEquals( + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), + ProtocolVersion.V1); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/FrameLengthTest.java b/driver-core/src/test/java/com/datastax/driver/core/FrameLengthTest.java index 4405ce1a6f8..9d799e274b7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/FrameLengthTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/FrameLengthTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,107 +17,134 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + import com.datastax.driver.core.exceptions.FrameTooLongException; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.schemabuilder.Create; import com.datastax.driver.core.schemabuilder.SchemaBuilder; +import com.datastax.driver.core.utils.CassandraVersion; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Random; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Random; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - @CCMConfig(numberOfNodes = 2) public class FrameLengthTest extends CCMTestsSupport { - Logger logger = LoggerFactory.getLogger(FrameLengthTest.class); + Logger logger = LoggerFactory.getLogger(FrameLengthTest.class); + + private static final String tableName = "blob_table"; + private static final int colCount = 256; + private static final int rowsPerPartitionCount = 4; + private static final int partitionCount = 1; + private static final int bytesPerCol = 1024; - private static final String tableName = "blob_table"; - private static final int colCount = 256; - private static final int rowsPerPartitionCount = 4; - private static final int partitionCount = 1; - private static final int bytesPerCol = 1024; + @BeforeClass(groups = {"isolated"}) + public void beforeTestClass() throws Exception { + // Set max frame size to 1MB to make it easier to manifest frame length error. + System.setProperty("com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB", "1"); + super.beforeTestClass(); + } - @BeforeClass(groups = {"isolated"}) - public void beforeTestClass() throws Exception { - // Set max frame size to 1MB to make it easier to manifest frame length error. 
- System.setProperty("com.datastax.driver.NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB", "1"); - super.beforeTestClass(); + @Override + public void onTestContextInitialized() { + logger.info("Creating table {} with {} {}-byte blob columns", tableName, colCount, bytesPerCol); + Random random = new Random(); + // Create table + Create create = + SchemaBuilder.createTable(tableName) + .addPartitionKey("k", DataType.cint()) + .addClusteringColumn("c", DataType.cint()); + for (int i = 0; i < colCount; i++) { + create.addColumn("col" + i, DataType.blob()); } + execute(create.getQueryString()); - @Override - public void onTestContextInitialized() { - logger.info("Creating table {} with {} {}-byte blob columns", tableName, colCount, bytesPerCol); - Random random = new Random(); - // Create table - Create create = SchemaBuilder.createTable(tableName).addPartitionKey("k", DataType.cint()).addClusteringColumn("c", DataType.cint()); - for (int i = 0; i < colCount; i++) { - create.addColumn("col" + i, DataType.blob()); - } - execute(create.getQueryString()); + // build prepared statement. + Insert insert = insertInto(tableName).value("k", bindMarker()).value("c", bindMarker()); + for (int i = 0; i < colCount; i++) { + insert = insert.value("col" + i, bindMarker()); + } - // build prepared statement. - Insert insert = insertInto(tableName).value("k", bindMarker()).value("c", bindMarker()); - for (int i = 0; i < colCount; i++) { - insert = insert.value("col" + i, bindMarker()); - } + PreparedStatement prepared = session().prepare(insert); - PreparedStatement prepared = session().prepare(insert); - - // Insert rows. - logger.info("Inserting data for {} partitions.", partitionCount); - for (int i = 0; i < partitionCount; i++) { - logger.info("Inserting {} rows in partition {}", rowsPerPartitionCount, i); - for (int r = 0; r < rowsPerPartitionCount; r++) { - BoundStatement stmt = prepared.bind(); - stmt.setInt("k", i); - stmt.setInt("c", r); - for (int c = 0; c < colCount; c++) { - byte[] b = new byte[bytesPerCol]; - random.nextBytes(b); - ByteBuffer in = ByteBuffer.wrap(b); - stmt.setBytes("col" + c, in); - } - session().execute(stmt); - } + // Insert rows. + logger.info("Inserting data for {} partitions.", partitionCount); + for (int i = 0; i < partitionCount; i++) { + logger.info("Inserting {} rows in partition {}", rowsPerPartitionCount, i); + for (int r = 0; r < rowsPerPartitionCount; r++) { + BoundStatement stmt = prepared.bind(); + stmt.setInt("k", i); + stmt.setInt("c", r); + for (int c = 0; c < colCount; c++) { + byte[] b = new byte[bytesPerCol]; + random.nextBytes(b); + ByteBuffer in = ByteBuffer.wrap(b); + stmt.setBytes("col" + c, in); } - logger.info("Done loading {}", tableName); + session().execute(stmt); + } } + logger.info("Done loading {}", tableName); + } - /** - * Validates that if a frame is received that exceeds NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB that - * the driver is able to recover, not lose host connectivity and make further queries. It - * configures NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB to 1 MB to make the error easier to reproduce. - * - * @jira_ticket JAVA-1292 - * @jira_ticket JAVA-1293 - * @test_category connection - */ - @Test(groups = "isolated") - public void should_throw_exception_when_frame_exceeds_configured_max() { - try { - session().execute(select().from(tableName).where(eq("k", 0))); - fail("Exception expected"); - } catch (FrameTooLongException ftle) { - // Expected. 
- } + /** + * Validates that if a frame is received that exceeds NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB that + * the driver is able to recover, not lose host connectivity and make further queries. It + * configures NATIVE_TRANSPORT_MAX_FRAME_SIZE_IN_MB to 1 MB to make the error easier to reproduce. + * + * @jira_ticket JAVA-1292 + * @jira_ticket JAVA-1293 + * @test_category connection + */ + @Test(groups = "isolated") + public void should_throw_exception_when_frame_exceeds_configured_max() { + skipTestWithCassandraVersionOrHigher("4.0.0", "frame-size exceeding with default-protocol"); + run_frame_size_exceeding_queries(session()); + } - // Both hosts should remain up. - Collection hosts = session().getState().getConnectedHosts(); - assertThat(hosts).hasSize(2).extractingResultOf("isUp").containsOnly(true); + /** + * With cassandra 4.0.0+, V5 protocol is default which breaks requests into segments. Force V4 + * protocol to allow us to test frame-size limitation code. + */ + @CassandraVersion("4.0.0") + @Test(groups = "isolated") + public void should_throw_exception_when_frame_exceeds_configured_max_v4_protocol_cassandra4() { + Cluster cluster = + register(createClusterBuilder().withProtocolVersion(ProtocolVersion.V4).build()); + Session session = register(cluster.connect()); + useKeyspace(session, keyspace); - // Should be able to make a query that is less than the max frame size. - // Execute multiple time to exercise all hosts. - for (int i = 0; i < 10; i++) { - ResultSet result = session().execute(select().from(tableName).where(eq("k", 0)).and(eq("c", 0))); - assertThat(result.getAvailableWithoutFetching()).isEqualTo(1); - } + run_frame_size_exceeding_queries(session); + } + + private void run_frame_size_exceeding_queries(Session session) { + try { + session.execute(select().from(tableName).where(eq("k", 0))); + fail("Exception expected"); + } catch (FrameTooLongException ftle) { + // Expected. + } + + // Both hosts should remain up. + Collection hosts = session.getState().getConnectedHosts(); + assertThat(hosts).hasSize(2).extractingResultOf("isUp").containsOnly(true); + + // Should be able to make a query that is less than the max frame size. + // Execute multiple time to exercise all hosts. + for (int i = 0; i < 10; i++) { + ResultSet result = + session.execute(select().from(tableName).where(eq("k", 0)).and(eq("c", 0))); + assertThat(result.getAvailableWithoutFetching()).isEqualTo(1); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataAssert.java index eec319000ed..93ebce19045 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,28 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; -public class FunctionMetadataAssert extends AbstractAssert { - protected FunctionMetadataAssert(FunctionMetadata actual) { - super(actual, FunctionMetadataAssert.class); - } +import org.assertj.core.api.AbstractAssert; + +public class FunctionMetadataAssert + extends AbstractAssert { + protected FunctionMetadataAssert(FunctionMetadata actual) { + super(actual, FunctionMetadataAssert.class); + } - public FunctionMetadataAssert hasSignature(String name) { - assertThat(actual.getSignature()).isEqualTo(name); - return this; - } + public FunctionMetadataAssert hasSignature(String name) { + assertThat(actual.getSignature()).isEqualTo(name); + return this; + } - public FunctionMetadataAssert isInKeyspace(String keyspaceName) { - assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName); - return this; - } + public FunctionMetadataAssert isInKeyspace(String keyspaceName) { + assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName); + return this; + } - public FunctionMetadataAssert hasBody(String body) { - assertThat(actual.getBody()).isEqualTo(body); - return this; - } + public FunctionMetadataAssert hasBody(String body) { + assertThat(actual.getBody()).isEqualTo(body); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataTest.java index 83aee5a348c..1aebf53e315 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/FunctionMetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,165 +17,179 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.DataType.cint; import static com.datastax.driver.core.DataType.map; import static org.assertj.core.api.Assertions.entry; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + @CassandraVersion("2.2.0") @CCMConfig(config = "enable_user_defined_functions:true") public class FunctionMetadataTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_parse_and_format_simple_function() { - // given - String cql = String.format("CREATE FUNCTION %s.plus(s int,v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return s+v;';", keyspace); - // when - session().execute(cql); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata function = keyspace.getFunction("plus", cint(), cint()); - assertThat(function).isNotNull(); - assertThat(function.getKeyspace()).isEqualTo(keyspace); - assertThat(function.getSignature()).isEqualTo("plus(int,int)"); - assertThat(function.getSimpleName()).isEqualTo("plus"); - assertThat(function.getReturnType()).isEqualTo(cint()); - assertThat(function.getArguments()) - .containsEntry("s", cint()) - .containsEntry("v", cint()); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getBody()).isEqualTo("return s+v;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.toString()) - .isEqualTo(cql); - assertThat(function.exportAsString()) - .isEqualTo(String.format("CREATE FUNCTION %s.plus(\n" - + " s int,\n" - + " v int)\n" - + "RETURNS NULL ON NULL INPUT\n" - + "RETURNS int\n" - + "LANGUAGE java\n" - + "AS 'return s+v;';", this.keyspace)); - } - - @Test(groups = "short") - public void should_parse_and_format_function_with_no_arguments() { - // given - String cql = String.format("CREATE FUNCTION %s.pi() CALLED ON NULL INPUT RETURNS double LANGUAGE java AS 'return Math.PI;';", keyspace); - // when - session().execute(cql); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - FunctionMetadata function = keyspace.getFunction("pi"); - assertThat(function).isNotNull(); - assertThat(function.getKeyspace()).isEqualTo(keyspace); - assertThat(function.getSignature()).isEqualTo("pi()"); - assertThat(function.getSimpleName()).isEqualTo("pi"); - assertThat(function.getReturnType()).isEqualTo(DataType.cdouble()); - assertThat(function.getArguments()).isEmpty(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getBody()).isEqualTo("return Math.PI;"); - assertThat(function.isCalledOnNullInput()).isTrue(); - assertThat(function.toString()) - .isEqualTo(cql); - assertThat(function.exportAsString()) - .isEqualTo(String.format("CREATE FUNCTION %s.pi()\n" - + "CALLED ON NULL INPUT\n" - + "RETURNS double\n" - + "LANGUAGE java\n" - + "AS 'return Math.PI;';", this.keyspace)); - } + @Test(groups = "short") + public void should_parse_and_format_simple_function() { + // given + String cql = + String.format( + "CREATE FUNCTION 
%s.plus(s int,v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return s+v;';", + keyspace); + // when + session().execute(cql); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata function = keyspace.getFunction("plus", cint(), cint()); + assertThat(function).isNotNull(); + assertThat(function.getKeyspace()).isEqualTo(keyspace); + assertThat(function.getSignature()).isEqualTo("plus(int,int)"); + assertThat(function.getSimpleName()).isEqualTo("plus"); + assertThat(function.getReturnType()).isEqualTo(cint()); + assertThat(function.getArguments()).containsEntry("s", cint()).containsEntry("v", cint()); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getBody()).isEqualTo("return s+v;"); + assertThat(function.isCalledOnNullInput()).isFalse(); + assertThat(function.toString()).isEqualTo(cql); + assertThat(function.exportAsString()) + .isEqualTo( + String.format( + "CREATE FUNCTION %s.plus(s int,v int)\n" + + " RETURNS NULL ON NULL INPUT\n" + + " RETURNS int\n" + + " LANGUAGE java\n" + + " AS 'return s+v;';", + this.keyspace)); + } - @Test(groups = "short") - public void should_parse_and_format_function_with_udts() { - // given - String body = - "//If \"called on null input\", handle nulls\n" - + "if(ADDRESS == null) return previous_total + 0;\n" - + "//User types are converted to com.datastax.driver.core.UDTValue types\n" - + "java.util.Set phones = ADDRESS.getSet(\"phones\", com.datastax.driver.core.UDTValue.class);\n" - + "return previous_total + phones.size();\n"; - String cqlFunction = String.format( - "CREATE FUNCTION %s.\"NUM_PHONES_ACCU\"(previous_total int,\"ADDRESS\" \"Address\") " - + "CALLED ON NULL INPUT " - + "RETURNS int " - + "LANGUAGE java " - + "AS " - + "'" - + body - + "';", keyspace); - // when - session().execute(cqlFunction); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - UserType addressType = keyspace.getUserType("\"Address\""); - FunctionMetadata function = keyspace.getFunction("\"NUM_PHONES_ACCU\"", cint(), addressType); - assertThat(function).isNotNull(); - assertThat(function.getKeyspace()).isEqualTo(keyspace); + @Test(groups = "short") + public void should_parse_and_format_function_with_no_arguments() { + // given + String cql = + String.format( + "CREATE FUNCTION %s.pi() CALLED ON NULL INPUT RETURNS double LANGUAGE java AS 'return Math.PI;';", + keyspace); + // when + session().execute(cql); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + FunctionMetadata function = keyspace.getFunction("pi"); + assertThat(function).isNotNull(); + assertThat(function.getKeyspace()).isEqualTo(keyspace); + assertThat(function.getSignature()).isEqualTo("pi()"); + assertThat(function.getSimpleName()).isEqualTo("pi"); + assertThat(function.getReturnType()).isEqualTo(DataType.cdouble()); + assertThat(function.getArguments()).isEmpty(); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getBody()).isEqualTo("return Math.PI;"); + assertThat(function.isCalledOnNullInput()).isTrue(); + assertThat(function.toString()).isEqualTo(cql); + assertThat(function.exportAsString()) + .isEqualTo( + String.format( + "CREATE FUNCTION %s.pi()\n" + + " CALLED ON NULL INPUT\n" + + " RETURNS double\n" + + " LANGUAGE java\n" + + " AS 'return Math.PI;';", + this.keyspace)); + } - assertThat(function.getSignature()).isEqualTo("\"NUM_PHONES_ACCU\"(int,\"Address\")"); - 
assertThat(function.getSimpleName()).isEqualTo("NUM_PHONES_ACCU"); - assertThat(function.getReturnType()).isEqualTo(cint()); - assertThat(function.getArguments()).containsExactly(entry("previous_total", cint()), entry("ADDRESS", addressType)); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getBody()).isEqualTo(body); - assertThat(function.isCalledOnNullInput()).isTrue(); - assertThat(function.toString()).isEqualTo(cqlFunction); - } + @Test(groups = "short") + public void should_parse_and_format_function_with_udts() { + // given + String body = + "//If \"called on null input\", handle nulls\n" + + "if(ADDRESS == null) return previous_total + 0;\n" + + "//User types are converted to com.datastax.driver.core.UDTValue types\n" + + "java.util.Set phones = ADDRESS.getSet(\"phones\", com.datastax.driver.core.UDTValue.class);\n" + + "return previous_total + phones.size();\n"; + String cqlFunction = + String.format( + "CREATE FUNCTION %s.\"NUM_PHONES_ACCU\"(previous_total int,\"ADDRESS\" \"Address\") " + + "CALLED ON NULL INPUT " + + "RETURNS int " + + "LANGUAGE java " + + "AS " + + "'" + + body + + "';", + keyspace); + // when + session().execute(cqlFunction); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + UserType addressType = keyspace.getUserType("\"Address\""); + FunctionMetadata function = keyspace.getFunction("\"NUM_PHONES_ACCU\"", cint(), addressType); + assertThat(function).isNotNull(); + assertThat(function.getKeyspace()).isEqualTo(keyspace); - /** - * Ensures that functions whose arguments contain complex types such as - * tuples and collections, and nested combinations thereof, are - * correctly parsed. - * - * @jira_ticket JAVA-1137 - */ - @Test(groups = "short") - public void should_parse_and_format_functions_with_complex_arguments() { - // given - String cql = String.format("CREATE FUNCTION %s.complex(x tuple, map>) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return 42;';", keyspace); - // when - session().execute(cql); - // then - KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); - DataType argumentType = cluster().getMetadata().newTupleType(cluster().getMetadata().newTupleType(cint()), map(cint(), cint())); - FunctionMetadata function = keyspace.getFunction("complex", argumentType); - assertThat(function).isNotNull(); - assertThat(function.getKeyspace()).isEqualTo(keyspace); - assertThat(function.getSignature()).isEqualTo("complex(tuple, map>)"); - assertThat(function.getSimpleName()).isEqualTo("complex"); - assertThat(function.getReturnType()).isEqualTo(cint()); - assertThat(function.getArguments()) - .containsEntry("x", argumentType); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getBody()).isEqualTo("return 42;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.toString()) - .isEqualTo(cql); - assertThat(function.exportAsString()) - .isEqualTo(String.format("CREATE FUNCTION %s.complex(\n" - + " x tuple, map>)\n" - + "RETURNS NULL ON NULL INPUT\n" - + "RETURNS int\n" - + "LANGUAGE java\n" - + "AS 'return 42;';", this.keyspace)); - } + assertThat(function.getSignature()).isEqualTo("\"NUM_PHONES_ACCU\"(int,\"Address\")"); + assertThat(function.getSimpleName()).isEqualTo("NUM_PHONES_ACCU"); + assertThat(function.getReturnType()).isEqualTo(cint()); + assertThat(function.getArguments()) + .containsExactly(entry("previous_total", cint()), entry("ADDRESS", addressType)); + 
assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getBody()).isEqualTo(body); + assertThat(function.isCalledOnNullInput()).isTrue(); + assertThat(function.toString()).isEqualTo(cqlFunction); + } - @Override - public void onTestContextInitialized() { - execute( - String.format("CREATE TYPE IF NOT EXISTS %s.\"Phone\" (number text)", keyspace), - String.format("CREATE TYPE IF NOT EXISTS %s.\"Address\" (" - + " street text," - + " city text," - + " zip int," - + " phones frozen>>," - + " location frozen>" - + ")", keyspace) - ); - } + /** + * Ensures that functions whose arguments contain complex types such as tuples and collections, + * and nested combinations thereof, are correctly parsed. + * + * @jira_ticket JAVA-1137 + */ + @Test(groups = "short") + public void should_parse_and_format_functions_with_complex_arguments() { + // given + String cql = + String.format( + "CREATE FUNCTION %s.complex(x tuple, map>) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return 42;';", + keyspace); + // when + session().execute(cql); + // then + KeyspaceMetadata keyspace = cluster().getMetadata().getKeyspace(this.keyspace); + DataType argumentType = + cluster() + .getMetadata() + .newTupleType(cluster().getMetadata().newTupleType(cint()), map(cint(), cint())); + FunctionMetadata function = keyspace.getFunction("complex", argumentType); + assertThat(function).isNotNull(); + assertThat(function.getKeyspace()).isEqualTo(keyspace); + assertThat(function.getSignature()).isEqualTo("complex(tuple, map>)"); + assertThat(function.getSimpleName()).isEqualTo("complex"); + assertThat(function.getReturnType()).isEqualTo(cint()); + assertThat(function.getArguments()).containsEntry("x", argumentType); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getBody()).isEqualTo("return 42;"); + assertThat(function.isCalledOnNullInput()).isFalse(); + assertThat(function.toString()).isEqualTo(cql); + assertThat(function.exportAsString()) + .isEqualTo( + String.format( + "CREATE FUNCTION %s.complex(x tuple, map>)\n" + + " RETURNS NULL ON NULL INPUT\n" + + " RETURNS int\n" + + " LANGUAGE java\n" + + " AS 'return 42;';", + this.keyspace)); + } + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TYPE IF NOT EXISTS %s.\"Phone\" (number text)", keyspace), + String.format( + "CREATE TYPE IF NOT EXISTS %s.\"Address\" (" + + " street text," + + " city text," + + " zip int," + + " phones frozen>>," + + " location frozen>" + + ")", + keyspace)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java index 21018c4baf0..38d77520e4e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,478 +17,501 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.TestUtils.getValue; +import static com.datastax.driver.core.TestUtils.setValue; +import static com.google.common.collect.Lists.newArrayList; +import static org.assertj.core.api.Assertions.assertThat; + import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.reflect.TypeToken; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; -import java.util.*; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; - -import static com.datastax.driver.core.TestUtils.getValue; -import static com.datastax.driver.core.TestUtils.setValue; -import static com.google.common.collect.Lists.newArrayList; -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; public class GettableDataIntegrationTest extends CCMTestsSupport { - boolean is21; + boolean is21; - CodecRegistry registry = new CodecRegistry(); + CodecRegistry registry = new CodecRegistry(); - // Used for generating unique keys. - AtomicInteger keyCounter = new AtomicInteger(0); + // Used for generating unique keys. + AtomicInteger keyCounter = new AtomicInteger(0); - @Override - public void onTestContextInitialized() { - is21 = ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.1.3")) > 0; - // only add tuples / nested collections at > 2.1.3. - execute("CREATE TABLE codec_mapping (k int PRIMARY KEY, " - + "v int, l list, m map" + - (is21 ? ", t tuple, s set>>)" : ")")); - } + @Override + public void onTestContextInitialized() { + is21 = ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.1.3")) > 0; + // only add tuples / nested collections at > 2.1.3. + execute( + "CREATE TABLE codec_mapping (k int PRIMARY KEY, " + + "v int, l list, m map" + + (is21 ? 
", t tuple, s set>>)" : ")")); + } - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withCodecRegistry(registry); - } + @Override + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder().withCodecRegistry(registry); + } - @BeforeClass(groups = "short") - public void setUpRegistry() { - for (TypeMapping mapping : mappings) { - registry.register(mapping.codec); - } + @BeforeClass(groups = "short") + public void setUpRegistry() { + for (TypeMapping mapping : mappings) { + registry.register(mapping.codec); } + } - static final ByteBuffer intBuf = ByteBuffer.allocate(4); + static final ByteBuffer intBuf = ByteBuffer.allocate(4); - static { - intBuf.putInt(1); - intBuf.flip(); - } + static { + intBuf.putInt(1); + intBuf.flip(); + } - static InetAddress localhost; + static InetAddress localhost; - static { - try { - localhost = InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - localhost = null; - } + static { + try { + localhost = InetAddress.getLocalHost(); + } catch (UnknownHostException e) { + localhost = null; } - - // Mappings of Codecs, Data Type to access as (used for determining what get|set method to call), value to set. - private TypeMapping[] mappings = { - // set|getString - new TypeMapping(new IntToStringCodec(), DataType.varchar(), "1"), - // set|getLong - new TypeMapping(new IntToLongCodec(), DataType.bigint(), 1L), - // set|getBytes - new TypeMapping(new IntToByteBufferCodec(), DataType.blob(), intBuf), - // set|getBool - new TypeMapping(new IntToBooleanCodec(), DataType.cboolean(), true), - // set|getDecimal - new TypeMapping(new IntToBigDecimalCodec(), DataType.decimal(), new BigDecimal(1)), - // set|getDouble - new TypeMapping(new IntToDoubleCodec(), DataType.cdouble(), 1.0d), - // set|getFloat - new TypeMapping(new IntToFloatCodec(), DataType.cfloat(), 1.0f), - // set|getInet - new TypeMapping(new IntToInetAddressCodec(), DataType.inet(), localhost), - // set|getTime - new TypeMapping(new IntToLongCodec(), DataType.time(), 8675309L), - // set|getByte - new TypeMapping(new IntToByteCodec(), DataType.tinyint(), (byte) 0xCF), - // set|getShort - new TypeMapping(new IntToShortCodec(), DataType.smallint(), (short) 1003), - // set|getTimestamp - new TypeMapping(new IntToDateCodec(), DataType.timestamp(), new Date(124677)), - // set|getDate - new TypeMapping(new IntToLocalDateCodec(), DataType.date(), LocalDate.fromDaysSinceEpoch(1523)), - // set|getUUID - new TypeMapping(new IntToUUIDCodec(), DataType.uuid(), new UUID(244242, 0)), - // set|getVarint - new TypeMapping(new IntToBigIntegerCodec(), DataType.varint(), BigInteger.valueOf(4566432L)) - }; - - /** - * Validates that all {@link GettableData} types will allow their get methods to be invoked on a column that does - * not match data's cql type if a codec is registered that maps the java type of the getter method to the cql type - * of the column. - *

    - * Also validates that all {@link SettableData} types will allow their set methods to be invoked on a column that - * does not match data's cql type if a codec is registered that maps the java type of the setter method to the cql - * type of the column. - *

    - * Executes the following for each set|get set: - *

    - * 1. Insert row using a prepared statement and binding by name.
    - * 2. Insert row using a prepared statement and binding by index.
    - * 3. Insert row using a prepared statement and binding everything at once.
    - * 4. Retrieve inserted rows and get values by name.
    - * 5. Retrieve inserted rows and get values by index.
    - * - * @jira_ticket JAVA-940 - * @test_category queries - */ - @Test(groups = "short") - public void should_allow_getting_and_setting_by_type_if_codec_registered() { - String insertStmt = "INSERT INTO codec_mapping (k,v,l,m" + (is21 ? ",t,s" : "") + ") values (?,?,?,?" + (is21 ? ",?,?)" : ")"); - PreparedStatement insert = session().prepare(insertStmt); - PreparedStatement select = session().prepare("SELECT v,l,m" + (is21 ? ",t,s" : "") + " from codec_mapping where k=?"); - - TupleType tupleType = new TupleType(newArrayList(DataType.cint(), DataType.cint()), - cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), registry); - - for (TypeMapping mapping : mappings) { - // Keys used to insert data in this iteration. - List keys = newArrayList(); - - // Values to store. - Map map = ImmutableMap.of(mapping.value, mapping.value); - List list = newArrayList(mapping.value); - Set> set = ImmutableSet.of(list); - TupleValue tupleValue = new TupleValue(tupleType); - setValue(tupleValue, 0, mapping.outerType, mapping.value); - setValue(tupleValue, 1, mapping.outerType, mapping.value); - - // Insert by name. - BoundStatement byName = insert.bind(); - int byNameKey = keyCounter.incrementAndGet(); - keys.add(byNameKey); - byName.setInt("k", byNameKey); - setValue(byName, "v", mapping.outerType, mapping.value); - byName.setList("l", list, mapping.javaType); - byName.setMap("m", map, mapping.javaType, mapping.javaType); - if (is21) { - byName.setTupleValue("t", tupleValue); - byName.setSet("s", set, TypeTokens.listOf(mapping.javaType)); - } - session().execute(byName); - - // Insert by index. - BoundStatement byIndex = insert.bind(); - int byIndexKey = keyCounter.incrementAndGet(); - keys.add(byIndexKey); - byIndex.setInt(0, byIndexKey); - setValue(byIndex, 1, mapping.outerType, mapping.value); - byIndex.setList(2, list, mapping.javaType); - byIndex.setMap(3, map, mapping.javaType, mapping.javaType); - if (is21) { - byIndex.setTupleValue(4, tupleValue); - byIndex.setSet(5, set, TypeTokens.listOf(mapping.javaType)); - } - session().execute(byIndex); - - // Insert by binding all at once. - BoundStatement fullBind; - int fullBindKey = keyCounter.incrementAndGet(); - keys.add(fullBindKey); - if (is21) { - fullBind = insert.bind(fullBindKey, mapping.value, list, map, tupleValue, set); - } else { - fullBind = insert.bind(fullBindKey, mapping.value, list, map); - } - session().execute(fullBind); - - for (int key : keys) { - // Retrieve by name. - Row row = session().execute(select.bind(key)).one(); - assertThat(getValue(row, "v", mapping.outerType, registry)).isEqualTo(mapping.value); - assertThat(row.getList("l", mapping.codec.getJavaType())).isEqualTo(list); - assertThat(row.getMap("m", mapping.codec.getJavaType(), mapping.codec.getJavaType())).isEqualTo(map); - - if (is21) { - TupleValue returnedTuple = row.getTupleValue("t"); - assertThat(getValue(returnedTuple, 0, mapping.outerType, registry)).isEqualTo(mapping.value); - assertThat(getValue(returnedTuple, 1, mapping.outerType, registry)).isEqualTo(mapping.value); - - assertThat(row.getSet("s", TypeTokens.listOf(mapping.javaType))).isEqualTo(set); - } - - // Retrieve by index. 
- assertThat(getValue(row, 0, mapping.outerType, registry)).isEqualTo(mapping.value); - assertThat(row.getList(1, mapping.codec.getJavaType())).isEqualTo(list); - assertThat(row.getMap(2, mapping.codec.getJavaType(), mapping.codec.getJavaType())).isEqualTo(map); - - if (is21) { - TupleValue returnedTuple = row.getTupleValue(3); - assertThat(getValue(returnedTuple, 0, mapping.outerType, registry)).isEqualTo(mapping.value); - assertThat(getValue(returnedTuple, 1, mapping.outerType, registry)).isEqualTo(mapping.value); - - assertThat(row.getSet(4, TypeTokens.listOf(mapping.javaType))).isEqualTo(set); - } - } - } + } + + // Mappings of Codecs, Data Type to access as (used for determining what get|set method to call), + // value to set. + private TypeMapping[] mappings = { + // set|getString + new TypeMapping(new IntToStringCodec(), DataType.varchar(), "1"), + // set|getLong + new TypeMapping(new IntToLongCodec(), DataType.bigint(), 1L), + // set|getBytes + new TypeMapping(new IntToByteBufferCodec(), DataType.blob(), intBuf), + // set|getBool + new TypeMapping(new IntToBooleanCodec(), DataType.cboolean(), true), + // set|getDecimal + new TypeMapping(new IntToBigDecimalCodec(), DataType.decimal(), new BigDecimal(1)), + // set|getDouble + new TypeMapping(new IntToDoubleCodec(), DataType.cdouble(), 1.0d), + // set|getFloat + new TypeMapping(new IntToFloatCodec(), DataType.cfloat(), 1.0f), + // set|getInet + new TypeMapping(new IntToInetAddressCodec(), DataType.inet(), localhost), + // set|getTime + new TypeMapping(new IntToLongCodec(), DataType.time(), 8675309L), + // set|getByte + new TypeMapping(new IntToByteCodec(), DataType.tinyint(), (byte) 0xCF), + // set|getShort + new TypeMapping(new IntToShortCodec(), DataType.smallint(), (short) 1003), + // set|getTimestamp + new TypeMapping(new IntToDateCodec(), DataType.timestamp(), new Date(124677)), + // set|getDate + new TypeMapping( + new IntToLocalDateCodec(), DataType.date(), LocalDate.fromDaysSinceEpoch(1523)), + // set|getUUID + new TypeMapping(new IntToUUIDCodec(), DataType.uuid(), new UUID(244242, 0)), + // set|getVarint + new TypeMapping( + new IntToBigIntegerCodec(), DataType.varint(), BigInteger.valueOf(4566432L)) + }; + + /** + * Validates that all {@link GettableData} types will allow their get methods to be invoked on a + * column that does not match data's cql type if a codec is registered that maps the java type of + * the getter method to the cql type of the column. + * + *
+ * <p>Also validates that all {@link SettableData} types will allow their set methods to be
+ * invoked on a column that does not match data's cql type if a codec is registered that maps the
+ * java type of the setter method to the cql type of the column.
+ *
+ * <p>Executes the following for each set|get set:
+ *
+ * <ol>
+ *   <li>Insert row using a prepared statement and binding by name.
+ *   <li>Insert row using a prepared statement and binding by index.
+ *   <li>Insert row using a prepared statement and binding everything at once.
+ *   <li>Retrieve inserted rows and get values by name.
+ *   <li>Retrieve inserted rows and get values by index.
+ * </ol>
    + * + * @jira_ticket JAVA-940 + * @test_category queries + */ + @Test(groups = "short") + public void should_allow_getting_and_setting_by_type_if_codec_registered() { + String insertStmt = + "INSERT INTO codec_mapping (k,v,l,m" + + (is21 ? ",t,s" : "") + + ") values (?,?,?,?" + + (is21 ? ",?,?)" : ")"); + PreparedStatement insert = session().prepare(insertStmt); + PreparedStatement select = + session().prepare("SELECT v,l,m" + (is21 ? ",t,s" : "") + " from codec_mapping where k=?"); + + TupleType tupleType = + new TupleType( + newArrayList(DataType.cint(), DataType.cint()), + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), + registry); + + for (TypeMapping mapping : mappings) { + // Keys used to insert data in this iteration. + List keys = newArrayList(); + + // Values to store. + Map map = ImmutableMap.of(mapping.value, mapping.value); + List list = newArrayList(mapping.value); + Set> set = ImmutableSet.of(list); + TupleValue tupleValue = new TupleValue(tupleType); + setValue(tupleValue, 0, mapping.outerType, mapping.value); + setValue(tupleValue, 1, mapping.outerType, mapping.value); + + // Insert by name. + BoundStatement byName = insert.bind(); + int byNameKey = keyCounter.incrementAndGet(); + keys.add(byNameKey); + byName.setInt("k", byNameKey); + setValue(byName, "v", mapping.outerType, mapping.value); + byName.setList("l", list, mapping.javaType); + byName.setMap("m", map, mapping.javaType, mapping.javaType); + if (is21) { + byName.setTupleValue("t", tupleValue); + byName.setSet("s", set, TypeTokens.listOf(mapping.javaType)); + } + session().execute(byName); + + // Insert by index. + BoundStatement byIndex = insert.bind(); + int byIndexKey = keyCounter.incrementAndGet(); + keys.add(byIndexKey); + byIndex.setInt(0, byIndexKey); + setValue(byIndex, 1, mapping.outerType, mapping.value); + byIndex.setList(2, list, mapping.javaType); + byIndex.setMap(3, map, mapping.javaType, mapping.javaType); + if (is21) { + byIndex.setTupleValue(4, tupleValue); + byIndex.setSet(5, set, TypeTokens.listOf(mapping.javaType)); + } + session().execute(byIndex); + + // Insert by binding all at once. + BoundStatement fullBind; + int fullBindKey = keyCounter.incrementAndGet(); + keys.add(fullBindKey); + if (is21) { + fullBind = insert.bind(fullBindKey, mapping.value, list, map, tupleValue, set); + } else { + fullBind = insert.bind(fullBindKey, mapping.value, list, map); + } + session().execute(fullBind); + + for (int key : keys) { + // Retrieve by name. + Row row = session().execute(select.bind(key)).one(); + assertThat(getValue(row, "v", mapping.outerType, registry)).isEqualTo(mapping.value); + assertThat(row.getList("l", mapping.codec.getJavaType())).isEqualTo(list); + assertThat(row.getMap("m", mapping.codec.getJavaType(), mapping.codec.getJavaType())) + .isEqualTo(map); + + if (is21) { + TupleValue returnedTuple = row.getTupleValue("t"); + assertThat(getValue(returnedTuple, 0, mapping.outerType, registry)) + .isEqualTo(mapping.value); + assertThat(getValue(returnedTuple, 1, mapping.outerType, registry)) + .isEqualTo(mapping.value); + + assertThat(row.getSet("s", TypeTokens.listOf(mapping.javaType))).isEqualTo(set); + } + + // Retrieve by index. 
+ assertThat(getValue(row, 0, mapping.outerType, registry)).isEqualTo(mapping.value); + assertThat(row.getList(1, mapping.codec.getJavaType())).isEqualTo(list); + assertThat(row.getMap(2, mapping.codec.getJavaType(), mapping.codec.getJavaType())) + .isEqualTo(map); + + if (is21) { + TupleValue returnedTuple = row.getTupleValue(3); + assertThat(getValue(returnedTuple, 0, mapping.outerType, registry)) + .isEqualTo(mapping.value); + assertThat(getValue(returnedTuple, 1, mapping.outerType, registry)) + .isEqualTo(mapping.value); + + assertThat(row.getSet(4, TypeTokens.listOf(mapping.javaType))).isEqualTo(set); + } + } } - - private static class TypeMapping { - final TypeCodec codec; - final TypeToken javaType; - final DataType outerType; - final T value; - - @SuppressWarnings("unchecked") - TypeMapping(TypeCodec codec, DataType outerType, T value) { - this.codec = codec; - this.javaType = (TypeToken) codec.getJavaType(); - this.outerType = outerType; - this.value = value; - } + } + + private static class TypeMapping { + final TypeCodec codec; + final TypeToken javaType; + final DataType outerType; + final T value; + + @SuppressWarnings("unchecked") + TypeMapping(TypeCodec codec, DataType outerType, T value) { + this.codec = codec; + this.javaType = (TypeToken) codec.getJavaType(); + this.outerType = outerType; + this.value = value; } + } - // Int <-> Type mappings. - private static class IntToLongCodec extends MappingCodec { + // Int <-> Type mappings. + private static class IntToLongCodec extends MappingCodec { - IntToLongCodec() { - super(TypeCodec.cint(), Long.class); - } + IntToLongCodec() { + super(TypeCodec.cint(), Long.class); + } - @Override - protected Long deserialize(Integer value) { - return value.longValue(); - } + @Override + protected Long deserialize(Integer value) { + return value.longValue(); + } - @Override - protected Integer serialize(Long value) { - return value.intValue(); - } + @Override + protected Integer serialize(Long value) { + return value.intValue(); } + } - private static class IntToStringCodec extends MappingCodec { + private static class IntToStringCodec extends MappingCodec { - IntToStringCodec() { - super(TypeCodec.cint(), String.class); - } + IntToStringCodec() { + super(TypeCodec.cint(), String.class); + } - @Override - protected String deserialize(Integer value) { - return value.toString(); - } + @Override + protected String deserialize(Integer value) { + return value.toString(); + } - @Override - protected Integer serialize(String value) { - return Integer.parseInt(value); - } + @Override + protected Integer serialize(String value) { + return Integer.parseInt(value); } + } - private static class IntToByteBufferCodec extends MappingCodec { + private static class IntToByteBufferCodec extends MappingCodec { - IntToByteBufferCodec() { - super(TypeCodec.cint(), ByteBuffer.class); - } + IntToByteBufferCodec() { + super(TypeCodec.cint(), ByteBuffer.class); + } - @Override - protected ByteBuffer deserialize(Integer value) { - ByteBuffer buf = ByteBuffer.allocate(4); - buf.putInt(value); - buf.flip(); - return buf; - } + @Override + protected ByteBuffer deserialize(Integer value) { + ByteBuffer buf = ByteBuffer.allocate(4); + buf.putInt(value); + buf.flip(); + return buf; + } - @Override - protected Integer serialize(ByteBuffer value) { - return value.duplicate().getInt(); - } + @Override + protected Integer serialize(ByteBuffer value) { + return value.duplicate().getInt(); } + } - private static class IntToBooleanCodec extends MappingCodec { + private static 
class IntToBooleanCodec extends MappingCodec { - IntToBooleanCodec() { - super(TypeCodec.cint(), Boolean.class); - } + IntToBooleanCodec() { + super(TypeCodec.cint(), Boolean.class); + } - @Override - protected Boolean deserialize(Integer value) { - return value != 0; - } + @Override + protected Boolean deserialize(Integer value) { + return value != 0; + } - @Override - protected Integer serialize(Boolean value) { - return value ? 1 : 0; - } + @Override + protected Integer serialize(Boolean value) { + return value ? 1 : 0; } + } - private static class IntToBigDecimalCodec extends MappingCodec { + private static class IntToBigDecimalCodec extends MappingCodec { - IntToBigDecimalCodec() { - super(TypeCodec.cint(), BigDecimal.class); - } + IntToBigDecimalCodec() { + super(TypeCodec.cint(), BigDecimal.class); + } - @Override - protected BigDecimal deserialize(Integer value) { - return new BigDecimal(value); - } + @Override + protected BigDecimal deserialize(Integer value) { + return new BigDecimal(value); + } - @Override - protected Integer serialize(BigDecimal value) { - return value.intValue(); - } + @Override + protected Integer serialize(BigDecimal value) { + return value.intValue(); } + } - private static class IntToDoubleCodec extends MappingCodec { + private static class IntToDoubleCodec extends MappingCodec { - IntToDoubleCodec() { - super(TypeCodec.cint(), Double.class); - } + IntToDoubleCodec() { + super(TypeCodec.cint(), Double.class); + } - @Override - protected Double deserialize(Integer value) { - return value.doubleValue(); - } + @Override + protected Double deserialize(Integer value) { + return value.doubleValue(); + } - @Override - protected Integer serialize(Double value) { - return value.intValue(); - } + @Override + protected Integer serialize(Double value) { + return value.intValue(); } + } - private static class IntToFloatCodec extends MappingCodec { + private static class IntToFloatCodec extends MappingCodec { - IntToFloatCodec() { - super(TypeCodec.cint(), Float.class); - } + IntToFloatCodec() { + super(TypeCodec.cint(), Float.class); + } - @Override - protected Float deserialize(Integer value) { - return value.floatValue(); - } + @Override + protected Float deserialize(Integer value) { + return value.floatValue(); + } - @Override - protected Integer serialize(Float value) { - return value.intValue(); - } + @Override + protected Integer serialize(Float value) { + return value.intValue(); } + } - private static class IntToInetAddressCodec extends MappingCodec { + private static class IntToInetAddressCodec extends MappingCodec { - IntToInetAddressCodec() { - super(TypeCodec.cint(), InetAddress.class); - } + IntToInetAddressCodec() { + super(TypeCodec.cint(), InetAddress.class); + } - @Override - protected InetAddress deserialize(Integer value) { - byte[] address = ByteBuffer.allocate(4).putInt(value).array(); - try { - return InetAddress.getByAddress(address); - } catch (UnknownHostException e) { - return null; - } - } + @Override + protected InetAddress deserialize(Integer value) { + byte[] address = ByteBuffer.allocate(4).putInt(value).array(); + try { + return InetAddress.getByAddress(address); + } catch (UnknownHostException e) { + return null; + } + } - @Override - protected Integer serialize(InetAddress value) { - return ByteBuffer.wrap(value.getAddress()).getInt(); - } + @Override + protected Integer serialize(InetAddress value) { + return ByteBuffer.wrap(value.getAddress()).getInt(); } + } - private static class IntToByteCodec extends MappingCodec { + private 
static class IntToByteCodec extends MappingCodec { - IntToByteCodec() { - super(TypeCodec.cint(), Byte.class); - } + IntToByteCodec() { + super(TypeCodec.cint(), Byte.class); + } - @Override - protected Byte deserialize(Integer value) { - return value.byteValue(); - } + @Override + protected Byte deserialize(Integer value) { + return value.byteValue(); + } - @Override - protected Integer serialize(Byte value) { - return value.intValue(); - } + @Override + protected Integer serialize(Byte value) { + return value.intValue(); } + } - private static class IntToShortCodec extends MappingCodec { + private static class IntToShortCodec extends MappingCodec { - IntToShortCodec() { - super(TypeCodec.cint(), Short.class); - } + IntToShortCodec() { + super(TypeCodec.cint(), Short.class); + } - @Override - protected Short deserialize(Integer value) { - return value.shortValue(); - } + @Override + protected Short deserialize(Integer value) { + return value.shortValue(); + } - @Override - protected Integer serialize(Short value) { - return value.intValue(); - } + @Override + protected Integer serialize(Short value) { + return value.intValue(); } + } - private static class IntToDateCodec extends MappingCodec { + private static class IntToDateCodec extends MappingCodec { - IntToDateCodec() { - super(TypeCodec.cint(), Date.class); - } + IntToDateCodec() { + super(TypeCodec.cint(), Date.class); + } - @Override - protected Date deserialize(Integer value) { - return new Date(value); - } + @Override + protected Date deserialize(Integer value) { + return new Date(value); + } - @Override - protected Integer serialize(Date value) { - return new Long(value.getTime()).intValue(); - } + @Override + protected Integer serialize(Date value) { + return new Long(value.getTime()).intValue(); } + } - private static class IntToLocalDateCodec extends MappingCodec { + private static class IntToLocalDateCodec extends MappingCodec { - IntToLocalDateCodec() { - super(TypeCodec.cint(), LocalDate.class); - } + IntToLocalDateCodec() { + super(TypeCodec.cint(), LocalDate.class); + } - @Override - protected LocalDate deserialize(Integer value) { - return LocalDate.fromDaysSinceEpoch(value); - } + @Override + protected LocalDate deserialize(Integer value) { + return LocalDate.fromDaysSinceEpoch(value); + } - @Override - protected Integer serialize(LocalDate value) { - return value.getDaysSinceEpoch(); - } + @Override + protected Integer serialize(LocalDate value) { + return value.getDaysSinceEpoch(); } + } - private static class IntToUUIDCodec extends MappingCodec { + private static class IntToUUIDCodec extends MappingCodec { - IntToUUIDCodec() { - super(TypeCodec.cint(), UUID.class); - } + IntToUUIDCodec() { + super(TypeCodec.cint(), UUID.class); + } - @Override - protected UUID deserialize(Integer value) { - return new UUID(value, 0); - } + @Override + protected UUID deserialize(Integer value) { + return new UUID(value, 0); + } - @Override - protected Integer serialize(UUID value) { - return new Long(value.getMostSignificantBits()).intValue(); - } + @Override + protected Integer serialize(UUID value) { + return new Long(value.getMostSignificantBits()).intValue(); } + } - private static class IntToBigIntegerCodec extends MappingCodec { + private static class IntToBigIntegerCodec extends MappingCodec { - IntToBigIntegerCodec() { - super(TypeCodec.cint(), BigInteger.class); - } + IntToBigIntegerCodec() { + super(TypeCodec.cint(), BigInteger.class); + } - @Override - protected BigInteger deserialize(Integer value) { - return 
BigInteger.valueOf((long) value); - } + @Override + protected BigInteger deserialize(Integer value) { + return BigInteger.valueOf((long) value); + } - @Override - protected Integer serialize(BigInteger value) { - return value.intValue(); - } + @Override + protected Integer serialize(BigInteger value) { + return value.intValue(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HeapCompressionTest.java b/driver-core/src/test/java/com/datastax/driver/core/HeapCompressionTest.java index bf80cad0e14..e330f4bf70f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HeapCompressionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HeapCompressionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,34 +23,35 @@ public class HeapCompressionTest extends CompressionTest { - @BeforeClass(groups = "isolated") - public void beforeTestClass() throws Exception { - // Configure with noPreferDirect and noUnsafe to force heap buffers. - System.setProperty("io.netty.noPreferDirect", "true"); - System.setProperty("io.netty.noUnsafe", "true"); - super.beforeTestClass(); - } + @BeforeClass(groups = "isolated") + public void beforeTestClass() throws Exception { + // Configure with noPreferDirect and noUnsafe to force heap buffers. + System.setProperty("io.netty.noPreferDirect", "true"); + System.setProperty("io.netty.noUnsafe", "true"); + super.beforeTestClass(); + } - /** - * Validates that snappy compression still works when using heap buffers. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test(groups = "isolated") - public void should_function_with_snappy_compression() throws Exception { - compressionTest(ProtocolOptions.Compression.SNAPPY); - } + /** + * Validates that snappy compression still works when using heap buffers. + * + * @test_category connection:compression + * @expected_result session established and queries made successfully using it. + */ + @Test(groups = "isolated") + public void should_function_with_snappy_compression() throws Exception { + skipTestWithCassandraVersionOrHigher("4.0.0", "snappy"); + compressionTest(ProtocolOptions.Compression.SNAPPY); + } - /** - * Validates that lz4 compression still works when using heap buffers. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. 
- */ - @Test(groups = "isolated") - @CassandraVersion("2.0.0") - public void should_function_with_lz4_compression() throws Exception { - compressionTest(ProtocolOptions.Compression.LZ4); - } + /** + * Validates that lz4 compression still works when using heap buffers. + * + * @test_category connection:compression + * @expected_result session established and queries made successfully using it. + */ + @Test(groups = "isolated") + @CassandraVersion("2.0.0") + public void should_function_with_lz4_compression() throws Exception { + compressionTest(ProtocolOptions.Compression.LZ4); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HeartbeatTest.java b/driver-core/src/test/java/com/datastax/driver/core/HeartbeatTest.java index 2ccb8009c58..c38600ef16e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HeartbeatTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HeartbeatTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,15 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.assertj.core.api.Assertions.fail; +import static org.scassandra.http.client.PrimingRequest.queryBuilder; +import static org.scassandra.http.client.PrimingRequest.then; + import com.google.common.util.concurrent.Uninterruptibles; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.slf4j.LoggerFactory; @@ -23,260 +33,264 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; - -import static com.datastax.driver.core.Assertions.assertThat; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.assertj.core.api.Assertions.fail; -import static org.scassandra.http.client.PrimingRequest.queryBuilder; -import static org.scassandra.http.client.PrimingRequest.then; - public class HeartbeatTest extends ScassandraTestBase { - static org.slf4j.Logger logger = LoggerFactory.getLogger(HeartbeatTest.class); - Logger connectionLogger = Logger.getLogger(Connection.class); - MemoryAppender logs; - Level originalLevel; - - @BeforeMethod(groups = "long") - public void startCapturingLogs() { - originalLevel = connectionLogger.getLevel(); - connectionLogger.setLevel(Level.DEBUG); - logs = new MemoryAppender(); - connectionLogger.addAppender(logs); - } - - @AfterMethod(groups = "long", alwaysRun = true) - public void stopCapturingLogs() { - connectionLogger.setLevel(originalLevel); - 
connectionLogger.removeAppender(logs); - } - - /** - * Ensures that a heartbeat message is sent after the configured heartbeat interval of idle time and succeeds and - * continues to be sent as long as the connection remains idle. - * - * @test_category connection:heartbeat - * @expected_result heartbeat is sent after heartbeat interval (3) seconds of idle time. - * @jira_ticket JAVA-533 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "long") - public void should_send_heartbeat_when_connection_is_inactive() throws InterruptedException { - Cluster cluster = Cluster.builder() - .addContactPoints(hostAddress.getAddress()) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(3)) - .build(); - - try { - // Don't create any session, only the control connection will be established - cluster.init(); - - for (int i = 0; i < 5; i++) { - triggerRequestOnControlConnection(cluster); - SECONDS.sleep(1); - } - assertThat(logs.getNext()).doesNotContain("sending heartbeat"); - - // Ensure heartbeat is sent after no activity. - SECONDS.sleep(4); - assertThat(logs.getNext()) - .contains("sending heartbeat") - .contains("heartbeat query succeeded"); - - // Ensure heartbeat is sent after continued inactivity. - SECONDS.sleep(4); - assertThat(logs.getNext()) - .contains("sending heartbeat") - .contains("heartbeat query succeeded"); - - // Ensure heartbeat is not sent after activity. - logs.getNext(); - for (int i = 0; i < 5; i++) { - triggerRequestOnControlConnection(cluster); - SECONDS.sleep(1); - } - assertThat(logs.getNext()).doesNotContain("sending heartbeat"); - - // Finally, ensure heartbeat is sent after inactivity. - SECONDS.sleep(4); - assertThat(logs.getNext()) - .contains("sending heartbeat") - .contains("heartbeat query succeeded"); - } finally { - cluster.close(); - } + static org.slf4j.Logger logger = LoggerFactory.getLogger(HeartbeatTest.class); + Logger connectionLogger = Logger.getLogger(Connection.class); + MemoryAppender logs; + Level originalLevel; + + @BeforeMethod(groups = "long") + public void startCapturingLogs() { + originalLevel = connectionLogger.getLevel(); + connectionLogger.setLevel(Level.DEBUG); + logs = new MemoryAppender(); + connectionLogger.addAppender(logs); + } + + @AfterMethod(groups = "long", alwaysRun = true) + public void stopCapturingLogs() { + connectionLogger.setLevel(originalLevel); + connectionLogger.removeAppender(logs); + } + + /** + * Ensures that a heartbeat message is sent after the configured heartbeat interval of idle time + * and succeeds and continues to be sent as long as the connection remains idle. + * + * @test_category connection:heartbeat + * @expected_result heartbeat is sent after heartbeat interval (3) seconds of idle time. + * @jira_ticket JAVA-533 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "long") + public void should_send_heartbeat_when_connection_is_inactive() throws InterruptedException { + Cluster cluster = + Cluster.builder() + .addContactPoint(hostEndPoint) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(3)) + .build(); + + try { + // Don't create any session, only the control connection will be established + cluster.init(); + + for (int i = 0; i < 5; i++) { + triggerRequestOnControlConnection(cluster); + SECONDS.sleep(1); + } + assertThat(logs.getNext()).doesNotContain("sending heartbeat"); + + // Ensure heartbeat is sent after no activity. 
+ SECONDS.sleep(4); + assertThat(logs.getNext()) + .contains("sending heartbeat") + .contains("heartbeat query succeeded"); + + // Ensure heartbeat is sent after continued inactivity. + SECONDS.sleep(4); + assertThat(logs.getNext()) + .contains("sending heartbeat") + .contains("heartbeat query succeeded"); + + // Ensure heartbeat is not sent after activity. + logs.getNext(); + for (int i = 0; i < 5; i++) { + triggerRequestOnControlConnection(cluster); + SECONDS.sleep(1); + } + assertThat(logs.getNext()).doesNotContain("sending heartbeat"); + + // Finally, ensure heartbeat is sent after inactivity. + SECONDS.sleep(4); + assertThat(logs.getNext()) + .contains("sending heartbeat") + .contains("heartbeat query succeeded"); + } finally { + cluster.close(); } - - /** - * Verifies that there exists a line in logs that matches pattern. - * - * @param logs Captured log entries. - * @param pattern Pattern to match on individual lines. - * @return - */ - private void assertLineMatches(String logs, Pattern pattern) { - String lines[] = logs.split("\\r?\\n"); - for (String line : lines) { - if (pattern.matcher(line).matches()) { - return; - } - } - fail("Expecting: [" + logs + "] to contain " + pattern); + } + + /** + * Verifies that there exists a line in logs that matches pattern. + * + * @param logs Captured log entries. + * @param pattern Pattern to match on individual lines. + * @return + */ + private void assertLineMatches(String logs, Pattern pattern) { + String lines[] = logs.split("\\r?\\n"); + for (String line : lines) { + if (pattern.matcher(line).matches()) { + return; + } } - - /** - * Verifies that no line in logs matches pattern. - * - * @param logs Captured log entries. - * @param pattern Pattern to match on individual lines. - */ - private void assertNoLineMatches(String logs, Pattern pattern) { - String lines[] = logs.split("\\r?\\n"); - for (String line : lines) { - if (pattern.matcher(line).matches()) { - fail("Expecting: [" + logs + "] not to contain " + pattern); - } - } + fail("Expecting: [" + logs + "] to contain " + pattern); + } + + /** + * Verifies that no line in logs matches pattern. + * + * @param logs Captured log entries. + * @param pattern Pattern to match on individual lines. + */ + private void assertNoLineMatches(String logs, Pattern pattern) { + String lines[] = logs.split("\\r?\\n"); + for (String line : lines) { + if (pattern.matcher(line).matches()) { + fail("Expecting: [" + logs + "] not to contain " + pattern); + } } - - /** - * Ensures that a heartbeat message is sent after the configured heartbeat interval of idle time when no data is - * received on a connection even though are successful writes on the socket. - * - * @test_category connection:heartbeat - * @expected_result heartbeat is sent after heartbeat interval (3) seconds of idle time. - * @jira_ticket JAVA-1346 - * @since 3.0.6, 3.1.3 - */ - @Test(groups = "long") - public void should_send_heartbeat_when_requests_being_written_but_nothing_received() throws Exception { - Cluster cluster = Cluster.builder() - .addContactPoints(hostAddress.getAddress()) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(3).setConnectionsPerHost(HostDistance.LOCAL, 1, 1)) - .build(); - - // Prime 'ping' to never return a response this is a way to create outgoing traffic - // without receiving anything inbound. 
- scassandra.primingClient() - .prime(queryBuilder().withQuery("ping").withThen(then().withFixedDelay(8675309999L))); - - // Thread that will submit queries that get no response repeatedly. - Thread submitter = null; - try { - // Don't create any session, only the control connection will be established - cluster.init(); - - // Find the connection in the connection pool. - SessionManager session = (SessionManager) cluster.connect(); - Host host = TestUtils.findHost(cluster, 1); - Connection connection = session.pools.get(host).connections.get(0); - - // Extract connection name from toString implementation. - String connectionName = connection.toString() - .replaceAll("\\-", "\\\\-") // Replace - with \- so its properly escaped as a regex. - .replaceAll("Connection\\[\\/", "") // Replace first part of toString (Connection[ - .replaceAll("\\, inFlight.*", ""); // Replace everything after ',inFlight' - - // Define patterns that check for whether or not heartbeats are sent / received on a given connection. - Pattern heartbeatSentPattern = Pattern.compile(".*" + connectionName + ".*sending heartbeat"); - Pattern heartbeatReceivedPattern = Pattern.compile(".*" + connectionName + ".*heartbeat query succeeded"); - logger.debug("Heartbeat pattern is {}", heartbeatSentPattern); - - // Start query submission thread. - submitter = new Thread(new QuerySubmitter(session)); - submitter.start(); - - for (int i = 0; i < 5; i++) { - session.execute("bar"); - SECONDS.sleep(1); - } - - // Should be no heartbeats sent on pooled connection since we had successful requests. - String log = logs.getNext(); - assertNoLineMatches(log, heartbeatSentPattern); - - int inFlight = connection.inFlight.get(); - assertThat(inFlight).isGreaterThan(0); - - // Ensure heartbeat is sent after no received data, even though we have inflight requests (JAVA-1346). - SECONDS.sleep(4); - // Verify more requests were sent over this time period. - assertThat(connection.inFlight.get()).isGreaterThan(inFlight); - log = logs.getNext(); - // Heartbeat should have been sent and received. - assertLineMatches(log, heartbeatSentPattern); - assertLineMatches(log, heartbeatReceivedPattern); - } finally { - // interrupt thread so it stops submitting queries. - if (submitter != null) { - submitter.interrupt(); - } - cluster.close(); - } + } + + /** + * Ensures that a heartbeat message is sent after the configured heartbeat interval of idle time + * when no data is received on a connection even though are successful writes on the socket. + * + * @test_category connection:heartbeat + * @expected_result heartbeat is sent after heartbeat interval (3) seconds of idle time. + * @jira_ticket JAVA-1346 + * @since 3.0.6, 3.1.3 + */ + @Test(groups = "long") + public void should_send_heartbeat_when_requests_being_written_but_nothing_received() + throws Exception { + Cluster cluster = + Cluster.builder() + .addContactPoint(hostEndPoint) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions( + new PoolingOptions() + .setHeartbeatIntervalSeconds(3) + .setConnectionsPerHost(HostDistance.LOCAL, 1, 1)) + .build(); + + // Prime 'ping' to never return a response this is a way to create outgoing traffic + // without receiving anything inbound. + scassandra + .primingClient() + .prime(queryBuilder().withQuery("ping").withThen(then().withFixedDelay(8675309999L))); + + // Thread that will submit queries that get no response repeatedly. 
+ Thread submitter = null; + try { + // Don't create any session, only the control connection will be established + cluster.init(); + + // Find the connection in the connection pool. + SessionManager session = (SessionManager) cluster.connect(); + Host host = TestUtils.findHost(cluster, 1); + Connection connection = session.pools.get(host).connections.get(0); + + // Extract connection name from toString implementation. + String connectionName = + connection + .toString() + .replaceAll("\\-", "\\\\-") // Replace - with \- so its properly escaped as a regex. + .replaceAll("Connection\\[\\/", "") // Replace first part of toString (Connection[ + .replaceAll("\\, inFlight.*", ""); // Replace everything after ',inFlight' + + // Define patterns that check for whether or not heartbeats are sent / received on a given + // connection. + Pattern heartbeatSentPattern = Pattern.compile(".*" + connectionName + ".*sending heartbeat"); + Pattern heartbeatReceivedPattern = + Pattern.compile(".*" + connectionName + ".*heartbeat query succeeded"); + logger.debug("Heartbeat pattern is {}", heartbeatSentPattern); + + // Start query submission thread. + submitter = new Thread(new QuerySubmitter(session)); + submitter.start(); + + for (int i = 0; i < 5; i++) { + session.execute("bar"); + SECONDS.sleep(1); + } + + // Should be no heartbeats sent on pooled connection since we had successful requests. + String log = logs.getNext(); + assertNoLineMatches(log, heartbeatSentPattern); + + int inFlight = connection.inFlight.get(); + assertThat(inFlight).isGreaterThan(0); + + // Ensure heartbeat is sent after no received data, even though we have inflight requests + // (JAVA-1346). + SECONDS.sleep(4); + // Verify more requests were sent over this time period. + assertThat(connection.inFlight.get()).isGreaterThan(inFlight); + log = logs.getNext(); + // Heartbeat should have been sent and received. + assertLineMatches(log, heartbeatSentPattern); + assertLineMatches(log, heartbeatReceivedPattern); + } finally { + // interrupt thread so it stops submitting queries. + if (submitter != null) { + submitter.interrupt(); + } + cluster.close(); } + } - private static class QuerySubmitter implements Runnable { + private static class QuerySubmitter implements Runnable { - private final Session session; + private final Session session; - QuerySubmitter(Session session) { - this.session = session; - } - - @Override - public void run() { - while (!Thread.currentThread().isInterrupted()) { - logger.debug("Sending ping, for which we expect no response"); - session.executeAsync("ping"); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } + QuerySubmitter(Session session) { + this.session = session; } - /** - * Ensures that a heartbeat message is not sent if the configured heartbeat interval is 0. - *
    - * While difficult to prove the absence of evidence, the test will wait up to the default heartbeat interval - * (30 seconds + 1) and check to see if the heartbeat was sent. - * - * @test_category connection:heartbeat - * @expected_result heartbeat is not sent after default heartbeat interval (60) seconds of idle time. - * @jira_ticket JAVA-533 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "long") - public void should_not_send_heartbeat_when_disabled() throws InterruptedException { - Cluster cluster = Cluster.builder() - .addContactPoints(hostAddress.getAddress()) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions() - .setHeartbeatIntervalSeconds(0)) - .build(); - - try { - // Don't create any session, only the control connection will be established - cluster.init(); - - for (int i = 0; i < 5; i++) { - triggerRequestOnControlConnection(cluster); - SECONDS.sleep(1); - } - assertThat(logs.get()).doesNotContain("sending heartbeat"); - - // Sleep for a while and ensure no heartbeat is sent. - SECONDS.sleep(32); - assertThat(logs.get()).doesNotContain("sending heartbeat"); - } finally { - cluster.close(); - } + @Override + public void run() { + while (!Thread.currentThread().isInterrupted()) { + logger.debug("Sending ping, for which we expect no response"); + session.executeAsync("ping"); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } } - - // Simulates activity on the control connection via the internal API - private void triggerRequestOnControlConnection(Cluster cluster) { - cluster.manager.controlConnection.refreshNodeInfo(TestUtils.findHost(cluster, 1)); + } + + /** + * Ensures that a heartbeat message is not sent if the configured heartbeat interval is 0. + * + *
    While difficult to prove the absence of evidence, the test will wait up to the default + * heartbeat interval (30 seconds + 1) and check to see if the heartbeat was sent. + * + * @test_category connection:heartbeat + * @expected_result heartbeat is not sent after default heartbeat interval (60) seconds of idle + * time. + * @jira_ticket JAVA-533 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "long") + public void should_not_send_heartbeat_when_disabled() throws InterruptedException { + Cluster cluster = + Cluster.builder() + .addContactPoint(hostEndPoint) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(0)) + .build(); + + try { + // Don't create any session, only the control connection will be established + cluster.init(); + + for (int i = 0; i < 5; i++) { + triggerRequestOnControlConnection(cluster); + SECONDS.sleep(1); + } + assertThat(logs.get()).doesNotContain("sending heartbeat"); + + // Sleep for a while and ensure no heartbeat is sent. + SECONDS.sleep(32); + assertThat(logs.get()).doesNotContain("sending heartbeat"); + } finally { + cluster.close(); } + } + + // Simulates activity on the control connection via the internal API + private void triggerRequestOnControlConnection(Cluster cluster) { + cluster.manager.controlConnection.refreshNodeInfo(TestUtils.findHost(cluster, 1)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostAssert.java b/driver-core/src/test/java/com/datastax/driver/core/HostAssert.java index 4f9573269ab..c0ea4b73ac0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HostAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HostAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,186 +17,230 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ConditionChecker.check; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.Host.State; import com.datastax.driver.core.Host.StateListener; import com.datastax.driver.core.policies.LoadBalancingPolicy; -import org.assertj.core.api.AbstractAssert; - import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.ConditionChecker.check; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import org.assertj.core.api.AbstractAssert; public class HostAssert extends AbstractAssert { - private final Cluster cluster; - - protected HostAssert(Host host) { - this(host, null); - } - - protected HostAssert(Host host, Cluster cluster) { - super(host, HostAssert.class); - this.cluster = cluster; - } - - public HostAssert hasState(Host.State expected) { - assertThat(actual.state).isEqualTo(expected); - return this; - } - - public HostAssert isUp() { - assertThat(actual.isUp()).isTrue(); - return this; - } - - public HostAssert isDown() { - assertThat(actual.isUp()).isFalse(); - return this; - } - - public HostAssert isAtDistance(HostDistance expected) { - LoadBalancingPolicy loadBalancingPolicy = cluster.manager.loadBalancingPolicy(); - assertThat(loadBalancingPolicy.distance(actual)).isEqualTo(expected); - return this; - } - - public HostAssert isReconnectingFromDown() { - assertThat(actual.getReconnectionAttemptFuture() != null && !actual.getReconnectionAttemptFuture().isDone()) - .isTrue(); - return this; - } - - public HostAssert isInDatacenter(String datacenter) { - assertThat(actual.getDatacenter()).isEqualTo(datacenter); - return this; - } - - public HostAssert isNotReconnectingFromDown() { - // Ensure that host is not attempting a reconnect. Because of JAVA-970 we cannot - // be sure that there is a race and another pool is created before the host is marked down so we - // check to see it stops after 30 seconds. - // TODO: Change this to check only once if JAVA-970 is fixed. 
- check().before(30, TimeUnit.SECONDS).that(new Callable() { - @Override - public Boolean call() throws Exception { + private final Cluster cluster; + + protected HostAssert(Host host) { + this(host, null); + } + + protected HostAssert(Host host, Cluster cluster) { + super(host, HostAssert.class); + this.cluster = cluster; + } + + public HostAssert hasState(Host.State expected) { + assertThat(actual.state).isEqualTo(expected); + return this; + } + + public HostAssert isUp() { + assertThat(actual.isUp()).isTrue(); + return this; + } + + public HostAssert isDown() { + assertThat(actual.isUp()).isFalse(); + return this; + } + + public HostAssert isAtDistance(HostDistance expected) { + LoadBalancingPolicy loadBalancingPolicy = cluster.manager.loadBalancingPolicy(); + assertThat(loadBalancingPolicy.distance(actual)).isEqualTo(expected); + return this; + } + + public HostAssert isReconnectingFromDown() { + assertThat( + actual.getReconnectionAttemptFuture() != null + && !actual.getReconnectionAttemptFuture().isDone()) + .isTrue(); + return this; + } + + public HostAssert isInDatacenter(String datacenter) { + assertThat(actual.getDatacenter()).isEqualTo(datacenter); + return this; + } + + public HostAssert isNotReconnectingFromDown() { + // Ensure that host is not attempting a reconnect. Because of JAVA-970 we cannot + // be sure that there is a race and another pool is created before the host is marked down so we + // check to see it stops after 30 seconds. + // TODO: Change this to check only once if JAVA-970 is fixed. + check() + .before(30, TimeUnit.SECONDS) + .that( + new Callable() { + @Override + public Boolean call() throws Exception { // Whether or not host is down and reconnection attempt is in progress. - return actual.getReconnectionAttemptFuture() != null && !actual.getReconnectionAttemptFuture().isDone(); - } - }).becomesFalse(); - return this.isDown(); - } - - public HostAssert comesUpWithin(long duration, TimeUnit unit) { - final CountDownLatch upSignal = new CountDownLatch(1); - StateListener upListener = new StateListenerBase() { - - @Override - public void onUp(Host host) { - upSignal.countDown(); - } - - @Override - public void onAdd(Host host) { - // Special case, cassandra will sometimes not send an 'UP' topology change event - // for a new node, because of this we also listen for add events. - upSignal.countDown(); - } + return actual.getReconnectionAttemptFuture() != null + && !actual.getReconnectionAttemptFuture().isDone(); + } + }) + .becomesFalse(); + return this.isDown(); + } + + public HostAssert comesUpWithin(long duration, TimeUnit unit) { + final CountDownLatch upSignal = new CountDownLatch(1); + StateListener upListener = + new StateListenerBase() { + + @Override + public void onUp(Host host) { + upSignal.countDown(); + } + + @Override + public void onAdd(Host host) { + // Special case, cassandra will sometimes not send an 'UP' topology change event + // for a new node, because of this we also listen for add events. 
+ upSignal.countDown(); + } }; - cluster.register(upListener); - try { - // If the host is already up or if we receive the UP signal within given time - if (actual.isUp() || upSignal.await(duration, unit)) { - return this; - } - } catch (InterruptedException e) { - fail("Got interrupted while waiting for host to come up"); - } finally { - cluster.unregister(upListener); - } - fail(actual + " did not come up within " + duration + " " + unit); - return this; - } - - public HostAssert goesDownWithin(long duration, TimeUnit unit) { - final CountDownLatch downSignal = new CountDownLatch(1); - StateListener upListener = new StateListenerBase() { - @Override - public void onDown(Host host) { - downSignal.countDown(); - } + cluster.register(upListener); + try { + // If the host is already up or if we receive the UP signal within given time + if (actual.isUp() || upSignal.await(duration, unit)) { + return this; + } + } catch (InterruptedException e) { + fail("Got interrupted while waiting for host to come up"); + } finally { + cluster.unregister(upListener); + } + fail(actual + " did not come up within " + duration + " " + unit); + return this; + } + + public HostAssert goesDownWithin(long duration, TimeUnit unit) { + final CountDownLatch downSignal = new CountDownLatch(1); + StateListener upListener = + new StateListenerBase() { + @Override + public void onDown(Host host) { + downSignal.countDown(); + } }; - cluster.register(upListener); - try { - // If the host is already down or if we receive the DOWN signal within given time - if (actual.state == State.DOWN || downSignal.await(duration, unit)) - return this; - } catch (InterruptedException e) { - fail("Got interrupted while waiting for host to go down"); - } finally { - cluster.unregister(upListener); - } - fail(actual + " did not go down within " + duration + " " + unit); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasWorkload(String workload) { - assertThat(actual.getDseWorkload()).isNotNull().isEqualTo(workload); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasNoWorkload() { - assertThat(actual.getDseWorkload()).isNull(); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasDseVersion(VersionNumber versionNumber) { - assertThat(actual.getDseVersion()).isNotNull().isEqualTo(versionNumber); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasNoDseVersion() { - assertThat(actual.getDseVersion()).isNull(); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasDseGraph() { - assertThat(actual.isDseGraphEnabled()).isTrue(); - return this; - } - - @SuppressWarnings("deprecation") - public HostAssert hasNoDseGraph() { - assertThat(actual.isDseGraphEnabled()).isFalse(); - return this; - } - - public HostAssert hasListenAddress(InetAddress address) { - assertThat(actual.getListenAddress()).isNotNull().isEqualTo(address); - return this; - } - - public HostAssert hasNoListenAddress() { - assertThat(actual.getListenAddress()).isNull(); - return this; - } - - public HostAssert hasBroadcastAddress(InetAddress address) { - assertThat(actual.getBroadcastAddress()).isNotNull().isEqualTo(address); - return this; - } - - public HostAssert hasNoBroadcastAddress() { - assertThat(actual.getBroadcastAddress()).isNull(); - return this; - } + cluster.register(upListener); + try { + // If the host is already down or if we receive the DOWN signal within given time + if (actual.state == State.DOWN || downSignal.await(duration, 
unit)) return this; + } catch (InterruptedException e) { + fail("Got interrupted while waiting for host to go down"); + } finally { + cluster.unregister(upListener); + } + fail(actual + " did not go down within " + duration + " " + unit); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasWorkload(String workload) { + assertThat(actual.getDseWorkload()).isNotNull().isEqualTo(workload); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasNoWorkload() { + assertThat(actual.getDseWorkload()).isNull(); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasDseVersion(VersionNumber versionNumber) { + assertThat(actual.getDseVersion()).isNotNull().isEqualTo(versionNumber); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasNoDseVersion() { + assertThat(actual.getDseVersion()).isNull(); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasDseGraph() { + assertThat(actual.isDseGraphEnabled()).isTrue(); + return this; + } + + @SuppressWarnings("deprecation") + public HostAssert hasNoDseGraph() { + assertThat(actual.isDseGraphEnabled()).isFalse(); + return this; + } + + public HostAssert hasSocketAddress(InetSocketAddress address) { + assertThat(actual.getEndPoint().resolve()).isNotNull().isEqualTo(address); + return this; + } + + public HostAssert hasListenAddress(InetAddress address) { + assertThat(actual.getListenAddress()).isNotNull().isEqualTo(address); + return this; + } + + public HostAssert hasListenSocketAddress(InetSocketAddress address) { + assertThat(actual.getListenSocketAddress()).isNotNull().isEqualTo(address); + return this; + } + + public HostAssert hasNoListenAddress() { + assertThat(actual.getListenAddress()).isNull(); + return this; + } + + public HostAssert hasNoListenSocketAddress() { + assertThat(actual.getListenSocketAddress()).isNull(); + return this; + } + + public HostAssert hasBroadcastAddress(InetAddress address) { + assertThat(actual.getBroadcastAddress()).isNotNull().isEqualTo(address); + return this; + } + + public HostAssert hasBroadcastSocketAddress(InetSocketAddress address) { + assertThat(actual.getBroadcastSocketAddress()).isNotNull().isEqualTo(address); + return this; + } + + public HostAssert hasNoBroadcastAddress() { + assertThat(actual.getBroadcastAddress()).isNull(); + return this; + } + + public HostAssert hasNoBroadcastSocketAddress() { + assertThat(actual.getBroadcastSocketAddress()).isNull(); + return this; + } + + public HostAssert hasHostId(UUID hostId) { + assertThat(actual.getHostId()).isEqualTo(hostId); + return this; + } + + public HostAssert hasSchemaVersion(UUID schemaVersion) { + assertThat(actual.getSchemaVersion()).isEqualTo(schemaVersion); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolMultiTest.java b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolMultiTest.java index 90d91bc1446..f5469a0be02 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolMultiTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolMultiTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,105 +17,106 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.policies.ConstantReconnectionPolicy; -import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.net.InetSocketAddress; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.HostDistance.LOCAL; import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static java.util.concurrent.TimeUnit.SECONDS; import static org.scassandra.http.client.ClosedConnectionReport.CloseType.CLOSE; -public class HostConnectionPoolMultiTest { - - private ScassandraCluster scassandra; - - private Cluster cluster; +import com.datastax.driver.core.policies.ConstantReconnectionPolicy; +import com.google.common.util.concurrent.Uninterruptibles; +import java.net.InetSocketAddress; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; - @BeforeMethod(groups = {"short", "long"}) - private void setUp() { - scassandra = ScassandraCluster.builder().withNodes(2).build(); - scassandra.init(); - } +public class HostConnectionPoolMultiTest { - @AfterMethod(groups = {"short", "long"}, alwaysRun = true) - private void tearDown() { - if (cluster != null) { - cluster.close(); - } - scassandra.stop(); - } + private ScassandraCluster scassandra; - private void createCluster(int core, int max) { - PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(LOCAL, core, max); - SocketOptions socketOptions = new SocketOptions().setReadTimeoutMillis(1000); - cluster = Cluster.builder() - .addContactPoints(scassandra.address(1).getAddress()) - .withPort(scassandra.getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .withPoolingOptions(poolingOptions) - .withSocketOptions(socketOptions) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) - .build(); - cluster.connect(); - } + private Cluster cluster; - /** - * Ensures that if all connections fail to a host on pool init that the host is marked down. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_mark_host_down_if_all_connections_fail_on_init() { - // Prevent any connections on node 2. - scassandra.node(2).currentClient().disableListener(); - createCluster(8, 8); - - // Node 2 should be in a down state while node 1 stays up. - assertThat(cluster).host(2).goesDownWithin(10, SECONDS); - assertThat(cluster).host(1).isUp(); - - // Node 2 should come up as soon as it is able to reconnect. 
- scassandra.node(2).currentClient().enableListener(); - assertThat(cluster).host(2).comesUpWithin(2, SECONDS); - } + @BeforeMethod(groups = {"short", "long"}) + private void setUp() { + scassandra = ScassandraCluster.builder().withNodes(2).build(); + scassandra.init(); + } - /** - * Ensures that if the control connection goes down, but the Host bound the control connection - * still has an up pool, the Host should remain up and the Control Connection should be replaced. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_replace_control_connection_if_it_goes_down_but_host_remains_up() { - createCluster(1, 2); - - // Ensure control connection is on node 1. - assertThat(cluster).usesControlHost(1); - - // Identify the socket associated with the control connection. - Connection controlConnection = cluster.manager.controlConnection.connectionRef.get(); - InetSocketAddress controlSocket = (InetSocketAddress) controlConnection.channel.localAddress(); - - // Close the control connection. - scassandra.node(1).currentClient() - .closeConnection(CLOSE, controlSocket); - - // Sleep reconnect interval * 2 to allow time to reconnect. - Uninterruptibles.sleepUninterruptibly(2, SECONDS); - - // Ensure the control connection was replaced and host 1 remains up. - assertThat(cluster).hasOpenControlConnection() - .host(1).isUp(); - assertThat(cluster.manager.controlConnection.connectionRef.get()).isNotEqualTo(controlConnection); + @AfterMethod( + groups = {"short", "long"}, + alwaysRun = true) + private void tearDown() { + if (cluster != null) { + cluster.close(); } + scassandra.stop(); + } + + private void createCluster(int core, int max) { + PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(LOCAL, core, max); + SocketOptions socketOptions = new SocketOptions().setReadTimeoutMillis(1000); + cluster = + Cluster.builder() + .addContactPoints(scassandra.address(1).getAddress()) + .withPort(scassandra.getBinaryPort()) + .withQueryOptions(nonDebouncingQueryOptions()) + .withPoolingOptions(poolingOptions) + .withSocketOptions(socketOptions) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + cluster.connect(); + } + + /** + * Ensures that if all connections fail to a host on pool init that the host is marked down. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_mark_host_down_if_all_connections_fail_on_init() { + // Prevent any connections on node 2. + scassandra.node(2).currentClient().disableListener(); + createCluster(8, 8); + + // Node 2 should be in a down state while node 1 stays up. + assertThat(cluster).host(2).goesDownWithin(10, SECONDS); + assertThat(cluster).host(1).isUp(); + + // Node 2 should come up as soon as it is able to reconnect. + scassandra.node(2).currentClient().enableListener(); + assertThat(cluster).host(2).comesUpWithin(2, SECONDS); + } + + /** + * Ensures that if the control connection goes down, but the Host bound the control connection + * still has an up pool, the Host should remain up and the Control Connection should be replaced. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_replace_control_connection_if_it_goes_down_but_host_remains_up() { + createCluster(1, 2); + + // Ensure control connection is on node 1. 
+ assertThat(cluster).usesControlHost(1); + + // Identify the socket associated with the control connection. + Connection controlConnection = cluster.manager.controlConnection.connectionRef.get(); + InetSocketAddress controlSocket = (InetSocketAddress) controlConnection.channel.localAddress(); + + // Close the control connection. + scassandra.node(1).currentClient().closeConnection(CLOSE, controlSocket); + + // Sleep reconnect interval * 2 to allow time to reconnect. + Uninterruptibles.sleepUninterruptibly(2, SECONDS); + + // Ensure the control connection was replaced and host 1 remains up. + assertThat(cluster).hasOpenControlConnection().host(1).isUp(); + assertThat(cluster.manager.controlConnection.connectionRef.get()) + .isNotEqualTo(controlConnection); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java index 63b2119e7b8..b68a9a9263e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,1510 +17,1693 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ConditionChecker.check; +import static com.datastax.driver.core.PoolingOptions.NEW_CONNECTION_THRESHOLD_LOCAL_KEY; +import static com.google.common.collect.Lists.newArrayList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.mockito.Mockito.after; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.scassandra.http.client.ClosedConnectionReport.CloseType.CLOSE; +import static org.scassandra.http.client.PrimingRequest.queryBuilder; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.server_error; +import static org.testng.Assert.fail; + import com.codahale.metrics.Gauge; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.BusyConnectionException; +import com.datastax.driver.core.exceptions.BusyPoolException; +import com.datastax.driver.core.exceptions.ConnectionException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.base.Throwables; -import com.google.common.util.concurrent.*; -import org.scassandra.cql.PrimitiveType; -import org.scassandra.http.client.PrimingRequest; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - +import com.google.common.util.concurrent.ForwardingListeningExecutorService; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.Uninterruptibles; import java.net.InetSocketAddress; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.concurrent.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ConditionChecker.check; -import static com.datastax.driver.core.PoolingOptions.NEW_CONNECTION_THRESHOLD_LOCAL_KEY; -import static 
com.google.common.collect.Lists.newArrayList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.mockito.Mockito.*; -import static org.scassandra.http.client.ClosedConnectionReport.CloseType.CLOSE; -import static org.scassandra.http.client.PrimingRequest.queryBuilder; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.server_error; -import static org.testng.Assert.fail; +import org.scassandra.cql.PrimitiveType; +import org.scassandra.http.client.PrimingRequest; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; public class HostConnectionPoolTest extends ScassandraTestBase.PerClassCluster { - static final Integer NEW_CONNECTION_THRESHOLD = PoolingOptions.DEFAULTS.get(ProtocolVersion.V1) - .get(NEW_CONNECTION_THRESHOLD_LOCAL_KEY); - - @BeforeClass(groups = {"short", "long"}) - public void reinitializeCluster() { - // Don't use the provided cluster, each test will create its own instead. - cluster.close(); + static final Integer NEW_CONNECTION_THRESHOLD = + PoolingOptions.DEFAULTS.get(ProtocolVersion.V1).get(NEW_CONNECTION_THRESHOLD_LOCAL_KEY); + + @BeforeClass(groups = {"short", "long"}) + public void reinitializeCluster() { + // Don't use the provided cluster, each test will create its own instead. + cluster.close(); + } + + /** + * Ensure the given pool has the given size within 5 seconds. + * + * @param pool Pool to check. + * @param expectedSize Expected size of pool. + */ + private void assertPoolSize(HostConnectionPool pool, final int expectedSize) { + check() + .before(5, TimeUnit.SECONDS) + .that( + pool, + new Predicate() { + @Override + public boolean apply(HostConnectionPool input) { + return input.connections.size() == expectedSize; + } + }) + .becomesTrue(); + } + + private void assertBorrowedConnections( + Iterable requests, List expectedConnections) { + for (MockRequest request : requests) { + assertThat(expectedConnections).contains(request.getConnection()); } - - /** - * Ensure the given pool has the given size within 5 seconds. - * - * @param pool Pool to check. - * @param expectedSize Expected size of pool. - */ - private void assertPoolSize(HostConnectionPool pool, final int expectedSize) { - check().before(5, TimeUnit.SECONDS) - .that(pool, new Predicate() { - @Override - public boolean apply(HostConnectionPool input) { - return input.connections.size() == expectedSize; - } - }).becomesTrue(); + } + + private void assertBorrowedConnection( + Iterable requests, Connection expectedConnection) { + assertBorrowedConnections(requests, Collections.singletonList(expectedConnection)); + } + + /** + * Ensures that if a fixed-sized pool has filled its core connections and reached its maximum + * number of enqueued requests, then borrowConnection will fail instead of creating a new + * connection. 
+ * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "short") + public void fixed_size_pool_should_fill_its_core_connections_and_queue_and_then_reject() { + Cluster cluster = createClusterBuilder().build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 2, 2); + int maxQueueSize = 256; + + assertThat(pool.connections.size()).isEqualTo(2); + List coreConnections = newArrayList(pool.connections); + // fill connections + List requests = MockRequest.sendMany(2 * 128, pool); + assertBorrowedConnections(requests, coreConnections); + allRequests.addAll(requests); + // fill queue + allRequests.addAll(MockRequest.sendMany(maxQueueSize, pool, maxQueueSize)); + + // add one more request, it should be rejected because the queue is full + MockRequest failedBorrow = MockRequest.send(pool, maxQueueSize); + try { + failedBorrow.getConnection(); + fail("Expected a BusyPoolException"); + } catch (BusyPoolException e) { + assertThat(e).hasMessageContaining("reached its max size"); + } + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - private void assertBorrowedConnections(Iterable requests, List expectedConnections) { - for (MockRequest request : requests) { - assertThat(expectedConnections).contains(request.getConnection()); - } + } + + /** + * Ensures that if a fixed-sized pool has filled its core connections and reached a number of + * requests to cause it to be enqueued, that if the request is not serviced within 100ms, a + * BusyPoolException is raised with a timeout. + * + * @jira_ticket JAVA-1371 + * @test_category connection:connection_pool + * @since 3.0.7 3.1.4 3.2.0 + */ + @Test(groups = "short") + public void should_reject_if_enqueued_and_timeout_reached() { + Cluster cluster = createClusterBuilder().build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 1); + List requests = MockRequest.sendMany(128, pool); + allRequests.addAll(requests); + + // pool is now full, this request will be enqueued + MockRequest failedBorrow = MockRequest.send(pool, 100, 128); + try { + failedBorrow.getConnection(); + fail("Expected a BusyPoolException"); + } catch (BusyPoolException e) { + assertThat(e).hasMessageContaining("timed out"); + } + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - private void assertBorrowedConnection(Iterable requests, Connection expectedConnection) { - assertBorrowedConnections(requests, Collections.singletonList(expectedConnection)); + } + + /** + * Validates that if a borrow request is enqueued into a pool for a Host that is currently within + * the window of reconnecting after an error that the future tied to that query times out after + * {@link PoolingOptions#setPoolTimeoutMillis(int)}. + * + *
+ * <p>
    This primarily serves to demonstrate that the use case described in JAVA-1371 is fixed by + * using {@link Session#execute(Statement)} shortly after a server error on a connection. + * + * @jira_ticket JAVA-1371 + * @test_category connection:connection_pool + * @since 3.0.7 3.1.4 3.2.0 + */ + @Test(groups = "short") + public void should_not_hang_when_executing_sync_queries() { + primingClient.prime( + queryBuilder() + .withQuery("server_error query") + .withThen(then().withResult(server_error)) + .build()); + + Cluster cluster = + createClusterBuilder() + .withReconnectionPolicy(new ConstantReconnectionPolicy(10000)) + .build(); + // reduce timeout so test runs faster. + cluster.getConfiguration().getPoolingOptions().setPoolTimeoutMillis(500); + try { + Session session = cluster.connect(); + try { + session.execute("server_error query"); + fail("Exception expected"); + } catch (Exception e) { + // error is expected in this case. + } + + try { + session.execute("this should not block indefinitely"); + } catch (NoHostAvailableException nhae) { + // should raise a NHAE with a BusyPoolException. + Collection errors = nhae.getErrors().values(); + assertThat(errors).hasSize(1); + Throwable e = errors.iterator().next(); + assertThat(e).isInstanceOf(BusyPoolException.class); + assertThat(e).hasMessageContaining("timed out"); + } + } finally { + cluster.close(); } - - /** - * Ensures that if a fixed-sized pool has filled its core connections and reached its maximum number of enqueued - * requests, then borrowConnection will fail instead of creating a new connection. - * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "short") - public void fixed_size_pool_should_fill_its_core_connections_and_queue_and_then_reject() { - Cluster cluster = createClusterBuilder().build(); - List allRequests = newArrayList(); + } + + /** + * Ensures that any enqueued connection borrow requests are failed when their associated + * connection pool closes. + * + * @jira_ticket JAVA-839 + * @test_category connection:connection_pool + * @since 3.0.4, 3.1.1 + */ + @Test(groups = "short") + public void requests_with_enqueued_borrow_requests_should_be_failed_when_pool_closes() { + Cluster cluster = createClusterBuilder().build(); + List requests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 2, 2); + int maxQueueSize = 256; + + assertThat(pool.connections.size()).isEqualTo(2); + List coreConnections = newArrayList(pool.connections); + // fill connections + requests = MockRequest.sendMany(2 * 128, pool); + assertBorrowedConnections(requests, coreConnections); + // fill queue + List queuedRequests = MockRequest.sendMany(maxQueueSize, pool, maxQueueSize); + + // Closing the pool should fail all queued connections. + pool.closeAsync(); + + for (MockRequest queuedRequest : queuedRequests) { + // Future should be completed. 
+ assertThat(queuedRequest.connectionFuture.isDone()).isTrue(); try { - HostConnectionPool pool = createPool(cluster, 2, 2); - int maxQueueSize = 256; - - assertThat(pool.connections.size()).isEqualTo(2); - List coreConnections = newArrayList(pool.connections); - // fill connections - List requests = MockRequest.sendMany(2 * 128, pool); - assertBorrowedConnections(requests, coreConnections); - allRequests.addAll(requests); - // fill queue - allRequests.addAll(MockRequest.sendMany(maxQueueSize, pool, maxQueueSize)); - - // add one more request, it should be rejected because the queue is full - MockRequest failedBorrow = MockRequest.send(pool, maxQueueSize); - try { - failedBorrow.getConnection(); - fail("Expected a BusyPoolException"); - } catch (BusyPoolException e) { - assertThat(e).hasMessageContaining("reached its max size"); - } - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); + // Future should fail as result of the pool closing. + queuedRequest.getConnection(); + fail("Expected a ConnectionException"); + } catch (ConnectionException e) { + /*expected*/ } + } + } finally { + MockRequest.completeAll(requests); + cluster.close(); } - - /** - * Ensures that if a fixed-sized pool has filled its core connections and reached a number of requests to cause - * it to be enqueued, that if the request is not serviced within 100ms, a BusyPoolException is raised with a timeout. - * - * @jira_ticket JAVA-1371 - * @test_category connection:connection_pool - * @since 3.0.7 3.1.4 3.2.0 - */ - @Test(groups = "short") - public void should_reject_if_enqueued_and_timeout_reached() { - Cluster cluster = createClusterBuilder().build(); - List allRequests = newArrayList(); + } + + /** + * Validates that if the keyspace tied to the Session's pool state is different than the keyspace + * on the connection being used in dequeue that {@link Connection#setKeyspaceAsync(String)} is set + * on that connection and that "USE keyspace" is only called once since setKeyspaceAsync should + * not attempt setting the keyspace if there is already a request inflight that is doing this. + * + * @jira_ticket JAVA-839 + * @test_category connection:connection_pool + * @since 3.0.4, 3.1.1 + */ + @Test(groups = "short") + public void should_adjust_connection_keyspace_on_dequeue_if_pool_state_is_different() + throws TimeoutException, ExecutionException { + Cluster cluster = createClusterBuilder().build(); + List requests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 1); + int maxQueueSize = 256; + + assertThat(pool.connections.size()).isEqualTo(1); + List coreConnections = newArrayList(pool.connections); + // fill connections + requests = MockRequest.sendMany(128, pool); + assertBorrowedConnections(requests, coreConnections); + // fill queue + List queuedRequests = MockRequest.sendMany(maxQueueSize, pool, maxQueueSize); + + // Wait for connections to be borrowed before changing keyspace. + for (MockRequest request : requests) { + Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); + } + // Simulate change of keyspace on pool. Prime a delay so existing requests can complete + // beforehand. + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("USE \"newkeyspace\"") + .withThen(PrimingRequest.then().withFixedDelay(2000L))); + pool.manager.poolsState.setKeyspace("newkeyspace"); + + // Complete all requests, this should cause dequeue on connection. 
+ MockRequest.completeAll(requests); + + // Check the status on queued request's connection futures. We expect that dequeue should + // be called when connection is released by previous requests completing, and that one set + // keyspace attempt should be tried. + + int count = 0; + for (MockRequest queuedRequest : queuedRequests) { try { - HostConnectionPool pool = createPool(cluster, 1, 1); - List requests = MockRequest.sendMany(128, pool); - allRequests.addAll(requests); - - // pool is now full, this request will be enqueued - MockRequest failedBorrow = MockRequest.send(pool, 100, 128); - try { - failedBorrow.getConnection(); - fail("Expected a BusyPoolException"); - } catch (BusyPoolException e) { - assertThat(e).hasMessageContaining("timed out"); - } - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); + Uninterruptibles.getUninterruptibly(queuedRequest.connectionFuture, 10, TimeUnit.SECONDS); + count++; + } catch (ExecutionException e) { + // 128th request should timeout since all in flight requests are used. + assertThat(e.getCause()) + .isInstanceOf(BusyPoolException.class) + .hasMessageContaining("timed out after"); + assertThat(count).isEqualTo(128); + break; } + } + + // We should only have gotten one 'USE newkeyspace' query since Connection#setKeyspaceAsync + // should only do + // this once if there is already a request in flight. + assertThat(activityClient.retrieveQueries()) + .extractingResultOf("getQuery") + .containsOnlyOnce("USE \"newkeyspace\""); + } finally { + MockRequest.completeAll(requests); + cluster.close(); } - - /** - * Validates that if a borrow request is enqueued into a pool for a Host that is currently - * within the window of reconnecting after an error that the future tied to that query times out - * after {@link PoolingOptions#setPoolTimeoutMillis(int)}. - *
<p/>
    - * This primarily serves to demonstrate that the use case described in JAVA-1371 is fixed by using - * {@link Session#execute(Statement)} shortly after a server error on a connection. - * - * @jira_ticket JAVA-1371 - * @test_category connection:connection_pool - * @since 3.0.7 3.1.4 3.2.0 - */ - @Test(groups = "short") - public void should_not_hang_when_executing_sync_queries() { - primingClient.prime( - queryBuilder() - .withQuery("server_error query") - .withThen(then().withResult(server_error)) - .build() - ); - - Cluster cluster = createClusterBuilder() - .withReconnectionPolicy(new ConstantReconnectionPolicy(10000)).build(); - // reduce timeout so test runs faster. - cluster.getConfiguration().getPoolingOptions().setPoolTimeoutMillis(500); - try { - Session session = cluster.connect(); - try { - session.execute("server_error query"); - fail("Exception expected"); - } catch (Exception e) { - // error is expected in this case. - } - - try { - session.execute("this should not block indefinitely"); - } catch (NoHostAvailableException nhae) { - // should raise a NHAE with a BusyPoolException. - Collection errors = nhae.getErrors().values(); - assertThat(errors).hasSize(1); - Throwable e = errors.iterator().next(); - assertThat(e).isInstanceOf(BusyPoolException.class); - assertThat(e).hasMessageContaining("timed out"); - } - } finally { - cluster.close(); - } + } + + /** + * Ensures that on borrowConnection if a set keyspace attempt is in progress on that connection + * for a different keyspace than the pool state that the borrowConnection future returned is + * failed. + * + * @jira_ticket JAVA-839 + * @test_category connection:connection_pool + * @since 3.0.4, 3.1.1 + */ + @Test(groups = "short") + public void + should_fail_in_borrowConnection_when_setting_keyspace_and_another_set_keyspace_attempt_is_in_flight() + throws TimeoutException { + Cluster cluster = createClusterBuilder().build(); + try { + HostConnectionPool pool = createPool(cluster, 1, 1); + + // Respond to setting as slowks very slowly. + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("USE \"slowks\"") + .withThen(PrimingRequest.then().withFixedDelay(5000L))); + + Connection connection = pool.connections.get(0); + + connection.setKeyspaceAsync("slowks"); + + // Simulate change of keyspace on pool. + pool.manager.poolsState.setKeyspace("newks"); + + MockRequest request = MockRequest.send(pool); + + try { + Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); + fail("Should have thrown exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(DriverException.class); + assertThat(e.getCause().getMessage()) + .contains( + "Aborting attempt to set keyspace to 'newks' since there is already an in flight attempt to set keyspace to 'slowks'."); + } + } finally { + cluster.close(); } - - /** - * Ensures that any enqueued connection borrow requests are failed when their associated connection pool closes. 
- * - * @jira_ticket JAVA-839 - * @test_category connection:connection_pool - * @since 3.0.4, 3.1.1 - */ - @Test(groups = "short") - public void requests_with_enqueued_borrow_requests_should_be_failed_when_pool_closes() { - Cluster cluster = createClusterBuilder().build(); - List requests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 2, 2); - int maxQueueSize = 256; - - assertThat(pool.connections.size()).isEqualTo(2); - List coreConnections = newArrayList(pool.connections); - // fill connections - requests = MockRequest.sendMany(2 * 128, pool); - assertBorrowedConnections(requests, coreConnections); - // fill queue - List queuedRequests = MockRequest.sendMany(maxQueueSize, pool, maxQueueSize); - - // Closing the pool should fail all queued connections. - pool.closeAsync(); - - for (MockRequest queuedRequest : queuedRequests) { - // Future should be completed. - assertThat(queuedRequest.connectionFuture.isDone()).isTrue(); - try { - // Future should fail as result of the pool closing. - queuedRequest.getConnection(); - fail("Expected a ConnectionException"); - } catch (ConnectionException e) {/*expected*/} - } - } finally { - MockRequest.completeAll(requests); - cluster.close(); - } + } + + /** + * Ensures that while dequeuing borrow connection requests that if a set keyspace attempt is in + * progress on that connection for a difference keyspace than the pool state that the future for + * that borrow attempt is failed. + * + * @jira_ticket JAVA-839 + * @test_category connection:connection_pool + * @since 3.0.4, 3.1.1 + */ + @Test(groups = "short") + public void + should_fail_in_dequeue_when_setting_keyspace_and_another_set_keyspace_attempt_is_in_flight() + throws ExecutionException, TimeoutException { + Cluster cluster = createClusterBuilder().build(); + List requests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 1); + // Limit requests per connection to 100 so we don't exhaust stream ids. + cluster + .getConfiguration() + .getPoolingOptions() + .setMaxRequestsPerConnection(HostDistance.LOCAL, 100); + int maxQueueSize = 256; + + assertThat(pool.connections.size()).isEqualTo(1); + List coreConnections = newArrayList(pool.connections); + + // fill connections + requests = MockRequest.sendMany(100, pool); + assertBorrowedConnections(requests, coreConnections); + + // send a request that will be queued. + MockRequest queuedRequest = MockRequest.send(pool, maxQueueSize); + + // Wait for connections to be borrowed before changing keyspace. + for (MockRequest request : requests) { + Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); + } + + // Respond to setting as slowks very slowly. + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("USE \"slowks\"") + .withThen(PrimingRequest.then().withFixedDelay(5000L))); + Connection connection = pool.connections.get(0); + connection.setKeyspaceAsync("slowks"); + + // Simulate change of keyspace on pool. + pool.manager.poolsState.setKeyspace("newkeyspace"); + + // Complete all requests, this should cause dequeue on connection. 
+ MockRequest.completeAll(requests); + + try { + Uninterruptibles.getUninterruptibly(queuedRequest.connectionFuture, 5, TimeUnit.SECONDS); + fail("Should have thrown exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(DriverException.class); + assertThat(e.getCause().getMessage()) + .contains( + "Aborting attempt to set keyspace to 'newkeyspace' since there is already an in flight attempt to set keyspace to 'slowks'."); + } + } finally { + MockRequest.completeAll(requests); + cluster.close(); } - - /** - * Validates that if the keyspace tied to the Session's pool state is different than the keyspace on the connection - * being used in dequeue that {@link Connection#setKeyspaceAsync(String)} is set on that connection and that - * "USE keyspace" is only called once since setKeyspaceAsync should not attempt setting the keyspace if there is - * already a request inflight that is doing this. - * - * @jira_ticket JAVA-839 - * @test_category connection:connection_pool - * @since 3.0.4, 3.1.1 - */ - @Test(groups = "short") - public void should_adjust_connection_keyspace_on_dequeue_if_pool_state_is_different() throws TimeoutException, ExecutionException { - Cluster cluster = createClusterBuilder().build(); - List requests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 1); - int maxQueueSize = 256; - - assertThat(pool.connections.size()).isEqualTo(1); - List coreConnections = newArrayList(pool.connections); - // fill connections - requests = MockRequest.sendMany(128, pool); - assertBorrowedConnections(requests, coreConnections); - // fill queue - List queuedRequests = MockRequest.sendMany(maxQueueSize, pool, maxQueueSize); - - // Wait for connections to be borrowed before changing keyspace. - for (MockRequest request : requests) { - Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); - } - // Simulate change of keyspace on pool. Prime a delay so existing requests can complete beforehand. - primingClient.prime(PrimingRequest.queryBuilder().withQuery("USE \"newkeyspace\"").withThen(PrimingRequest.then().withFixedDelay(2000L))); - pool.manager.poolsState.setKeyspace("newkeyspace"); - - // Complete all requests, this should cause dequeue on connection. - MockRequest.completeAll(requests); - - // Check the status on queued request's connection futures. We expect that dequeue should - // be called when connection is released by previous requests completing, and that one set - // keyspace attempt should be tried. - - int count = 0; - for (MockRequest queuedRequest : queuedRequests) { - try { - Uninterruptibles.getUninterruptibly(queuedRequest.connectionFuture, 10, TimeUnit.SECONDS); - count++; - } catch (ExecutionException e) { - // 128th request should timeout since all in flight requests are used. - assertThat(e.getCause()) - .isInstanceOf(BusyPoolException.class) - .hasMessageContaining("timed out after"); - assertThat(count).isEqualTo(128); - break; - } - } - - // We should only have gotten one 'USE newkeyspace' query since Connection#setKeyspaceAsync should only do - // this once if there is already a request in flight. - assertThat(activityClient.retrieveQueries()).extractingResultOf("getQuery").containsOnlyOnce("USE \"newkeyspace\""); - } finally { - MockRequest.completeAll(requests); - cluster.close(); - } + } + + /** + * Ensures that if a variable-sized pool has filled up to its maximum connections that + * borrowConnection will return a failed future instead of creating a new connection. 
+ * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "short") + public void variable_size_pool_should_fill_its_connections_and_then_reject() throws Exception { + Cluster cluster = createClusterBuilder().build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + assertThat(pool.connections.size()).isEqualTo(1); + Connection coreConnection = pool.connections.get(0); + + // Fill enough connections to hit the threshold. + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnection(requests, coreConnection); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + // Allow time for new connection to be spawned. + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + + // Borrow more and ensure the connection returned is a non-core connection. + for (int i = 0; i < NEW_CONNECTION_THRESHOLD; i++) { + MockRequest request = MockRequest.send(pool); + assertThat(request.getConnection()).isNotEqualTo(coreConnection); + allRequests.add(request); + } + + // Fill remaining connections (28 + 28) - 1 + allRequests.addAll(MockRequest.sendMany(55, pool)); + + MockRequest failedBorrow = MockRequest.send(pool); + try { + failedBorrow.getConnection(); + fail("Expected a BusyPoolException"); + } catch (BusyPoolException e) { + /*expected*/ + } + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that on borrowConnection if a set keyspace attempt is in progress on that connection for a different keyspace than the - * pool state that the borrowConnection future returned is failed. - * - * @jira_ticket JAVA-839 - * @test_category connection:connection_pool - * @since 3.0.4, 3.1.1 - */ - @Test(groups = "short") - public void should_fail_in_borrowConnection_when_setting_keyspace_and_another_set_keyspace_attempt_is_in_flight() throws TimeoutException { - Cluster cluster = createClusterBuilder().build(); - try { - HostConnectionPool pool = createPool(cluster, 1, 1); - - // Respond to setting as slowks very slowly. - primingClient.prime(PrimingRequest.queryBuilder().withQuery("USE \"slowks\"").withThen(PrimingRequest.then().withFixedDelay(5000L))); - - Connection connection = pool.connections.get(0); - - connection.setKeyspaceAsync("slowks"); - - // Simulate change of keyspace on pool. - pool.manager.poolsState.setKeyspace("newks"); - - MockRequest request = MockRequest.send(pool); - - try { - Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); - fail("Should have thrown exception"); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(DriverException.class); - assertThat(e.getCause().getMessage()).contains("Aborting attempt to set keyspace to 'newks' since there is already an in flight attempt to set keyspace to 'slowks'."); - } - } finally { - cluster.close(); - } + } + + /** + * Ensures that if the core connection pool is full that borrowConnection will create and use a + * new connection. 
+ * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "short") + public void should_add_extra_connection_when_core_full() throws Exception { + Cluster cluster = createClusterBuilder().build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + Connection core = pool.connections.get(0); + + // Fill core connection + 1 + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnection(requests, core); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + // Reaching the threshold should have triggered the creation of an extra one + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that while dequeuing borrow connection requests that if a set keyspace attempt is in progress on that connection for a difference - * keyspace than the pool state that the future for that borrow attempt is failed. - * - * @jira_ticket JAVA-839 - * @test_category connection:connection_pool - * @since 3.0.4, 3.1.1 - */ - @Test(groups = "short") - public void should_fail_in_dequeue_when_setting_keyspace_and_another_set_keyspace_attempt_is_in_flight() throws ExecutionException, TimeoutException { - Cluster cluster = createClusterBuilder().build(); - List requests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 1); - // Limit requests per connection to 100 so we don't exhaust stream ids. - cluster.getConfiguration().getPoolingOptions().setMaxRequestsPerConnection(HostDistance.LOCAL, 100); - int maxQueueSize = 256; - - assertThat(pool.connections.size()).isEqualTo(1); - List coreConnections = newArrayList(pool.connections); - - // fill connections - requests = MockRequest.sendMany(100, pool); - assertBorrowedConnections(requests, coreConnections); - - // send a request that will be queued. - MockRequest queuedRequest = MockRequest.send(pool, maxQueueSize); - - // Wait for connections to be borrowed before changing keyspace. - for (MockRequest request : requests) { - Uninterruptibles.getUninterruptibly(request.connectionFuture, 5, TimeUnit.SECONDS); - } - - // Respond to setting as slowks very slowly. - primingClient.prime(PrimingRequest.queryBuilder().withQuery("USE \"slowks\"").withThen(PrimingRequest.then().withFixedDelay(5000L))); - Connection connection = pool.connections.get(0); - connection.setKeyspaceAsync("slowks"); - - // Simulate change of keyspace on pool. - pool.manager.poolsState.setKeyspace("newkeyspace"); - - // Complete all requests, this should cause dequeue on connection. 
- MockRequest.completeAll(requests); - - try { - Uninterruptibles.getUninterruptibly(queuedRequest.connectionFuture, 5, TimeUnit.SECONDS); - fail("Should have thrown exception"); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(DriverException.class); - assertThat(e.getCause().getMessage()).contains("Aborting attempt to set keyspace to 'newkeyspace' since there is already an in flight attempt to set keyspace to 'slowks'."); - } - } finally { - MockRequest.completeAll(requests); - cluster.close(); - } + } + + /** + * Ensures that a trashed connection that has not been timed out should be resurrected into the + * connection pool if borrowConnection is called and a new connection is needed. + * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "long") + public void should_resurrect_trashed_connection_within_idle_timeout() throws Exception { + Cluster cluster = + createClusterBuilder() + .withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)) + .build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + Connection connection1 = pool.connections.get(0); + + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnections(requests, Collections.singletonList(connection1)); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + Connection connection2 = pool.connections.get(1); + + assertThat(connection1.inFlight.get()).isEqualTo(101); + assertThat(connection2.inFlight.get()).isEqualTo(0); + + // Go back under the capacity of 1 connection + MockRequest.completeMany(51, allRequests); + + assertThat(connection1.inFlight.get()).isEqualTo(50); + assertThat(connection2.inFlight.get()).isEqualTo(0); + + // Given enough time, one connection gets trashed (and the implementation picks the first one) + Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); + assertThat(pool.connections).containsExactly(connection2); + assertThat(pool.trash).containsExactly(connection1); + + // Now borrow enough to go just under the 1 connection threshold + allRequests.addAll(MockRequest.sendMany(50, pool)); + + assertThat(pool.connections).containsExactly(connection2); + assertThat(pool.trash).containsExactly(connection1); + assertThat(connection1.inFlight.get()).isEqualTo(50); + assertThat(connection2.inFlight.get()).isEqualTo(50); + + // Borrowing one more time should resurrect the trashed connection + allRequests.addAll(MockRequest.sendMany(1, pool)); + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + + assertThat(pool.connections).containsExactly(connection2, connection1); + assertThat(pool.trash).isEmpty(); + assertThat(connection1.inFlight.get()).isEqualTo(50); + assertThat(connection2.inFlight.get()).isEqualTo(51); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that if a variable-sized pool has filled up to its maximum connections that borrowConnection will - * return a failed future instead of creating a new connection. 
- * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "short") - public void variable_size_pool_should_fill_its_connections_and_then_reject() throws Exception { - Cluster cluster = createClusterBuilder().build(); - List allRequests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - assertThat(pool.connections.size()).isEqualTo(1); - Connection coreConnection = pool.connections.get(0); - - // Fill enough connections to hit the threshold. - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnection(requests, coreConnection); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - // Allow time for new connection to be spawned. - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - - // Borrow more and ensure the connection returned is a non-core connection. - for (int i = 0; i < NEW_CONNECTION_THRESHOLD; i++) { - MockRequest request = MockRequest.send(pool); - assertThat(request.getConnection()).isNotEqualTo(coreConnection); - allRequests.add(request); - } - - // Fill remaining connections (28 + 28) - 1 - allRequests.addAll(MockRequest.sendMany(55, pool)); - - MockRequest failedBorrow = MockRequest.send(pool); - try { - failedBorrow.getConnection(); - fail("Expected a BusyPoolException"); - } catch (BusyPoolException e) { /*expected*/} - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that a trashed connection that has been timed out should not be resurrected into the + * connection pool if borrowConnection is called and a new connection is needed. 
+ * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "long") + public void should_not_resurrect_trashed_connection_after_idle_timeout() throws Exception { + Cluster cluster = + createClusterBuilder() + .withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)) + .build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + Connection connection1 = pool.connections.get(0); + + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnection(requests, connection1); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + reset(factory); + Connection connection2 = pool.connections.get(1); + + assertThat(connection1.inFlight.get()).isEqualTo(101); + assertThat(connection2.inFlight.get()).isEqualTo(0); + + // Go back under the capacity of 1 connection + MockRequest.completeMany(51, allRequests); + + assertThat(connection1.inFlight.get()).isEqualTo(50); + assertThat(connection2.inFlight.get()).isEqualTo(0); + + // Given enough time, one connection gets trashed (and the implementation picks the first one) + Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); + assertThat(pool.connections).containsExactly(connection2); + assertThat(pool.trash).containsExactly(connection1); + + // Return trashed connection down to 0 inFlight + MockRequest.completeMany(50, allRequests); + assertThat(connection1.inFlight.get()).isEqualTo(0); + + // Give enough time for trashed connection to be cleaned up from the trash: + Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); + assertThat(pool.connections).containsExactly(connection2); + assertThat(pool.trash).isEmpty(); + assertThat(connection1.isClosed()).isTrue(); + + // Fill the live connection to go over the threshold where a second one is needed + requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnection(requests, connection2); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + assertThat(connection2.inFlight.get()).isEqualTo(101); + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 2); + + // Borrow again to get the new connection + MockRequest request = MockRequest.send(pool); + allRequests.add(request); + assertThat(request.getConnection()) + .isNotEqualTo(connection2) // should not be the full connection + .isNotEqualTo(connection1); // should not be the previously trashed one + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that if the core connection pool is full that borrowConnection will create and use a new connection. 
- * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "short") - public void should_add_extra_connection_when_core_full() throws Exception { - Cluster cluster = createClusterBuilder().build(); - List allRequests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - Connection core = pool.connections.get(0); - - // Fill core connection + 1 - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnection(requests, core); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - // Reaching the threshold should have triggered the creation of an extra one - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that a trashed connection that has been timed out should not be closed until it has 0 + * in flight requests. + * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "long") + public void should_not_close_trashed_connection_until_no_in_flight() throws Exception { + Cluster cluster = + createClusterBuilder() + .withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)) + .build(); + List allRequests = newArrayList(); + + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + Connection connection1 = pool.connections.get(0); + + // Fill core connection enough to trigger creation of another one + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnections(requests, Collections.singletonList(connection1)); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertThat(pool.connections).hasSize(2); + + // Return enough times to get back under the threshold where one connection is enough + MockRequest.completeMany(50, allRequests); + + // Give enough time for one connection to be trashed. Due to the implementation, this will be + // the first one. + // It still has in-flight requests so should not get closed. + Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); + assertThat(pool.trash).containsExactly(connection1); + assertThat(connection1.inFlight.get()).isEqualTo(51); + assertThat(connection1.isClosed()).isFalse(); + + // Consume all inFlight requests on the trashed connection. + MockRequest.completeMany(51, allRequests); + + // Sleep enough time for the connection to be consider idled and closed. + Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); + + // The connection should be now closed. + // The trashed connection should be closed and not in the pool or trash. + assertThat(connection1.isClosed()).isTrue(); + assertThat(pool.connections).doesNotContain(connection1); + assertThat(pool.trash).doesNotContain(connection1); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that a trashed connection that has not been timed out should be resurrected into the connection pool if - * borrowConnection is called and a new connection is needed. 
- * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "long") - public void should_resurrect_trashed_connection_within_idle_timeout() throws Exception { - Cluster cluster = createClusterBuilder().withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)).build(); - List allRequests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - Connection connection1 = pool.connections.get(0); - - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnections(requests, Collections.singletonList(connection1)); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - Connection connection2 = pool.connections.get(1); - - assertThat(connection1.inFlight.get()).isEqualTo(101); - assertThat(connection2.inFlight.get()).isEqualTo(0); - - // Go back under the capacity of 1 connection - MockRequest.completeMany(51, allRequests); - - assertThat(connection1.inFlight.get()).isEqualTo(50); - assertThat(connection2.inFlight.get()).isEqualTo(0); - - // Given enough time, one connection gets trashed (and the implementation picks the first one) - Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); - assertThat(pool.connections).containsExactly(connection2); - assertThat(pool.trash).containsExactly(connection1); - - // Now borrow enough to go just under the 1 connection threshold - allRequests.addAll(MockRequest.sendMany(50, pool)); - - assertThat(pool.connections).containsExactly(connection2); - assertThat(pool.trash).containsExactly(connection1); - assertThat(connection1.inFlight.get()).isEqualTo(50); - assertThat(connection2.inFlight.get()).isEqualTo(50); - - // Borrowing one more time should resurrect the trashed connection - allRequests.addAll(MockRequest.sendMany(1, pool)); - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - - assertThat(pool.connections).containsExactly(connection2, connection1); - assertThat(pool.trash).isEmpty(); - assertThat(connection1.inFlight.get()).isEqualTo(50); - assertThat(connection2.inFlight.get()).isEqualTo(51); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that if a connection that has less than the minimum available stream ids is returned to + * the pool that the connection is put in the trash. 
+ * + * @jira_ticket JAVA-419 + * @test_category connection:connection_pool + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "short") + public void should_trash_on_returning_connection_with_insufficient_streams() throws Exception { + Cluster cluster = createClusterBuilder().build(); + List allRequests = newArrayList(); + try { + HostConnectionPool pool = createPool(cluster, 1, 2); + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + Connection core = pool.connections.get(0); + + List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); + assertBorrowedConnections(requests, Collections.singletonList(core)); + allRequests.addAll(requests); + allRequests.add(MockRequest.send(pool)); + + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + assertThat(pool.connections).hasSize(2); + + // Grab the new non-core connection and replace it with a spy. + Connection extra1 = spy(pool.connections.get(1)); + pool.connections.set(1, extra1); + + // Borrow 10 times to ensure pool is utilized. + allRequests.addAll(MockRequest.sendMany(10, pool)); + assertThat(pool.connections).hasSize(2); + + // stub the maxAvailableStreams method to return 0, indicating there are no remaining streams. + // this should cause the connection to be replaced and trashed on returnConnection. + doReturn(0).when(extra1).maxAvailableStreams(); + + // On returning of the connection, should detect that there are no available streams and trash + // it. + assertThat(pool.trash).hasSize(0); + pool.returnConnection(extra1, false); + assertThat(pool.trash).hasSize(1); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that a trashed connection that has been timed out should not be resurrected into the connection pool if - * borrowConnection is called and a new connection is needed. 
- * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "long") - public void should_not_resurrect_trashed_connection_after_idle_timeout() throws Exception { - Cluster cluster = createClusterBuilder().withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)).build(); - List allRequests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - Connection connection1 = pool.connections.get(0); - - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnection(requests, connection1); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - reset(factory); - Connection connection2 = pool.connections.get(1); - - assertThat(connection1.inFlight.get()).isEqualTo(101); - assertThat(connection2.inFlight.get()).isEqualTo(0); - - // Go back under the capacity of 1 connection - MockRequest.completeMany(51, allRequests); - - assertThat(connection1.inFlight.get()).isEqualTo(50); - assertThat(connection2.inFlight.get()).isEqualTo(0); - - // Given enough time, one connection gets trashed (and the implementation picks the first one) - Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); - assertThat(pool.connections).containsExactly(connection2); - assertThat(pool.trash).containsExactly(connection1); - - // Return trashed connection down to 0 inFlight - MockRequest.completeMany(50, allRequests); - assertThat(connection1.inFlight.get()).isEqualTo(0); - - // Give enough time for trashed connection to be cleaned up from the trash: - Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); - assertThat(pool.connections).containsExactly(connection2); - assertThat(pool.trash).isEmpty(); - assertThat(connection1.isClosed()).isTrue(); - - // Fill the live connection to go over the threshold where a second one is needed - requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnection(requests, connection2); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - assertThat(connection2.inFlight.get()).isEqualTo(101); - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 2); - - // Borrow again to get the new connection - MockRequest request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()) - .isNotEqualTo(connection2) // should not be the full connection - .isNotEqualTo(connection1); // should not be the previously trashed one - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that if a connection on a host is lost but other connections remain intact in the Pool + * that the host is not marked down. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_keep_host_up_when_one_connection_lost() throws Exception { + Cluster cluster = createClusterBuilder().build(); + try { + HostConnectionPool pool = createPool(cluster, 2, 2); + Connection core0 = pool.connections.get(0); + Connection core1 = pool.connections.get(1); + + // Drop a connection and ensure the host stays up. 
+ currentClient.disableListener(); + currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress())); + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + + // connection 0 should be down, while connection 1 and the Host should remain up. + assertThat(core0.isClosed()).isTrue(); + assertThat(core1.isClosed()).isFalse(); + assertThat(pool.connections).doesNotContain(core0); + assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(cluster).hasOpenControlConnection(); + } finally { + cluster.close(); } - - /** - * Ensures that a trashed connection that has been timed out should not be closed until it has 0 in flight requests. - * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "long") - public void should_not_close_trashed_connection_until_no_in_flight() throws Exception { - Cluster cluster = createClusterBuilder().withPoolingOptions(new PoolingOptions().setIdleTimeoutSeconds(20)).build(); - List allRequests = newArrayList(); - - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - Connection connection1 = pool.connections.get(0); - - // Fill core connection enough to trigger creation of another one - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnections(requests, Collections.singletonList(connection1)); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertThat(pool.connections).hasSize(2); - - // Return enough times to get back under the threshold where one connection is enough - MockRequest.completeMany(50, allRequests); - - // Give enough time for one connection to be trashed. Due to the implementation, this will be the first one. - // It still has in-flight requests so should not get closed. - Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); - assertThat(pool.trash).containsExactly(connection1); - assertThat(connection1.inFlight.get()).isEqualTo(51); - assertThat(connection1.isClosed()).isFalse(); - - // Consume all inFlight requests on the trashed connection. - MockRequest.completeMany(51, allRequests); - - // Sleep enough time for the connection to be consider idled and closed. - Uninterruptibles.sleepUninterruptibly(30, TimeUnit.SECONDS); - - // The connection should be now closed. - // The trashed connection should be closed and not in the pool or trash. - assertThat(connection1.isClosed()).isTrue(); - assertThat(pool.connections).doesNotContain(connection1); - assertThat(pool.trash).doesNotContain(connection1); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that if all connections on a host are closed that the host is marked down and the + * control connection is notified of that fact and re-established itself. 
+ * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_mark_host_down_when_no_connections_remaining() throws Exception { + int readTimeout = 1000; + int reconnectInterval = 1000; + Cluster cluster = + this.createClusterBuilder() + .withSocketOptions( + new SocketOptions() + .setConnectTimeoutMillis(readTimeout) + .setReadTimeoutMillis(reconnectInterval)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + try { + cluster.init(); + + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + HostConnectionPool pool = createPool(cluster, 8, 8); + // copy list to track these connections. + List connections = newArrayList(pool.connections); + + reset(factory); + + // Drop all connections. + currentClient.disableListener(); + currentClient.closeConnections(CLOSE); + + // The host should be marked down and the control connection closed. + assertThat(cluster).host(1).goesDownWithin(10, TimeUnit.SECONDS); + assertThat(cluster).hasClosedControlConnection(); + + // Ensure all connections are closed. + for (Connection connection : connections) { + assertThat(connection.isClosed()).isTrue(); + } + + // Expect a reconnect attempt on host after reconnect interval + // on behalf of the control connection. + verify(factory, timeout(reconnectInterval * 2).atLeastOnce()).open(host); + + // Sleep for a bit to allow reconnect to fail. + Uninterruptibles.sleepUninterruptibly(readTimeout * 2, TimeUnit.MILLISECONDS); + + // Ensure control connection is still closed. + assertThat(cluster).hasClosedControlConnection(); + + // Reenable connectivity. + currentClient.enableListener(); + + // Reconnect attempt should have been connected for control connection + // and pool. + // 2 attempts for connection.open (reconnect control connection and initial connection for + // host state). + verify(factory, after(reconnectInterval * 2).atLeast(2)).open(host); + // 7 attempts for core connections after first initial connection. + verify(factory, timeout(reconnectInterval * 2)) + .newConnections(any(HostConnectionPool.class), eq(7)); + + // Wait some reasonable amount of time for connection to reestablish. + Uninterruptibles.sleepUninterruptibly(readTimeout, TimeUnit.MILLISECONDS); + + // Control Connection should now be open. + assertThat(cluster).hasOpenControlConnection(); + assertThat(cluster).host(1).hasState(Host.State.UP); + } finally { + cluster.close(); } - - /** - * Ensures that if a connection that has less than the minimum available stream ids is returned to the pool that - * the connection is put in the trash. 
- * - * @jira_ticket JAVA-419 - * @test_category connection:connection_pool - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "short") - public void should_trash_on_returning_connection_with_insufficient_streams() throws Exception { - Cluster cluster = createClusterBuilder().build(); - List allRequests = newArrayList(); - try { - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - Connection core = pool.connections.get(0); - - List requests = MockRequest.sendMany(NEW_CONNECTION_THRESHOLD, pool); - assertBorrowedConnections(requests, Collections.singletonList(core)); - allRequests.addAll(requests); - allRequests.add(MockRequest.send(pool)); - - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertThat(pool.connections).hasSize(2); - - // Grab the new non-core connection and replace it with a spy. - Connection extra1 = spy(pool.connections.get(1)); - pool.connections.set(1, extra1); - - // Borrow 10 times to ensure pool is utilized. - allRequests.addAll(MockRequest.sendMany(10, pool)); - assertThat(pool.connections).hasSize(2); - - // stub the maxAvailableStreams method to return 0, indicating there are no remaining streams. - // this should cause the connection to be replaced and trashed on returnConnection. - doReturn(0).when(extra1).maxAvailableStreams(); - - // On returning of the connection, should detect that there are no available streams and trash it. - assertThat(pool.trash).hasSize(0); - pool.returnConnection(extra1); - assertThat(pool.trash).hasSize(1); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that if a connection on a host is lost that brings the number of active connections in + * a pool under core connection count that up to core connections are re-established, but only + * after the next reconnect schedule has elapsed. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_create_new_connections_when_connection_lost_and_under_core_connections() + throws Exception { + int readTimeout = 1000; + int reconnectInterval = 1000; + Cluster cluster = + this.createClusterBuilder() + .withSocketOptions( + new SocketOptions() + .setConnectTimeoutMillis(readTimeout) + .setReadTimeoutMillis(reconnectInterval)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + List allRequests = newArrayList(); + try { + cluster.init(); + + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + TestExecutorService blockingExecutor = + new TestExecutorService(cluster.manager.blockingExecutor); + cluster.manager.blockingExecutor = blockingExecutor; + + HostConnectionPool pool = createPool(cluster, 3, 3); + Connection core0 = pool.connections.get(0); + Connection core1 = pool.connections.get(1); + Connection core2 = pool.connections.get(2); + + // Drop two core connections. + // Disable new connections initially and we'll eventually reenable it. + currentClient.disableListener(); + currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress())); + currentClient.closeConnection(CLOSE, ((InetSocketAddress) core2.channel.localAddress())); + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + + // Since we have a connection left the host should remain up. 
+ assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(pool.connections).hasSize(1); + + // The borrowed connection should be the open one. + MockRequest request = MockRequest.send(pool); + allRequests.add(request); + assertThat(request.getConnection()).isEqualTo(core1); + + // Should not have tried to create a new core connection since reconnection time had not + // elapsed. + verify(factory, never()).open(any(HostConnectionPool.class)); + + // Sleep to elapse the Reconnection Policy. + Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); + + // Attempt to borrow connection, this should trigger ensureCoreConnections thus spawning a new + // connection. + blockingExecutor.reset(); + request = MockRequest.send(pool); + allRequests.add(request); + assertThat(request.getConnection()).isEqualTo(core1); + + // Should have tried to open up to core connections as result of borrowing a connection past + // reconnect time and not being at core. + blockingExecutor.blockUntilNextTaskCompleted(); + verify(factory).open(any(HostConnectionPool.class)); + reset(factory); + + // Sleep for reconnect interval to allow reconnection time to elapse. + Uninterruptibles.sleepUninterruptibly( + (readTimeout + reconnectInterval) * 2, TimeUnit.MILLISECONDS); + + // Enable listening so new connections succeed. + currentClient.enableListener(); + // Sleep to elapse the Reconnection Policy. + Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); + + // Try to borrow a connection, the pool should grow. + blockingExecutor.reset(); + allRequests.add(MockRequest.send(pool)); + blockingExecutor.blockUntilNextTaskCompleted(); + verify(factory).open(any(HostConnectionPool.class)); + reset(factory); + + // Another core connection should be opened as result of another request to get us up to core + // connections. + blockingExecutor.reset(); + allRequests.add(MockRequest.send(pool)); + blockingExecutor.blockUntilNextTaskCompleted(); + verify(factory).open(any(HostConnectionPool.class)); + reset(factory); + + // Sending another request should not grow the pool any more, since we are now at core + // connections. + allRequests.add(MockRequest.send(pool)); + verify(factory, after((reconnectInterval + readTimeout) * 2).never()) + .open(any(HostConnectionPool.class)); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that if a connection on a host is lost but other connections remain intact in the Pool that the - * host is not marked down. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_keep_host_up_when_one_connection_lost() throws Exception { - Cluster cluster = createClusterBuilder().build(); - try { - HostConnectionPool pool = createPool(cluster, 2, 2); - Connection core0 = pool.connections.get(0); - Connection core1 = pool.connections.get(1); - - // Drop a connection and ensure the host stays up. - currentClient.disableListener(); - currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress())); - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - - // connection 0 should be down, while connection 1 and the Host should remain up. 
- assertThat(core0.isClosed()).isTrue();
- assertThat(core1.isClosed()).isFalse();
- assertThat(pool.connections).doesNotContain(core0);
- assertThat(cluster).host(1).hasState(Host.State.UP);
- assertThat(cluster).hasOpenControlConnection();
- } finally {
- cluster.close();
- }
+ }
+
+ /**
+ * Ensures that if a connection on a host is lost and the number of remaining connections is at
+ * core connection count that no connections are re-established until after there are enough
+ * inflight requests to justify creating one and the reconnection interval has elapsed.
+ *
+ * @jira_ticket JAVA-544
+ * @test_category connection:connection_pool
+ * @since 2.0.11
+ */
+ @Test(groups = "short")
+ public void should_not_schedule_reconnect_when_connection_lost_and_at_core_connections()
+ throws Exception {
+ int readTimeout = 1000;
+ int reconnectInterval = 1000;
+ Cluster cluster =
+ this.createClusterBuilder()
+ .withSocketOptions(
+ new SocketOptions()
+ .setConnectTimeoutMillis(readTimeout)
+ .setReadTimeoutMillis(reconnectInterval))
+ .withReconnectionPolicy(new ConstantReconnectionPolicy(1000))
+ .build();
+ List allRequests = newArrayList();
+ try {
+ cluster.init();
+
+ Connection.Factory factory = spy(cluster.manager.connectionFactory);
+ cluster.manager.connectionFactory = factory;
+
+ HostConnectionPool pool = createPool(cluster, 1, 2);
+ Connection core0 = pool.connections.get(0);
+
+ // Create enough inFlight requests to spawn another connection.
+ List core0requests = newArrayList();
+ for (int i = 0; i < 101; i++) {
+ MockRequest request = MockRequest.send(pool);
+ assertThat(request.getConnection()).isEqualTo(core0);
+ core0requests.add(request);
+ }
+
+ // Pool should grow by 1.
+ verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class));
+ assertThat(pool.connections).hasSize(2);
+
+ // Reset factory mock as we'll be checking for new open() invokes later.
+ reset(factory);
+
+ // Grab the new non-core connection.
+ Connection extra1 = pool.connections.get(1);
+
+ // Drop a connection and disable listening.
+ currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress()));
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ currentClient.disableListener();
+
+ // Since core0 was closed, all of its requests should have errored.
+ for (MockRequest request : core0requests) {
+ assertThat(request.state.get()).isEqualTo(MockRequest.State.FAILED);
+ }
+
+ assertThat(cluster).host(1).hasState(Host.State.UP);
+
+ // Create enough inFlight requests to fill connection.
+ List requests = MockRequest.sendMany(100, pool);
+ assertBorrowedConnections(requests, Collections.singletonList(extra1));
+ allRequests.addAll(requests);
+ assertThat(pool.connections).hasSize(1);
+
+ // A new connection should never have been spawned since we didn't max out core.
+ verify(factory, after(readTimeout).never()).open(any(HostConnectionPool.class));
+
+ // Borrow another connection, since we exceed max another connection should be opened.
+ MockRequest request = MockRequest.send(pool);
+ allRequests.add(request);
+ assertThat(request.getConnection()).isEqualTo(extra1);
+
+ // After some time a connection should attempt to be opened (but will fail).
+ verify(factory, timeout(readTimeout)).open(any(HostConnectionPool.class));
+ assertPoolSize(pool, 1);
+ assertThat(pool.connections).hasSize(1);
+
+ // Wait some reasonable amount of time for connection to reestablish then check pool size.
+ Uninterruptibles.sleepUninterruptibly(readTimeout * 2, TimeUnit.MILLISECONDS); + // Reconnecting failed since listening was enabled. + assertPoolSize(pool, 1); + + // Re enable listening then wait for reconnect. + currentClient.enableListener(); + Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); + + // Borrow another connection, since we exceed max another connection should be opened. + request = MockRequest.send(pool); + allRequests.add(request); + assertThat(request.getConnection()).isEqualTo(extra1); + + // Wait some reasonable amount of time for connection to reestablish then check pool size. + Uninterruptibles.sleepUninterruptibly(readTimeout, TimeUnit.MILLISECONDS); + // Reconnecting should have exceeded and pool will have grown. + assertThat(pool.connections).hasSize(2); + + // Borrowed connection should be the newly spawned connection since the other one has some + // inflight requests. + request = MockRequest.send(pool); + allRequests.add(request); + assertThat(request.getConnection()).isNotEqualTo(core0).isNotEqualTo(extra1); + } finally { + MockRequest.completeAll(allRequests); + cluster.close(); } - - /** - * Ensures that if all connections on a host are closed that the host is marked - * down and the control connection is notified of that fact and re-established - * itself. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_mark_host_down_when_no_connections_remaining() throws Exception { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - try { - cluster.init(); - - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - HostConnectionPool pool = createPool(cluster, 8, 8); - // copy list to track these connections. - List connections = newArrayList(pool.connections); - - reset(factory); - - // Drop all connections. - currentClient.disableListener(); - currentClient.closeConnections(CLOSE); - - // The host should be marked down and the control connection closed. - assertThat(cluster).host(1).goesDownWithin(10, TimeUnit.SECONDS); - assertThat(cluster).hasClosedControlConnection(); - - // Ensure all connections are closed. - for (Connection connection : connections) { - assertThat(connection.isClosed()).isTrue(); - } - - // Expect a reconnect attempt on host after reconnect interval - // on behalf of the control connection. - verify(factory, timeout(reconnectInterval * 2).atLeastOnce()).open(host); - - // Sleep for a bit to allow reconnect to fail. - Uninterruptibles.sleepUninterruptibly(readTimeout * 2, TimeUnit.MILLISECONDS); - - // Ensure control connection is still closed. - assertThat(cluster).hasClosedControlConnection(); - - // Reenable connectivity. - currentClient.enableListener(); - - // Reconnect attempt should have been connected for control connection - // and pool. - // 2 attempts for connection.open (reconnect control connection and initial connection for host state). - verify(factory, after(reconnectInterval * 2).atLeast(2)).open(host); - // 7 attempts for core connections after first initial connection. 
- verify(factory, timeout(reconnectInterval * 2)).newConnections(any(HostConnectionPool.class), eq(7));
-
- // Wait some reasonable amount of time for connection to reestablish.
- Uninterruptibles.sleepUninterruptibly(readTimeout, TimeUnit.MILLISECONDS);
-
- // Control Connection should now be open.
- assertThat(cluster).hasOpenControlConnection();
- assertThat(cluster).host(1).hasState(Host.State.UP);
- } finally {
- cluster.close();
- }
+ }
+
+ /**
+ * Ensures that if some connections fail on pool init that the host and subsequently the control
+ * connection is not marked down. The test also ensures that when making requests on the pool that
+ * connections are brought up to core.
+ *
+ * @jira_ticket JAVA-544
+ * @test_category connection:connection_pool
+ * @since 2.0.11
+ */
+ @Test(groups = "short")
+ public void should_not_mark_host_down_if_some_connections_fail_on_init() throws Exception {
+ int readTimeout = 1000;
+ int reconnectInterval = 1000;
+ Cluster cluster =
+ this.createClusterBuilder()
+ .withSocketOptions(
+ new SocketOptions()
+ .setConnectTimeoutMillis(readTimeout)
+ .setReadTimeoutMillis(reconnectInterval))
+ .withReconnectionPolicy(new ConstantReconnectionPolicy(1000))
+ .build();
+ List allRequests = newArrayList();
+ try {
+ cluster.init();
+
+ Connection.Factory factory = spy(cluster.manager.connectionFactory);
+ cluster.manager.connectionFactory = factory;
+
+ // Allow the first 4 connections to establish, but disable after that.
+ currentClient.disableListener(4);
+ HostConnectionPool pool = createPool(cluster, 8, 8);
+
+ reset(factory);
+
+ // Pool size should show all successful connections.
+ assertThat(pool.connections).hasSize(4);
+
+ // Control connection should remain up in addition to the host.
+ assertThat(cluster).host(1).hasState(Host.State.UP);
+ assertThat(cluster).hasOpenControlConnection();
+
+ // Reenable listener, wait reconnectInterval and then try borrowing a connection.
+ currentClient.enableListener();
+
+ Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS);
+
+ // Should open up to core connections, however it will only spawn up to 1 connection
+ // per request, so we need to make enough requests to make up the deficit. Additionally
+ // we need to wait for connections to be established between requests for the pool
+ // to spawn new connections (since it only allows one simultaneous creation).
+ for (int i = 5; i <= 8; i++) {
+ allRequests.add(MockRequest.send(pool));
+ verify(factory, timeout(readTimeout)).open(any(HostConnectionPool.class));
+ reset(factory);
+ assertPoolSize(pool, i);
+ }
+ } finally {
+ MockRequest.completeAll(allRequests);
+ cluster.close();
}
-
- /**
- * Ensures that if a connection on a host is lost that brings the number of active connections in a pool
- * under core connection count that up to core connections are re-established, but only after the
- * next reconnect schedule has elapsed.
- * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_create_new_connections_when_connection_lost_and_under_core_connections() throws Exception { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - List allRequests = newArrayList(); - try { - cluster.init(); - - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - TestExecutorService blockingExecutor = new TestExecutorService(cluster.manager.blockingExecutor); - cluster.manager.blockingExecutor = blockingExecutor; - - HostConnectionPool pool = createPool(cluster, 3, 3); - Connection core0 = pool.connections.get(0); - Connection core1 = pool.connections.get(1); - Connection core2 = pool.connections.get(2); - - // Drop two core connections. - // Disable new connections initially and we'll eventually reenable it. - currentClient.disableListener(); - currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress())); - currentClient.closeConnection(CLOSE, ((InetSocketAddress) core2.channel.localAddress())); - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - - // Since we have a connection left the host should remain up. - assertThat(cluster).host(1).hasState(Host.State.UP); - assertThat(pool.connections).hasSize(1); - - // The borrowed connection should be the open one. - MockRequest request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()).isEqualTo(core1); - - // Should not have tried to create a new core connection since reconnection time had not elapsed. - verify(factory, never()).open(any(HostConnectionPool.class)); - - // Sleep to elapse the Reconnection Policy. - Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - - // Attempt to borrow connection, this should trigger ensureCoreConnections thus spawning a new connection. - blockingExecutor.reset(); - request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()).isEqualTo(core1); - - // Should have tried to open up to core connections as result of borrowing a connection past reconnect time and not being at core. - blockingExecutor.blockUntilNextTaskCompleted(); - verify(factory).open(any(HostConnectionPool.class)); - reset(factory); - - // Sleep for reconnect interval to allow reconnection time to elapse. - Uninterruptibles.sleepUninterruptibly((readTimeout + reconnectInterval) * 2, TimeUnit.MILLISECONDS); - - // Enable listening so new connections succeed. - currentClient.enableListener(); - // Sleep to elapse the Reconnection Policy. - Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - - // Try to borrow a connection, the pool should grow. - blockingExecutor.reset(); - allRequests.add(MockRequest.send(pool)); - blockingExecutor.blockUntilNextTaskCompleted(); - verify(factory).open(any(HostConnectionPool.class)); - reset(factory); - - // Another core connection should be opened as result of another request to get us up to core connections. 
- blockingExecutor.reset(); - allRequests.add(MockRequest.send(pool)); - blockingExecutor.blockUntilNextTaskCompleted(); - verify(factory).open(any(HostConnectionPool.class)); - reset(factory); - - // Sending another request should not grow the pool any more, since we are now at core connections. - allRequests.add(MockRequest.send(pool)); - verify(factory, after((reconnectInterval + readTimeout) * 2).never()).open(any(HostConnectionPool.class)); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + } + + /** + * Ensures that if all connections fail on pool init that the host and subsequently the control + * connection is not marked down since the control connection is still active. The test also + * ensures that borrow attempts on the pool fail if we are still in the reconnection window + * according to the ConvictionPolicy. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_throw_exception_if_convicted_and_no_connections_available() { + int readTimeout = 1000; + int reconnectInterval = 1000; + Cluster cluster = + this.createClusterBuilder() + .withSocketOptions( + new SocketOptions() + .setConnectTimeoutMillis(readTimeout) + .setReadTimeoutMillis(reconnectInterval)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + try { + // Init cluster so control connection is created. + cluster.init(); + assertThat(cluster).hasOpenControlConnection(); + + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + // Disable listener so all connections on pool fail. + currentClient.disableListener(); + HostConnectionPool pool = createPool(cluster, 8, 8); + + reset(factory); + + // Pool should be empty. + assertThat(pool.connections).hasSize(0); + + // Control connection should stay up with the host. + assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(cluster).hasOpenControlConnection(); + + MockRequest failedBorrow = MockRequest.send(pool); + try { + failedBorrow.getConnection(); + fail("Expected a BusyPoolException"); + } catch (BusyPoolException e) { + /*expected*/ + } + } finally { + cluster.close(); } + } + + /** + * Ensures that if all connections fail on pool init that the host and subsequently the control + * connection is not marked down. The test also ensures that when making requests on the pool + * after the conviction period that all core connections are created. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_wait_on_connection_if_not_convicted_and_no_connections_available() + throws Exception { + int readTimeout = 1000; + int reconnectInterval = 1000; + Cluster cluster = + this.createClusterBuilder() + .withSocketOptions( + new SocketOptions() + .setConnectTimeoutMillis(readTimeout) + .setReadTimeoutMillis(reconnectInterval)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + try { + // Init cluster so control connection is created. + cluster.init(); + assertThat(cluster).hasOpenControlConnection(); + + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + // Disable listener so all connections on pool fail. + currentClient.disableListener(); + HostConnectionPool pool = createPool(cluster, 8, 8); + + // Pool should be empty. 
+ assertThat(pool.connections).hasSize(0); + + // Control connection should stay up with the host. + assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(cluster).hasOpenControlConnection(); + + currentClient.enableListener(); + + // Wait for reconnectInterval so ConvictionPolicy allows connection to be created. + Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); + + reset(factory); + MockRequest request = MockRequest.send(pool, 1); + + // Should create up to core connections. + verify(factory, timeout(readTimeout * 8).times(8)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 8); + + request.simulateSuccessResponse(); + } finally { + cluster.close(); + } + } + + /** + * Ensures that if a pool is created with zero core connections that when a request is first sent + * that one and only one connection is created and that it waits on availability of that + * connection and returns it. + * + * @jira_ticket JAVA-544 + * @test_category connection:connection_pool + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_wait_on_connection_if_zero_core_connections() throws Exception { + int readTimeout = 1000; + int reconnectInterval = 1000; + Cluster cluster = + this.createClusterBuilder() + .withSocketOptions( + new SocketOptions() + .setConnectTimeoutMillis(readTimeout) + .setReadTimeoutMillis(reconnectInterval)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) + .build(); + try { + // Init cluster so control connection is created. + cluster.init(); + assertThat(cluster).hasOpenControlConnection(); + + Connection.Factory factory = spy(cluster.manager.connectionFactory); + cluster.manager.connectionFactory = factory; + + HostConnectionPool pool = createPool(cluster, 0, 2); + + // Pool should be empty. + assertThat(pool.connections).hasSize(0); + + // Control connection should stay up with the host. + assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(cluster).hasOpenControlConnection(); + + reset(factory); + MockRequest request = MockRequest.send(pool, 1); + + // Should create up to core connections. + verify(factory, timeout(readTimeout).times(1)).open(any(HostConnectionPool.class)); + assertPoolSize(pool, 1); + Uninterruptibles.getUninterruptibly(request.requestInitialized, 10, TimeUnit.SECONDS); + request.simulateSuccessResponse(); + } finally { + cluster.close(); + } + } + + private HostConnectionPool createPool(Cluster cluster, int coreConnections, int maxConnections) { + cluster + .getConfiguration() + .getPoolingOptions() + .setNewConnectionThreshold(HostDistance.LOCAL, 100) + .setMaxRequestsPerConnection(HostDistance.LOCAL, 128) + .setMaxConnectionsPerHost(HostDistance.LOCAL, maxConnections) + .setCoreConnectionsPerHost(HostDistance.LOCAL, coreConnections); + Session session = cluster.connect(); + Host host = TestUtils.findHost(cluster, 1); + + // Replace the existing pool with a spy pool and return it. + SessionManager sm = ((SessionManager) session); + return sm.pools.get(host); + } + + /** + * This test uses a table named "Java349" with 1000 column and performs asynchronously 100k + * insertions. While the insertions are being executed, the number of opened connection is + * monitored. + * + *

    If at anytime, the number of opened connections is negative, this test will fail. + * + * @jira_ticket JAVA-349 + * @test_category connection:connection_pool + * @since 2.0.6, 2.1.1 + */ + @Test(groups = "long", enabled = false /* this test causes timeouts on Jenkins */) + public void open_connections_metric_should_always_be_positive() throws InterruptedException { + // Track progress in a dedicated thread + int numberOfInserts = 100 * 1000; + final CountDownLatch pendingInserts = new CountDownLatch(numberOfInserts); + ExecutorService progressReportExecutor = Executors.newSingleThreadExecutor(); + final Runnable progressReporter = + new Runnable() { + @Override + public void run() { + pendingInserts.countDown(); + } + }; - /** - * Ensures that if a connection on a host is lost and the number of remaining connections is at - * core connection count that no connections are re-established until after there are enough - * inflight requests to justify creating one and the reconnection interval has elapsed. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_not_schedule_reconnect_when_connection_lost_and_at_core_connections() throws Exception { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - List allRequests = newArrayList(); - try { - cluster.init(); - - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - HostConnectionPool pool = createPool(cluster, 1, 2); - Connection core0 = pool.connections.get(0); - - // Create enough inFlight requests to spawn another connection. - List core0requests = newArrayList(); - for (int i = 0; i < 101; i++) { - MockRequest request = MockRequest.send(pool); - assertThat(request.getConnection()).isEqualTo(core0); - core0requests.add(request); - } - - // Pool should grow by 1. - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); - assertThat(pool.connections).hasSize(2); - - // Reset factory mock as we'll be checking for new open() invokes later. - reset(factory); - - // Grab the new non-core connection. - Connection extra1 = pool.connections.get(1); - - // Drop a connection and disable listening. - currentClient.closeConnection(CLOSE, ((InetSocketAddress) core0.channel.localAddress())); - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - currentClient.disableListener(); - - // Since core0 was closed, all of it's requests should have errored. 
- for (MockRequest request : core0requests) { - assertThat(request.state.get()).isEqualTo(MockRequest.State.FAILED); + // Track opened connections in a dedicated thread every one second + final AtomicBoolean negativeOpenConnectionCountSpotted = new AtomicBoolean(false); + final Gauge openConnections = cluster.getMetrics().getOpenConnections(); + ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); + final Runnable openConnectionsWatcher = + new Runnable() { + @Override + public void run() { + Integer value = openConnections.getValue(); + if (value < 0) { + System.err.println("Negative value spotted for openConnection metric: " + value); + negativeOpenConnectionCountSpotted.set(true); } + } + }; + openConnectionsWatcherExecutor.scheduleAtFixedRate(openConnectionsWatcher, 1, 1, SECONDS); - assertThat(cluster).host(1).hasState(Host.State.UP); - - // Create enough inFlight requests to fill connection. - List requests = MockRequest.sendMany(100, pool); - assertBorrowedConnections(requests, Collections.singletonList(extra1)); - allRequests.addAll(requests); - assertThat(pool.connections).hasSize(1); - - // A new connection should never have been spawned since we didn't max out core. - verify(factory, after(readTimeout).never()).open(any(HostConnectionPool.class)); - - // Borrow another connection, since we exceed max another connection should be opened. - MockRequest request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()).isEqualTo(extra1); - - // After some time the a connection should attempt to be opened (but will fail). - verify(factory, timeout(readTimeout)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 1); - assertThat(pool.connections).hasSize(1); - - // Wait some reasonable amount of time for connection to reestablish then check pool size. - Uninterruptibles.sleepUninterruptibly(readTimeout * 2, TimeUnit.MILLISECONDS); - // Reconnecting failed since listening was enabled. - assertPoolSize(pool, 1); - - // Re enable listening then wait for reconnect. - currentClient.enableListener(); - Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - - // Borrow another connection, since we exceed max another connection should be opened. - request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()).isEqualTo(extra1); - - // Wait some reasonable amount of time for connection to reestablish then check pool size. - Uninterruptibles.sleepUninterruptibly(readTimeout, TimeUnit.MILLISECONDS); - // Reconnecting should have exceeded and pool will have grown. - assertThat(pool.connections).hasSize(2); - - // Borrowed connection should be the newly spawned connection since the other one has some inflight requests. - request = MockRequest.send(pool); - allRequests.add(request); - assertThat(request.getConnection()).isNotEqualTo(core0).isNotEqualTo(extra1); - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + // Insert 100k lines in a newly created 1k columns table + PreparedStatement insertStatement = session.prepare(generateJava349InsertStatement()); + for (int key = 0; key < numberOfInserts; key++) { + ResultSetFuture future = session.executeAsync(insertStatement.bind(key)); + future.addListener(progressReporter, progressReportExecutor); } - /** - * Ensures that if some connections fail on pool init that the host and subsequently the - * control connection is not marked down. 
The test also ensures that when making requests - * on the pool that connections are brought up to core. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_not_mark_host_down_if_some_connections_fail_on_init() throws Exception { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - List allRequests = newArrayList(); - try { - cluster.init(); - - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - // Allow the first 4 connections to establish, but disable after that. - currentClient.disableListener(4); - HostConnectionPool pool = createPool(cluster, 8, 8); - - reset(factory); - - // Pool size should show all successful connections. - assertThat(pool.connections).hasSize(4); + // Wait for all inserts to happen and stop connections and progress tracking + pendingInserts.await(); + openConnectionsWatcherExecutor.shutdownNow(); + progressReportExecutor.shutdownNow(); - // Control connection should remain up in addition to to host. - assertThat(cluster).host(1).hasState(Host.State.UP); - assertThat(cluster).hasOpenControlConnection(); - - // Reenable listener, wait reconnectInterval and then try borrowing a connection. - currentClient.enableListener(); - - Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - - // Should open up to core connections, however it will only spawn up to 1 connection - // per request, so we need to make enough requests to make up the deficit. Additionally - // we need to wait for connections to be established between requests for the pool - // to spawn new connections (since it only allows one simultaneous creation). - for (int i = 5; i <= 8; i++) { - allRequests.add(MockRequest.send(pool)); - verify(factory, timeout(readTimeout)).open(any(HostConnectionPool.class)); - reset(factory); - assertPoolSize(pool, i); - } - } finally { - MockRequest.completeAll(allRequests); - cluster.close(); - } + if (negativeOpenConnectionCountSpotted.get()) { + fail("Negative value spotted for open connection count"); } + } - /** - * Ensures that if all connections fail on pool init that the host and subsequently the - * control connection is not marked down since the control connection is still active. - * The test also ensures that borrow attempts on the pool fail if we are still in the reconnection window - * according to the ConvictionPolicy. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_throw_exception_if_convicted_and_no_connections_available() { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - try { - // Init cluster so control connection is created. 
- cluster.init();
- assertThat(cluster).hasOpenControlConnection();
-
- Connection.Factory factory = spy(cluster.manager.connectionFactory);
- cluster.manager.connectionFactory = factory;
+ /**
+ * Ensures that when a host is brought up with a reused connection and zero core connections, the
+ * driver does not try to create any connections.
+ *
+ * @jira_ticket JAVA-1794
+ * @test_category connection:connection_pool
+ * @since 3.5.1
+ */
+ @Test(groups = "short")
+ public void
+ should_not_create_connections_if_zero_core_connections_and_reused_connection_on_reconnect()
+ throws Exception {
+ int reconnectInterval = 1000;
- // Disable listener so all connections on pool fail.
- currentClient.disableListener();
- HostConnectionPool pool = createPool(cluster, 8, 8);
+ Cluster cluster =
+ this.createClusterBuilder()
+ .withReconnectionPolicy(new ConstantReconnectionPolicy(1000))
+ .build();
- reset(factory);
+ Host.StateListener stateListener = mock(Host.StateListener.class);
- // Pool should be empty.
- assertThat(pool.connections).hasSize(0);
+ try {
+ // Init cluster so control connection is created.
+ cluster.init();
+ assertThat(cluster).hasOpenControlConnection();
- // Control connection should stay up with the host.
- assertThat(cluster).host(1).hasState(Host.State.UP);
- assertThat(cluster).hasOpenControlConnection();
+ Connection.Factory factory = spy(cluster.manager.connectionFactory);
+ cluster.manager.connectionFactory = factory;
- MockRequest failedBorrow = MockRequest.send(pool);
- try {
- failedBorrow.getConnection();
- fail("Expected a BusyPoolException");
- } catch (BusyPoolException e) { /*expected*/}
- } finally {
- cluster.close();
- }
- }
-
- /**
- * Ensures that if all connections fail on pool init that the host and subsequently the
- * control connection is not marked down. The test also ensures that when making requests
- * on the pool after the conviction period that all core connections are created.
- *
- * @jira_ticket JAVA-544
- * @test_category connection:connection_pool
- * @since 2.0.11
- */
- @Test(groups = "short")
- public void should_wait_on_connection_if_not_convicted_and_no_connections_available() throws Exception {
- int readTimeout = 1000;
- int reconnectInterval = 1000;
- Cluster cluster = this.createClusterBuilder()
- .withSocketOptions(new SocketOptions()
- .setConnectTimeoutMillis(readTimeout)
- .setReadTimeoutMillis(reconnectInterval))
- .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build();
- try {
- // Init cluster so control connection is created.
- cluster.init();
- assertThat(cluster).hasOpenControlConnection();
+ HostConnectionPool pool = createPool(cluster, 0, 2);
- Connection.Factory factory = spy(cluster.manager.connectionFactory);
- cluster.manager.connectionFactory = factory;
+ cluster.register(stateListener);
+ cluster.manager.triggerOnDown(pool.host, true);
- // Disable listener so all connections on pool fail.
- currentClient.disableListener();
- HostConnectionPool pool = createPool(cluster, 8, 8);
+ Thread.sleep(reconnectInterval * 3);
- // Pool should be empty.
- assertThat(pool.connections).hasSize(0);
+ verify(stateListener, times(1)).onUp(pool.host);
- // Control connection should stay up with the host.
- assertThat(cluster).host(1).hasState(Host.State.UP);
- assertThat(cluster).hasOpenControlConnection();
+ // Pool should be empty.
+ assertThat(pool.connections).hasSize(0);
- currentClient.enableListener();
+ // Control connection should stay up with the host.
+ assertThat(cluster).host(1).hasState(Host.State.UP); + assertThat(cluster).hasOpenControlConnection(); - // Wait for reconnectInterval so ConvictionPolicy allows connection to be created. - Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - - reset(factory); - MockRequest request = MockRequest.send(pool, 1); - - // Should create up to core connections. - verify(factory, timeout(readTimeout * 8).times(8)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 8); - - request.simulateSuccessResponse(); - } finally { - cluster.close(); - } + } finally { + cluster.close(); } + } - /** - * Ensures that if a pool is created with zero core connections that when a request - * is first sent that one and only one connection is created and that it waits on availability - * of that connection and returns it. - * - * @jira_ticket JAVA-544 - * @test_category connection:connection_pool - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_wait_on_connection_if_zero_core_connections() throws Exception { - int readTimeout = 1000; - int reconnectInterval = 1000; - Cluster cluster = this.createClusterBuilder() - .withSocketOptions(new SocketOptions() - .setConnectTimeoutMillis(readTimeout) - .setReadTimeoutMillis(reconnectInterval)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)).build(); - try { - // Init cluster so control connection is created. - cluster.init(); - assertThat(cluster).hasOpenControlConnection(); - - Connection.Factory factory = spy(cluster.manager.connectionFactory); - cluster.manager.connectionFactory = factory; - - HostConnectionPool pool = createPool(cluster, 0, 2); - - // Pool should be empty. - assertThat(pool.connections).hasSize(0); - - // Control connection should stay up with the host. - assertThat(cluster).host(1).hasState(Host.State.UP); - assertThat(cluster).hasOpenControlConnection(); - - reset(factory); - MockRequest request = MockRequest.send(pool, 1); - - // Should create up to core connections. - verify(factory, timeout(readTimeout).times(1)).open(any(HostConnectionPool.class)); - assertPoolSize(pool, 1); - Uninterruptibles.getUninterruptibly(request.requestInitialized, 10, TimeUnit.SECONDS); - request.simulateSuccessResponse(); - } finally { - cluster.close(); - } + private String generateJava349InsertStatement() { + StringBuilder sb = new StringBuilder("INSERT INTO Java349 (mykey"); + for (int i = 0; i < 1000; i++) { + sb.append(", column").append(i); } - - private HostConnectionPool createPool(Cluster cluster, int coreConnections, int maxConnections) { - cluster.getConfiguration().getPoolingOptions() - .setNewConnectionThreshold(HostDistance.LOCAL, 100) - .setMaxRequestsPerConnection(HostDistance.LOCAL, 128) - .setMaxConnectionsPerHost(HostDistance.LOCAL, maxConnections) - .setCoreConnectionsPerHost(HostDistance.LOCAL, coreConnections); - Session session = cluster.connect(); - Host host = TestUtils.findHost(cluster, 1); - - // Replace the existing pool with a spy pool and return it. 
- SessionManager sm = ((SessionManager) session); - return sm.pools.get(host); + sb.append(") VALUES (?"); + for (int i = 0; i < 1000; i++) { + sb.append(", ").append(i); + } + sb.append(");"); + + PrimingRequest preparedStatementPrime = + PrimingRequest.preparedStatementBuilder() + .withQuery(sb.toString()) + .withThen(then().withVariableTypes(PrimitiveType.INT)) + .build(); + primingClient.prime(preparedStatementPrime); + return sb.toString(); + } + + /** + * Mock ResponseCallback that simulates the behavior of SpeculativeExecution (in terms of + * borrowing/releasing connections). + */ + static class MockRequest implements Connection.ResponseCallback { + + enum State { + START, + COMPLETED, + FAILED, + TIMED_OUT } - /** - *

- * This test uses a table named "Java349" with 1000 column and performs asynchronously 100k insertions. While the
- * insertions are being executed, the number of opened connection is monitored.
- *

    - * If at anytime, the number of opened connections is negative, this test will fail. - * - * @jira_ticket JAVA-349 - * @test_category connection:connection_pool - * @since 2.0.6, 2.1.1 - */ - @Test(groups = "long", enabled = false /* this test causes timeouts on Jenkins */) - public void open_connections_metric_should_always_be_positive() throws InterruptedException { - // Track progress in a dedicated thread - int numberOfInserts = 100 * 1000; - final CountDownLatch pendingInserts = new CountDownLatch(numberOfInserts); - ExecutorService progressReportExecutor = Executors.newSingleThreadExecutor(); - final Runnable progressReporter = new Runnable() { - @Override - public void run() { - pendingInserts.countDown(); - } - }; + final ListenableFuture connectionFuture; - // Track opened connections in a dedicated thread every one second - final AtomicBoolean negativeOpenConnectionCountSpotted = new AtomicBoolean(false); - final Gauge openConnections = cluster.getMetrics().getOpenConnections(); - ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); - final Runnable openConnectionsWatcher = new Runnable() { - @Override - public void run() { - Integer value = openConnections.getValue(); - if (value < 0) { - System.err.println("Negative value spotted for openConnection metric: " + value); - negativeOpenConnectionCountSpotted.set(true); - } - } - }; - openConnectionsWatcherExecutor.scheduleAtFixedRate(openConnectionsWatcher, 1, 1, SECONDS); + final ListenableFuture requestInitialized; - // Insert 100k lines in a newly created 1k columns table - PreparedStatement insertStatement = session.prepare(generateJava349InsertStatement()); - for (int key = 0; key < numberOfInserts; key++) { - ResultSetFuture future = session.executeAsync(insertStatement.bind(key)); - future.addListener(progressReporter, progressReportExecutor); - } + private volatile Connection.ResponseHandler responseHandler; - // Wait for all inserts to happen and stop connections and progress tracking - pendingInserts.await(); - openConnectionsWatcherExecutor.shutdownNow(); - progressReportExecutor.shutdownNow(); + final AtomicReference state = new AtomicReference(State.START); - if (negativeOpenConnectionCountSpotted.get()) { - fail("Negative value spotted for open connection count"); - } + static MockRequest send(HostConnectionPool pool) { + return send(pool, 0); } - private String generateJava349InsertStatement() { - StringBuilder sb = new StringBuilder("INSERT INTO Java349 (mykey"); - for (int i = 0; i < 1000; i++) { - sb.append(", column").append(i); - } - sb.append(") VALUES (?"); - for (int i = 0; i < 1000; i++) { - sb.append(", ").append(i); - } - sb.append(");"); - - PrimingRequest preparedStatementPrime = PrimingRequest.preparedStatementBuilder() - .withQuery(sb.toString()) - .withThen(then().withVariableTypes(PrimitiveType.INT)) - .build(); - primingClient.prime(preparedStatementPrime); - return sb.toString(); + static MockRequest send(HostConnectionPool pool, int maxQueueSize) + throws ConnectionException, BusyConnectionException { + return send(pool, 5000, maxQueueSize); } - /** - * Mock ResponseCallback that simulates the behavior of SpeculativeExecution (in terms of borrowing/releasing connections). 
- */ - static class MockRequest implements Connection.ResponseCallback { - - enum State {START, COMPLETED, FAILED, TIMED_OUT} - - final ListenableFuture connectionFuture; - - final ListenableFuture requestInitialized; - - private volatile Connection.ResponseHandler responseHandler; - - final AtomicReference state = new AtomicReference(State.START); - - static MockRequest send(HostConnectionPool pool) { - return send(pool, 0); - } - - static MockRequest send(HostConnectionPool pool, int maxQueueSize) throws ConnectionException, BusyConnectionException { - return send(pool, 5000, maxQueueSize); - } + static MockRequest send(HostConnectionPool pool, int timeoutMillis, int maxQueueSize) + throws ConnectionException, BusyConnectionException { + return new MockRequest(pool, timeoutMillis, maxQueueSize); + } - static MockRequest send(HostConnectionPool pool, int timeoutMillis, int maxQueueSize) throws ConnectionException, BusyConnectionException { - return new MockRequest(pool, timeoutMillis, maxQueueSize); - } + private static List sendMany(int count, HostConnectionPool pool) + throws ConnectionException { + return sendMany(count, pool, 0); + } - private static List sendMany(int count, HostConnectionPool pool) throws ConnectionException { - return sendMany(count, pool, 0); - } + private static List sendMany(int count, HostConnectionPool pool, int maxQueueSize) + throws ConnectionException { + List requests = newArrayList(); + for (int i = 0; i < count; i++) { + MockRequest request = send(pool, maxQueueSize); + requests.add(request); + } + return requests; + } - private static List sendMany(int count, HostConnectionPool pool, int maxQueueSize) throws ConnectionException { - List requests = newArrayList(); - for (int i = 0; i < count; i++) { - MockRequest request = send(pool, maxQueueSize); - requests.add(request); - } - return requests; - } - - /** - * Completes count requests by simulating a successful response. - */ - private static void completeMany(int count, List requests) { - Iterator requestIt = requests.iterator(); - for (int i = 0; i < count; i++) { - if (requestIt.hasNext()) { - MockRequest request = requestIt.next(); - request.simulateSuccessResponse(); - requestIt.remove(); - } else { - break; - } - } + /** Completes count requests by simulating a successful response. */ + private static void completeMany(int count, List requests) { + Iterator requestIt = requests.iterator(); + for (int i = 0; i < count; i++) { + if (requestIt.hasNext()) { + MockRequest request = requestIt.next(); + request.simulateSuccessResponse(); + requestIt.remove(); + } else { + break; } + } + } - /** - * Completes all requests by simulating a successful response. - */ - private static void completeAll(List requests) { - for (MockRequest request : requests) { - request.simulateSuccessResponse(); - } - } + /** Completes all requests by simulating a successful response. 
*/ + private static void completeAll(List requests) { + for (MockRequest request : requests) { + request.simulateSuccessResponse(); + } + } - private MockRequest(HostConnectionPool pool, int timeoutMillis, int maxQueueSize) throws ConnectionException { - this.connectionFuture = pool.borrowConnection(timeoutMillis, MILLISECONDS, maxQueueSize); - requestInitialized = Futures.transform(this.connectionFuture, new Function() { + private MockRequest(HostConnectionPool pool, int timeoutMillis, int maxQueueSize) + throws ConnectionException { + this.connectionFuture = pool.borrowConnection(timeoutMillis, MILLISECONDS, maxQueueSize); + requestInitialized = + GuavaCompatibility.INSTANCE.transform( + this.connectionFuture, + new Function() { @Override public Connection.ResponseHandler apply(Connection connection) { - MockRequest thisRequest = MockRequest.this; - thisRequest.responseHandler = new Connection.ResponseHandler(connection, -1, thisRequest); - connection.dispatcher.add(thisRequest.responseHandler); - return responseHandler; + MockRequest thisRequest = MockRequest.this; + thisRequest.responseHandler = + new Connection.ResponseHandler(connection, -1, thisRequest); + connection.dispatcher.add(thisRequest.responseHandler); + return responseHandler; } - }); - } + }); + } - void simulateSuccessResponse() { - onSet(getConnection(), null, 0, 0); - } + void simulateSuccessResponse() { + onSet(getConnection(), null, 0, 0); + } - @SuppressWarnings("unused") - void simulateErrorResponse() { - onException(getConnection(), null, 0, 0); - } + @SuppressWarnings("unused") + void simulateErrorResponse() { + onException(getConnection(), null, 0, 0); + } - @SuppressWarnings("unused") - void simulateTimeout() { - if (onTimeout(getConnection(), 0, 0)) - responseHandler.cancelHandler(); - } + @SuppressWarnings("unused") + void simulateTimeout() { + if (onTimeout(getConnection(), 0, 0)) responseHandler.cancelHandler(); + } - Connection getConnection() { - try { - return Uninterruptibles.getUninterruptibly(connectionFuture, 500, MILLISECONDS); - } catch (ExecutionException e) { - throw Throwables.propagate(e.getCause()); - } catch (TimeoutException e) { - fail("Timed out getting connection"); - return null; // never reached - } - } + Connection getConnection() { + try { + return Uninterruptibles.getUninterruptibly(connectionFuture, 500, MILLISECONDS); + } catch (ExecutionException e) { + throw Throwables.propagate(e.getCause()); + } catch (TimeoutException e) { + fail("Timed out getting connection"); + return null; // never reached + } + } - @Override - public void onSet(Connection connection, Message.Response response, long latency, int retryCount) { - assertThat(connectionFuture.isDone()).isTrue(); - try { - assertThat(Uninterruptibles.getUninterruptibly(connectionFuture)).isNotNull(); - } catch (ExecutionException e) { - Throwables.propagate(e.getCause()); - } - if (state.compareAndSet(State.START, State.COMPLETED)) { - connection.dispatcher.removeHandler(responseHandler, true); - connection.release(); - } - } + @Override + public void onSet( + Connection connection, Message.Response response, long latency, int retryCount) { + assertThat(connectionFuture.isDone()).isTrue(); + try { + assertThat(Uninterruptibles.getUninterruptibly(connectionFuture)).isNotNull(); + } catch (ExecutionException e) { + Throwables.propagate(e.getCause()); + } + if (state.compareAndSet(State.START, State.COMPLETED)) { + connection.dispatcher.removeHandler(responseHandler, true); + connection.release(); + } + } - @Override - public void 
onException(Connection connection, Exception exception, long latency, int retryCount) { - if (state.compareAndSet(State.START, State.FAILED)) { - connection.dispatcher.removeHandler(responseHandler, true); - connection.release(); - } - } + @Override + public void onException( + Connection connection, Exception exception, long latency, int retryCount) { + if (state.compareAndSet(State.START, State.FAILED)) { + connection.dispatcher.removeHandler(responseHandler, true); + connection.release(); + } + } - @Override - public boolean onTimeout(Connection connection, long latency, int retryCount) { - return state.compareAndSet(State.START, State.TIMED_OUT); - } + @Override + public boolean onTimeout(Connection connection, long latency, int retryCount) { + return state.compareAndSet(State.START, State.TIMED_OUT); + } - @Override - public Message.Request request() { - return null; // not needed for this test class - } + @Override + public Message.Request request() { + return null; // not needed for this test class + } - @Override - public int retryCount() { - return 0; // value not important for this test class - } + @Override + public int retryCount() { + return 0; // value not important for this test class } + } - static class TestExecutorService extends ForwardingListeningExecutorService { + static class TestExecutorService extends ForwardingListeningExecutorService { - private final ListeningExecutorService delegate; + private final ListeningExecutorService delegate; - private final Semaphore semaphore = new Semaphore(0); + private final Semaphore semaphore = new Semaphore(0); - TestExecutorService(ListeningExecutorService delegate) { - this.delegate = delegate; - } + TestExecutorService(ListeningExecutorService delegate) { + this.delegate = delegate; + } - @Override - protected ListeningExecutorService delegate() { - return delegate; - } + @Override + protected ListeningExecutorService delegate() { + return delegate; + } - public void reset() { - semaphore.drainPermits(); - } + public void reset() { + semaphore.drainPermits(); + } - public void blockUntilNextTaskCompleted() throws InterruptedException { - semaphore.tryAcquire(1, 1, TimeUnit.MINUTES); - } + public void blockUntilNextTaskCompleted() throws InterruptedException { + semaphore.tryAcquire(1, 1, TimeUnit.MINUTES); + } - @Override - public ListenableFuture submit(Runnable task) { - ListenableFuture future = super.submit(task); - Futures.addCallback(future, new FutureCallback() { - @Override - public void onSuccess(Object result) { - semaphore.release(1); - } + @Override + public ListenableFuture submit(Runnable task) { + ListenableFuture future = super.submit(task); + GuavaCompatibility.INSTANCE.addCallback( + future, + new FutureCallback() { + @Override + public void onSuccess(Object result) { + semaphore.release(1); + } - @Override - public void onFailure(Throwable t) { - semaphore.release(1); - } - }); - return future; - } + @Override + public void onFailure(Throwable t) { + semaphore.release(1); + } + }); + return future; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostMetadataIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/HostMetadataIntegrationTest.java index 12f7649a991..019d775f434 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HostMetadataIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HostMetadataIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
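The `TestExecutorService` helper above blocks a test until the next submitted task completes: every task releases one semaphore permit when it finishes, whether it succeeded or failed. A minimal, self-contained sketch of the same pattern using only `java.util.concurrent` (the class and method names here are illustrative, not driver API):

```
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Sketch of the "block until the next task completes" pattern: each submitted
// task releases a permit in a finally block, so callers can await completion
// without holding a reference to any particular future.
class TaskCompletionTracker {
  private final ExecutorService delegate = Executors.newSingleThreadExecutor();
  private final Semaphore completions = new Semaphore(0);

  void submit(Runnable task) {
    delegate.submit(
        () -> {
          try {
            task.run();
          } finally {
            completions.release(); // released on success and on failure alike
          }
        });
  }

  boolean blockUntilNextTaskCompleted() throws InterruptedException {
    // Returns false if nothing completed within the timeout.
    return completions.tryAcquire(1, 1, TimeUnit.MINUTES);
  }
}
```

The test class gets the same effect with Guava's `FutureCallback`, releasing a permit from both `onSuccess` and `onFailure`.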
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,306 +17,373 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; + +import com.datastax.driver.core.utils.UUIDs; +import java.net.InetAddress; +import java.util.UUID; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.Test; -import java.net.InetAddress; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; - public class HostMetadataIntegrationTest { - /** - * Validates that {@link Host#getDseVersion()} and {@link Host#getDseWorkload()} return values defined in - * the dse_version and workload columns if they are present in system.local - * or system.peers otherwise they return null. - * - * @test_category host:metadata - * @jira_ticket JAVA-1042 - */ - @Test(groups = "short") - public void should_parse_dse_workload_and_version_if_available() { - // given: A 5 node cluster with all nodes having a workload and dse_version except node 2. - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(5) - .forcePeerInfo(1, 1, "workload", "Analytics") - .forcePeerInfo(1, 1, "dse_version", "4.8.4") - .forcePeerInfo(1, 3, "workload", "Solr") - .forcePeerInfo(1, 3, "dse_version", "4.8.4") - .forcePeerInfo(1, 4, "workload", "Cassandra") - .forcePeerInfo(1, 4, "dse_version", "4.8.4") - .forcePeerInfo(1, 5, "workload", "AmazingNewFeature") - .forcePeerInfo(1, 5, "dse_version", "5.0.0") - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. - cluster.init(); - - // then: All nodes except node 2 should have a workload and dse version. 
- assertThat(cluster).host(1).hasWorkload("Analytics").hasDseVersion(VersionNumber.parse("4.8.4")); - assertThat(cluster).host(2).hasNoWorkload().hasNoDseVersion(); - assertThat(cluster).host(3).hasWorkload("Solr").hasDseVersion(VersionNumber.parse("4.8.4")); - assertThat(cluster).host(4).hasWorkload("Cassandra").hasDseVersion(VersionNumber.parse("4.8.4")); - assertThat(cluster).host(5).hasWorkload("AmazingNewFeature").hasDseVersion(VersionNumber.parse("5.0.0")); - } finally { - cluster.close(); - scassandraCluster.stop(); - } + /** + * Validates that {@link Host#getDseVersion()} and {@link Host#getDseWorkload()} return values + * defined in the dse_version and workload columns if they are present + * in system.local or system.peers otherwise they return null. + * + * @test_category host:metadata + * @jira_ticket JAVA-1042 + */ + @Test(groups = "short") + public void should_parse_dse_workload_and_version_if_available() { + // given: A 5 node cluster with all nodes having a workload and dse_version except node 2. + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(5) + .forcePeerInfo(1, 1, "workload", "Analytics") + .forcePeerInfo(1, 1, "dse_version", "4.8.4") + .forcePeerInfo(1, 3, "workload", "Solr") + .forcePeerInfo(1, 3, "dse_version", "4.8.4") + .forcePeerInfo(1, 4, "workload", "Cassandra") + .forcePeerInfo(1, 4, "dse_version", "4.8.4") + .forcePeerInfo(1, 5, "workload", "AmazingNewFeature") + .forcePeerInfo(1, 5, "dse_version", "5.0.0") + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: All nodes except node 2 should have a workload and dse version. + assertThat(cluster) + .host(1) + .hasWorkload("Analytics") + .hasDseVersion(VersionNumber.parse("4.8.4")); + assertThat(cluster).host(2).hasNoWorkload().hasNoDseVersion(); + assertThat(cluster).host(3).hasWorkload("Solr").hasDseVersion(VersionNumber.parse("4.8.4")); + assertThat(cluster) + .host(4) + .hasWorkload("Cassandra") + .hasDseVersion(VersionNumber.parse("4.8.4")); + assertThat(cluster) + .host(5) + .hasWorkload("AmazingNewFeature") + .hasDseVersion(VersionNumber.parse("5.0.0")); + } finally { + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that {@link Host#getDseVersion()} and {@link Host#getDseWorkload()} return null if - * the dse_version and workload columns are not present in system.local - * for the control host. - * - * @test_category host:metadata - * @jira_ticket JAVA-1042 - */ - @Test(groups = "short") - public void should_not_parse_dse_workload_and_version_if_not_present_in_local_table() { - // given: A cluster with node 1 (control host) not having a workload or dse_version set, and node 2 with those - // columns set. - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(2) - .forcePeerInfo(1, 2, "workload", "Analytics") - .forcePeerInfo(1, 2, "dse_version", "4.8.4") - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. 
- cluster.init(); - - // then: - // - node 1 should have no workload or dse version. - // - node 2 should have a workload and a dse version. - assertThat(cluster).host(1).hasNoWorkload().hasNoDseVersion(); - assertThat(cluster).host(2).hasWorkload("Analytics").hasDseVersion(VersionNumber.parse("4.8.4")); - } finally { - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that {@link Host#getDseVersion()} and {@link Host#getDseWorkload()} return null if + * the dse_version and workload columns are not present in + * system.local for the control host. + * + * @test_category host:metadata + * @jira_ticket JAVA-1042 + */ + @Test(groups = "short") + public void should_not_parse_dse_workload_and_version_if_not_present_in_local_table() { + // given: A cluster with node 1 (control host) not having a workload or dse_version set, and + // node 2 with those + // columns set. + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(2) + .forcePeerInfo(1, 2, "workload", "Analytics") + .forcePeerInfo(1, 2, "dse_version", "4.8.4") + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: + // - node 1 should have no workload or dse version. + // - node 2 should have a workload and a dse version. + assertThat(cluster).host(1).hasNoWorkload().hasNoDseVersion(); + assertThat(cluster) + .host(2) + .hasWorkload("Analytics") + .hasDseVersion(VersionNumber.parse("4.8.4")); + } finally { + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that if dse_version column is a non-version value in system.local or - * system.peers that a warning is logged and {@link Host#getDseVersion()} returns null. - * - * @test_category host:metadata - * @jira_ticket JAVA-1042 - */ - @Test(groups = "short") - public void should_log_warning_when_invalid_version_used_for_dse_version() { - // given: A cluster with a node that has an invalid version for dse_version in system.local. - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(1) - .forcePeerInfo(1, 1, "workload", "Analytics") - .forcePeerInfo(1, 1, "dse_version", "Invalid Version!") - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - MemoryAppender logs = new MemoryAppender(); - Logger logger = Logger.getLogger(Host.class); - Level originalLoggerLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(logs); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. - cluster.init(); - - // then: dse version for that host should not be set and a warning shall be logged. - assertThat(logs.get()) - .contains("Error parsing DSE version Invalid Version!. 
This shouldn't have happened"); - assertThat(cluster).host(1).hasNoDseVersion().hasWorkload("Analytics"); - } finally { - logger.removeAppender(logs); - logger.setLevel(originalLoggerLevel); - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that if dse_version column is a non-version value in system.local + * or system.peers that a warning is logged and {@link Host#getDseVersion()} + * returns null. + * + * @test_category host:metadata + * @jira_ticket JAVA-1042 + */ + @Test(groups = "short") + public void should_log_warning_when_invalid_version_used_for_dse_version() { + // given: A cluster with a node that has an invalid version for dse_version in system.local. + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(1) + .forcePeerInfo(1, 1, "workload", "Analytics") + .forcePeerInfo(1, 1, "dse_version", "Invalid Version!") + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + MemoryAppender logs = new MemoryAppender(); + Logger logger = Logger.getLogger(Host.class); + Level originalLoggerLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(logs); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: dse version for that host should not be set and a warning shall be logged. + assertThat(logs.get()) + .contains("Error parsing DSE version Invalid Version!. This shouldn't have happened"); + assertThat(cluster).host(1).hasNoDseVersion().hasWorkload("Analytics"); + } finally { + logger.removeAppender(logs); + logger.setLevel(originalLoggerLevel); + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that {@link Host#isDseGraphEnabled()} returns the value defined in the graph columns if - * it is present in system.local or system.peers otherwise it returns false. - * - * @test_category host:metadata - * @jira_ticket JAVA-1171 - */ - @Test(groups = "short") - public void should_parse_dse_graph_if_available() { - // given: A 3 node cluster with all nodes having a graph value except node 2. - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(3) - .forcePeerInfo(1, 1, "graph", true) - .forcePeerInfo(1, 3, "graph", false) - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. - cluster.init(); - - // then: - // - node 1 should have graph. - // - node 2 and node 3 should not have graph. - assertThat(cluster).host(1).hasDseGraph(); - assertThat(cluster).host(2).hasNoDseGraph(); - assertThat(cluster).host(3).hasNoDseGraph(); - } finally { - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that {@link Host#isDseGraphEnabled()} returns the value defined in the graph + * columns if it is present in system.local or system.peers + * otherwise it returns false. + * + * @test_category host:metadata + * @jira_ticket JAVA-1171 + */ + @Test(groups = "short") + public void should_parse_dse_graph_if_available() { + // given: A 3 node cluster with all nodes having a graph value except node 2. 
+ ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(3) + .forcePeerInfo(1, 1, "graph", true) + .forcePeerInfo(1, 3, "graph", false) + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: + // - node 1 should have graph. + // - node 2 and node 3 should not have graph. + assertThat(cluster).host(1).hasDseGraph(); + assertThat(cluster).host(2).hasNoDseGraph(); + assertThat(cluster).host(3).hasNoDseGraph(); + } finally { + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that {@link Host#isDseGraphEnabled()} returns false if the graph column is not present - * in system.local for the control host. - * - * @test_category host:metadata - * @jira_ticket JAVA-1171 - */ - @Test(groups = "short") - public void should_not_parse_dse_graph_if_not_present_in_local_table() { - // given: A cluster with node 1 (control host) not having graph set, and node 2 with it set. - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(2) - .forcePeerInfo(1, 2, "graph", true) - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. - cluster.init(); - - // then: - // - node 1 should have no graph. - // - node 2 should have graph. - assertThat(cluster).host(1).hasNoDseGraph(); - assertThat(cluster).host(2).hasDseGraph(); - } finally { - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that {@link Host#isDseGraphEnabled()} returns false if the graph column + * is not present in system.local for the control host. + * + * @test_category host:metadata + * @jira_ticket JAVA-1171 + */ + @Test(groups = "short") + public void should_not_parse_dse_graph_if_not_present_in_local_table() { + // given: A cluster with node 1 (control host) not having graph set, and node 2 with it set. + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(2) + .forcePeerInfo(1, 2, "graph", true) + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: + // - node 1 should have no graph. + // - node 2 should have graph. + assertThat(cluster).host(1).hasNoDseGraph(); + assertThat(cluster).host(2).hasDseGraph(); + } finally { + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that {@link Host#getBroadcastAddress()} is set for all hosts (control host or not) in the default - * case of a cluster, as the broadcast address is derived from broadcast_address in - * system.local and peer in system.peers. - * - * @test_category host:metadata - * @jira_ticket JAVA-1035 - */ - @Test(groups = "short") - public void should_set_broadcast_address_for_all_nodes() { - // given: A cluster with 3 nodes. 
- ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(3) - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandraCluster.init(); - // when: initializing a cluster instance. - cluster.init(); - - // then: broadcast address should be set for each host: - // - broadcast_address is used for system.local and is always present, so control host should have it. - // - peer column is used for system.peers to resolve broadcast address and is always present. - for (int i = 1; i <= scassandraCluster.nodes().size(); i++) { - assertThat(cluster).host(i).hasBroadcastAddress(TestUtils.addressOfNode(i)); - } - } finally { - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that {@link Host#getBroadcastAddress()} is set for all hosts (control host or not) in + * the default case of a cluster, as the broadcast address is derived from broadcast_address + * in system.local and peer in system.peers. + * + * @test_category host:metadata + * @jira_ticket JAVA-1035 + */ + @Test(groups = "short") + public void should_set_broadcast_address_for_all_nodes() { + // given: A cluster with 3 nodes. + ScassandraCluster scassandraCluster = + ScassandraCluster.builder().withIpPrefix(TestUtils.IP_PREFIX).withNodes(3).build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandraCluster.init(); + // when: initializing a cluster instance. + cluster.init(); + + // then: broadcast address should be set for each host: + // - broadcast_address is used for system.local and is always present, so control host should + // have it. + // - peer column is used for system.peers to resolve broadcast address and is always present. + for (int i = 1; i <= scassandraCluster.nodes().size(); i++) { + assertThat(cluster).host(i).hasBroadcastAddress(TestUtils.addressOfNode(i)); + } + } finally { + cluster.close(); + scassandraCluster.stop(); } - - /** - * Validates that {@link Host#getListenAddress()} is set for a host if and only if the listen_address - * column is present and set in the system.peers table or if the host is the control host. - * - * @test_category host:metadata - * @jira_ticket JAVA-1035 - */ - @Test(groups = "short") - public void should_set_listen_address_if_available() { - // given: A Cluster with 3 nodes and node 2 being configured with a listen address in its peers table. - InetAddress listenAddress = TestUtils.addressOfNode(10); - ScassandraCluster scassandraCluster = ScassandraCluster.builder() - .withIpPrefix(TestUtils.IP_PREFIX) - .withNodes(3) - .forcePeerInfo(1, 2, "listen_address", listenAddress) - .build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandraCluster.address(1).getAddress()) - .withPort(scassandraCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - // when: initializing a cluster instance. - scassandraCluster.init(); - cluster.init(); - - // then: listen address should be set appropriate for each host: - // - Control Host will always have a listen address (listen_address should always be present). 
- assertThat(cluster).host(1).hasListenAddress(TestUtils.addressOfNode(1)); - // - Host 2 should have a listen address since we provided one. - assertThat(cluster).host(2).hasListenAddress(listenAddress); - // - Host 3 should have no listen address as it wasn't provided. - assertThat(cluster).host(3).hasNoListenAddress(); - } finally { - cluster.close(); - scassandraCluster.stop(); - } + } + + /** + * Validates that {@link Host#getListenAddress()} is set for a host if and only if the + * listen_address column is present and set in the system.peers table or if + * the host is the control host. + * + * @test_category host:metadata + * @jira_ticket JAVA-1035 + */ + @Test(groups = "short") + public void should_set_listen_address_if_available() { + // given: A Cluster with 3 nodes and node 2 being configured with a listen address in its peers + // table. + InetAddress listenAddress = TestUtils.addressOfNode(10); + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(3) + .forcePeerInfo(1, 2, "listen_address", listenAddress) + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + // when: initializing a cluster instance. + scassandraCluster.init(); + cluster.init(); + + // then: listen address should be set appropriate for each host: + // - Control Host will always have a listen address (listen_address should always be present). + assertThat(cluster).host(1).hasListenAddress(TestUtils.addressOfNode(1)); + // - Host 2 should have a listen address since we provided one. + assertThat(cluster).host(2).hasListenAddress(listenAddress); + // - Host 3 should have no listen address as it wasn't provided. + assertThat(cluster).host(3).hasNoListenAddress(); + } finally { + cluster.close(); + scassandraCluster.stop(); + } + } + + @Test(groups = "short") + public void should_parse_host_id_and_schema_version() { + UUID hostId1 = UUIDs.random(); + UUID hostId2 = UUIDs.random(); + UUID schemaVersion = UUIDs.random(); + + ScassandraCluster scassandraCluster = + ScassandraCluster.builder() + .withIpPrefix(TestUtils.IP_PREFIX) + .withNodes(2) + .forcePeerInfo(1, 1, "host_id", hostId1) + .forcePeerInfo(1, 1, "schema_version", schemaVersion) + .forcePeerInfo(1, 2, "host_id", hostId2) + .forcePeerInfo(1, 2, "schema_version", schemaVersion) + .build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandraCluster.address(1).getAddress()) + .withPort(scassandraCluster.getBinaryPort()) + .build(); + + try { + scassandraCluster.init(); + cluster.init(); + + assertThat(cluster).host(1).hasHostId(hostId1); + assertThat(cluster).host(1).hasSchemaVersion(schemaVersion); + assertThat(cluster).host(2).hasHostId(hostId2); + assertThat(cluster).host(2).hasSchemaVersion(schemaVersion); + + } finally { + cluster.close(); + scassandraCluster.stop(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostTargetingTest.java b/driver-core/src/test/java/com/datastax/driver/core/HostTargetingTest.java new file mode 100644 index 00000000000..de777ce4147 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/HostTargetingTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
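The accessors exercised by `HostMetadataIntegrationTest` — `Host#getDseVersion()`, `Host#getDseWorkload()`, `Host#isDseGraphEnabled()`, `Host#getBroadcastAddress()` and `Host#getListenAddress()` — return `null` (or `false` for the graph flag) when the corresponding column is absent from `system.local` / `system.peers`. A rough usage sketch, assuming a reachable contact point at 127.0.0.1 (the address and output format are assumptions, not taken from the tests):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;

public class HostMetadataExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      cluster.init(); // populates host metadata without opening a session
      for (Host host : cluster.getMetadata().getAllHosts()) {
        // DSE-specific values are null when the columns are absent.
        System.out.printf(
            "%s dse_version=%s workload=%s graph=%s broadcast=%s listen=%s%n",
            host.getEndPoint(),
            host.getDseVersion(),
            host.getDseWorkload(),
            host.isDseGraphEnabled(),
            host.getBroadcastAddress(),
            host.getListenAddress());
      }
    } finally {
      cluster.close();
    }
  }
}
```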
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.testng.Assert.fail; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy; +import com.datastax.driver.core.policies.HostFilterPolicy; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.google.common.base.Predicate; +import org.mockito.Mockito; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class HostTargetingTest { + + private ScassandraCluster sCluster; + private Cluster cluster; + private Session session; + // Allow connecting to all hosts except for the 4th one. + private LoadBalancingPolicy lbSpy = + Mockito.spy( + new HostFilterPolicy( + DCAwareRoundRobinPolicy.builder().build(), + new Predicate() { + @Override + public boolean apply(Host host) { + return !host.getEndPoint().resolve().getAddress().getHostAddress().endsWith("4"); + } + })); + + @BeforeMethod(groups = "short") + public void setUp() { + sCluster = ScassandraCluster.builder().withNodes(4).build(); + sCluster.init(); + + cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(lbSpy) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + session = cluster.connect(); + + // Reset invocations before entering test. + Mockito.reset(lbSpy); + } + + @AfterMethod(groups = "short") + public void tearDown() { + if (cluster != null) { + cluster.close(); + } + if (sCluster != null) { + sCluster.stop(); + } + } + + private void verifyNoLbpInteractions() { + // load balancing policy should have been skipped completely as host was set. + Mockito.verify(lbSpy, Mockito.times(0)) + .newQueryPlan(Mockito.any(String.class), Mockito.any(Statement.class)); + } + + @Test(groups = "short") + public void should_use_host_on_statement() { + for (int i = 0; i < 10; i++) { + int hostIndex = i % 3 + 1; + Host host = TestUtils.findHost(cluster, hostIndex); + + // given a statement with host explicitly set. + Statement statement = new SimpleStatement("select * system.local").setHost(host); + + // when statement is executed + ResultSet result = session.execute(statement); + + // then the query should have been sent to the configured host. 
+ assertThat(result.getExecutionInfo().getQueriedHost()).isSameAs(host); + + verifyNoLbpInteractions(); + } + } + + @Test(groups = "short") + public void should_fail_if_host_fails_query() { + String query = "mock"; + sCluster + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery(query) + .withThen(then().withResult(Result.unavailable)) + .build()); + + // given a statement with a host configured to fail the given query. + Host host1 = TestUtils.findHost(cluster, 1); + Statement statement = new SimpleStatement(query).setHost(host1); + + try { + // when statement is executed an error should be raised. + session.execute(statement); + fail("Query should have failed"); + } catch (NoHostAvailableException e) { + // then the request should fail with a NHAE and no host was tried. + assertThat(e.getErrors()).hasSize(1); + assertThat(e.getErrors().values().iterator().next()).isInstanceOf(UnavailableException.class); + } finally { + verifyNoLbpInteractions(); + } + } + + @Test(groups = "short") + public void should_fail_if_host_is_not_connected() { + // given a statement with host explicitly set that for which we have no active pool. + Host host4 = TestUtils.findHost(cluster, 4); + Statement statement = new SimpleStatement("select * system.local").setHost(host4); + + try { + // when statement is executed + session.execute(statement); + fail("Query should have failed"); + } catch (NoHostAvailableException e) { + // then the request should fail with a NHAE and no host was tried. + assertThat(e.getErrors()).isEmpty(); + } finally { + verifyNoLbpInteractions(); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataAssert.java index 8607b4f2d13..ab3d49cc7c6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
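`HostTargetingTest` covers the `Statement.setHost(Host)` escape hatch: when a host is pinned on a statement, the load balancing policy's query plan is never consulted, and if the driver has no active connection pool for that host the request fails fast with a `NoHostAvailableException` whose error map is empty. A minimal sketch of the API (contact point and query are placeholders):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

public class HostTargetingExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Session session = cluster.connect();
      // Pick any known host; real code would choose it deliberately.
      Host target = cluster.getMetadata().getAllHosts().iterator().next();
      Statement statement = new SimpleStatement("SELECT * FROM system.local").setHost(target);
      ResultSet rs = session.execute(statement);
      System.out.println("queried host: " + rs.getExecutionInfo().getQueriedHost());
    } finally {
      cluster.close();
    }
  }
}
```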
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,54 +17,54 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.IndexMetadata.Kind; import org.assertj.core.api.AbstractAssert; -import static org.assertj.core.api.Assertions.assertThat; - public class IndexMetadataAssert extends AbstractAssert { - public IndexMetadataAssert(IndexMetadata actual) { - super(actual, IndexMetadataAssert.class); - } + public IndexMetadataAssert(IndexMetadata actual) { + super(actual, IndexMetadataAssert.class); + } - public IndexMetadataAssert hasName(String name) { - assertThat(actual.getName()).isEqualTo(name); - return this; - } + public IndexMetadataAssert hasName(String name) { + assertThat(actual.getName()).isEqualTo(name); + return this; + } - public IndexMetadataAssert hasParent(TableMetadata parent) { - assertThat(actual.getTable()).isEqualTo(parent); - return this; - } + public IndexMetadataAssert hasParent(TableMetadata parent) { + assertThat(actual.getTable()).isEqualTo(parent); + return this; + } - public IndexMetadataAssert hasOption(String name, String value) { - assertThat(actual.getOption(name)).isEqualTo(value); - return this; - } + public IndexMetadataAssert hasOption(String name, String value) { + assertThat(actual.getOption(name)).isEqualTo(value); + return this; + } - public IndexMetadataAssert asCqlQuery(String cqlQuery) { - assertThat(actual.asCQLQuery()).isEqualTo(cqlQuery); - return this; - } + public IndexMetadataAssert asCqlQuery(String cqlQuery) { + assertThat(actual.asCQLQuery()).isEqualTo(cqlQuery); + return this; + } - public IndexMetadataAssert isCustomIndex() { - assertThat(actual.isCustomIndex()).isTrue(); - return this; - } + public IndexMetadataAssert isCustomIndex() { + assertThat(actual.isCustomIndex()).isTrue(); + return this; + } - public IndexMetadataAssert isNotCustomIndex() { - assertThat(actual.isCustomIndex()).isFalse(); - return this; - } + public IndexMetadataAssert isNotCustomIndex() { + assertThat(actual.isCustomIndex()).isFalse(); + return this; + } - public IndexMetadataAssert hasTarget(String target) { - assertThat(actual.getTarget()).isEqualTo(target); - return this; - } + public IndexMetadataAssert hasTarget(String target) { + assertThat(actual.getTarget()).isEqualTo(target); + return this; + } - public IndexMetadataAssert hasKind(Kind kind) { - assertThat(actual.getKind()).isEqualTo(kind); - return this; - } + public IndexMetadataAssert hasKind(Kind kind) { + assertThat(actual.getKind()).isEqualTo(kind); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataTest.java index 3a1fa999cfb..fce604b3e1b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/IndexMetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
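`IndexMetadataAssert` follows AssertJ's custom-assertion pattern: extend `AbstractAssert`, delegate each check to a plain `assertThat`, and return `this` so checks chain fluently. A small sketch of the same pattern against the driver's `VersionNumber` type (the assertion class itself is made up for illustration, not part of the test suite):

```
import static org.assertj.core.api.Assertions.assertThat;

import com.datastax.driver.core.VersionNumber;
import org.assertj.core.api.AbstractAssert;

// Minimal custom AssertJ assertion: each check returns "this" so calls chain,
// e.g. assertThatVersion(v).hasMajor(4).hasMinor(8).
class VersionNumberAssert extends AbstractAssert<VersionNumberAssert, VersionNumber> {

  VersionNumberAssert(VersionNumber actual) {
    super(actual, VersionNumberAssert.class);
  }

  static VersionNumberAssert assertThatVersion(VersionNumber actual) {
    return new VersionNumberAssert(actual);
  }

  VersionNumberAssert hasMajor(int expected) {
    isNotNull();
    assertThat(actual.getMajor()).isEqualTo(expected);
    return this;
  }

  VersionNumberAssert hasMinor(int expected) {
    isNotNull();
    assertThat(actual.getMinor()).isEqualTo(expected);
    return this;
  }
}
```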
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,30 +17,39 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.ColumnMetadata.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ColumnMetadata.COLUMN_NAME; +import static com.datastax.driver.core.ColumnMetadata.COMPONENT_INDEX; +import static com.datastax.driver.core.ColumnMetadata.INDEX_NAME; +import static com.datastax.driver.core.ColumnMetadata.INDEX_OPTIONS; +import static com.datastax.driver.core.ColumnMetadata.INDEX_TYPE; +import static com.datastax.driver.core.ColumnMetadata.KIND_V2; +import static com.datastax.driver.core.ColumnMetadata.VALIDATOR; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.IndexMetadata.Kind.COMPOSITES; +import static com.datastax.driver.core.IndexMetadata.Kind.CUSTOM; +import static com.datastax.driver.core.IndexMetadata.Kind.KEYS; + +import com.datastax.driver.core.ColumnMetadata.Raw; import com.datastax.driver.core.Token.M3PToken; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.primitives.Ints; -import org.testng.annotations.Test; - import java.nio.ByteBuffer; import java.util.List; import java.util.Map; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ColumnMetadata.*; -import static com.datastax.driver.core.DataType.*; -import static com.datastax.driver.core.IndexMetadata.Kind.*; +import org.testng.annotations.Test; @CassandraVersion("1.2.0") public class IndexMetadataTest extends CCMTestsSupport { - /** - * Column definitions for schema_columns table (legacy pre-3.0 layout). - */ - private static final ColumnDefinitions legacyColumnDefs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ + /** Column definitions for schema_columns table (legacy pre-3.0 layout). */ + private static final ColumnDefinitions legacyColumnDefs = + new ColumnDefinitions( + new ColumnDefinitions.Definition[] { definition(COLUMN_NAME, text()), definition(COMPONENT_INDEX, cint()), definition(KIND_V2, text()), @@ -46,335 +57,383 @@ public class IndexMetadataTest extends CCMTestsSupport { definition(INDEX_TYPE, text()), definition(VALIDATOR, text()), definition(INDEX_OPTIONS, text()) - }, CodecRegistry.DEFAULT_INSTANCE); + }, + CodecRegistry.DEFAULT_INSTANCE); - /** - * Column definitions for indexes table (post-3.0 layout). - */ - private static final ColumnDefinitions indexColumnDefs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ + /** Column definitions for indexes table (post-3.0 layout). 
*/ + private static final ColumnDefinitions indexColumnDefs = + new ColumnDefinitions( + new ColumnDefinitions.Definition[] { definition(IndexMetadata.NAME, text()), definition(IndexMetadata.KIND, text()), definition(IndexMetadata.OPTIONS, map(text(), text())) - }, CodecRegistry.DEFAULT_INSTANCE); - - private static final TypeCodec> MAP_CODEC = TypeCodec.map(TypeCodec.varchar(), TypeCodec.varchar()); - - private ProtocolVersion protocolVersion; - - @Override - public void onTestContextInitialized() { - protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - String createTable = "CREATE TABLE indexing (" - + "id int," - + "id2 int," - + "map_values map," - + "map_keys map," - + "map_entries map," - + "map_all map," - + "text_column text, " - + "\"MixedCaseColumn\" list," - + - // Frozen collections was introduced only in C* 2.1.3 - (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.1.3")) >= 0 - ? - ", map_full frozen>," - + "set_full frozen>," - + "list_full frozen>," - : - "") - + "PRIMARY KEY (id, id2));"; - execute(createTable); - } - - @Test(groups = "short") - public void should_create_metadata_for_simple_index() { - String createValuesIndex = String.format("CREATE INDEX text_column_index ON %s.indexing (text_column);", keyspace); - session().execute(createValuesIndex); - ColumnMetadata column = getColumn("text_column"); - IndexMetadata index = getIndex("text_column_index"); - assertThat(index) - .hasName("text_column_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("text_column") - .hasKind(COMPOSITES) - .asCqlQuery(createValuesIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion(value = "2.1", description = "index names with quoted identifiers and collection indexes not supported until 2.1") - public void should_create_metadata_for_values_index_on_mixed_case_column() { - // 3.0 assumes the 'values' keyword if index on a collection - String createValuesIndex = ccm().getCassandraVersion().getMajor() > 2 ? - String.format("CREATE INDEX \"MixedCaseIndex\" ON %s.indexing (values(\"MixedCaseColumn\"));", keyspace) : - String.format("CREATE INDEX \"MixedCaseIndex\" ON %s.indexing (\"MixedCaseColumn\");", keyspace); - session().execute(createValuesIndex); - ColumnMetadata column = getColumn("\"MixedCaseColumn\""); - IndexMetadata index = getIndex("\"MixedCaseIndex\""); - assertThat(index) - .hasName("MixedCaseIndex") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget(ccm().getCassandraVersion().getMajor() > 2 ? "values(\"MixedCaseColumn\")" : "\"MixedCaseColumn\"") - .hasKind(COMPOSITES) - .asCqlQuery(createValuesIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_create_metadata_for_index_on_map_values() { - // 3.0 assumes the 'values' keyword if index on a collection - String createValuesIndex = ccm().getCassandraVersion().getMajor() > 2 ? 
- String.format("CREATE INDEX map_values_index ON %s.indexing (values(map_values));", keyspace) : - String.format("CREATE INDEX map_values_index ON %s.indexing (map_values);", keyspace); - session().execute(createValuesIndex); - ColumnMetadata column = getColumn("map_values"); - IndexMetadata index = getIndex("map_values_index"); - assertThat(index) - .hasName("map_values_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget(ccm().getCassandraVersion().getMajor() > 2 ? "values(map_values)" : "map_values") - .hasKind(COMPOSITES) - .asCqlQuery(createValuesIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_create_metadata_for_index_on_map_keys() { - String createKeysIndex = String.format("CREATE INDEX map_keys_index ON %s.indexing (keys(map_keys));", keyspace); - session().execute(createKeysIndex); - ColumnMetadata column = getColumn("map_keys"); - IndexMetadata index = getIndex("map_keys_index"); - assertThat(index) - .hasName("map_keys_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("keys(map_keys)") - .hasKind(COMPOSITES) - .asCqlQuery(createKeysIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.1.3") - public void should_create_metadata_for_full_index_on_map() { - String createFullIndex = String.format("CREATE INDEX map_full_index ON %s.indexing (full(map_full));", keyspace); - session().execute(createFullIndex); - ColumnMetadata column = getColumn("map_full"); - IndexMetadata index = getIndex("map_full_index"); - assertThat(index) - .hasName("map_full_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("full(map_full)") - .hasKind(COMPOSITES) - .asCqlQuery(createFullIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.1.3") - public void should_create_metadata_for_full_index_on_set() { - String createFullIndex = String.format("CREATE INDEX set_full_index ON %s.indexing (full(set_full));", keyspace); - session().execute(createFullIndex); - ColumnMetadata column = getColumn("set_full"); - IndexMetadata index = getIndex("set_full_index"); - assertThat(index) - .hasName("set_full_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("full(set_full)") - .hasKind(COMPOSITES) - .asCqlQuery(createFullIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.1.3") - public void should_create_metadata_for_full_index_on_list() { - String createFullIndex = String.format("CREATE INDEX list_full_index ON %s.indexing (full(list_full));", keyspace); - session().execute(createFullIndex); - ColumnMetadata column = getColumn("list_full"); - IndexMetadata index = getIndex("list_full_index"); - assertThat(index) - .hasName("list_full_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("full(list_full)") - .hasKind(COMPOSITES) - .asCqlQuery(createFullIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_create_metadata_for_index_on_map_entries() { - String createEntriesIndex = String.format("CREATE INDEX map_entries_index ON %s.indexing (entries(map_entries));", keyspace); - 
session().execute(createEntriesIndex); - ColumnMetadata column = getColumn("map_entries"); - IndexMetadata index = getIndex("map_entries_index"); - assertThat(index) - .hasName("map_entries_index") - .hasParent((TableMetadata) column.getParent()) - .isNotCustomIndex() - .hasTarget("entries(map_entries)") - .hasKind(COMPOSITES) - .asCqlQuery(createEntriesIndex); - assertThat((TableMetadata) column.getParent()).hasIndex(index); - } - - @Test(groups = "short") - @CassandraVersion("3.0") - public void should_allow_multiple_indexes_on_map_column() { - String createEntriesIndex = String.format("CREATE INDEX map_all_entries_index ON %s.indexing (entries(map_all));", keyspace); - session().execute(createEntriesIndex); - String createKeysIndex = String.format("CREATE INDEX map_all_keys_index ON %s.indexing (keys(map_all));", keyspace); - session().execute(createKeysIndex); - String createValuesIndex = String.format("CREATE INDEX map_all_values_index ON %s.indexing (values(map_all));", keyspace); - session().execute(createValuesIndex); - - ColumnMetadata column = getColumn("map_all"); - TableMetadata table = (TableMetadata) column.getParent(); - - assertThat(getIndex("map_all_entries_index")) - .hasParent(table) - .asCqlQuery(createEntriesIndex); - - assertThat(getIndex("map_all_keys_index")) - .hasParent(table) - .asCqlQuery(createKeysIndex); - - assertThat(getIndex("map_all_values_index")) - .hasParent(table) - .asCqlQuery(createValuesIndex); - } - - @Test( - groups = "short", - description = "This test case builds a ColumnMetadata object programmatically to test custom indices with pre-3.0 layout," - + "otherwise, it would require deploying an actual custom index class into the C* test cluster") - public void should_parse_legacy_custom_index_options() { - TableMetadata table = getTable("indexing"); - List columnData = ImmutableList.of( - wrap("text_column"), // column name - wrap(0), // component index - wrap("regular"), // column kind - wrap("custom_index"), // index name - wrap("CUSTOM"), // index type - wrap("org.apache.cassandra.db.marshal.UTF8Type"), // validator - wrap("{\"foo\" : \"bar\", \"class_name\" : \"dummy.DummyIndex\"}") // index options - ); - Row columnRow = ArrayBackedRow.fromData(legacyColumnDefs, M3PToken.FACTORY, protocolVersion, columnData); - Raw columnRaw = Raw.fromRow(columnRow, VersionNumber.parse("2.1")); - ColumnMetadata column = ColumnMetadata.fromRaw(table, columnRaw, DataType.varchar()); - IndexMetadata index = IndexMetadata.fromLegacy(column, columnRaw); - assertThat(index) - .isNotNull() - .hasName("custom_index") - .isCustomIndex() - .hasOption("foo", "bar") - .hasKind(CUSTOM) - .asCqlQuery(String.format("CREATE CUSTOM INDEX custom_index ON %s.indexing (text_column) " - + "USING 'dummy.DummyIndex' WITH OPTIONS = {'foo' : 'bar'};", keyspace)); - } - - @Test( - groups = "short", - description = "This test case builds a ColumnMetadata object programmatically to test custom indices with post-3.0 layout," - + "otherwise, it would require deploying an actual custom index class into the C* test cluster") - public void should_parse_custom_index_options() { - TableMetadata table = getTable("indexing"); - List indexData = ImmutableList.of( - wrap("custom_index"), // index name - wrap("CUSTOM"), // kind - MAP_CODEC.serialize(ImmutableMap.of( - "foo", "bar", - IndexMetadata.CUSTOM_INDEX_OPTION_NAME, "dummy.DummyIndex", - IndexMetadata.TARGET_OPTION_NAME, "a, b, keys(c)" - ), protocolVersion) // options - ); - Row indexRow = ArrayBackedRow.fromData(indexColumnDefs, 
M3PToken.FACTORY, protocolVersion, indexData); - IndexMetadata index = IndexMetadata.fromRow(table, indexRow); - assertThat(index) - .isNotNull() - .hasName("custom_index") - .isCustomIndex() - .hasOption("foo", "bar") - .hasTarget("a, b, keys(c)") - .hasKind(CUSTOM) - .asCqlQuery(String.format("CREATE CUSTOM INDEX custom_index ON %s.indexing (a, b, keys(c)) " - + "USING 'dummy.DummyIndex' WITH OPTIONS = {'foo' : 'bar'};", keyspace)); - } - - /** - * Validates a special case where a 'KEYS' index was created using thrift. In this particular case the index lacks - * index_options, however the index_options value is a 'null' string rather then a null value. - * - * @test_category metadata - * @expected_result Index properly parsed and is present. - * @jira_ticket JAVA-834 - * @since 2.0.11, 2.1.7 - */ - @Test(groups = "short") - public void should_parse_with_null_string_index_options() { - TableMetadata table = getTable("indexing"); - List data = ImmutableList.of( - wrap("b@706172656e745f70617468"), // column name 'parent_path' - ByteBuffer.allocate(0), // component index (null) - wrap("regular"), // kind - wrap("cfs_archive_parent_path"), // index name - wrap("KEYS"), // index type - wrap("org.apache.cassandra.db.marshal.BytesType"), // validator - wrap("null") // index options - ); - Row row = ArrayBackedRow.fromData(legacyColumnDefs, M3PToken.FACTORY, cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), data); - Raw raw = Raw.fromRow(row, VersionNumber.parse("2.1")); - ColumnMetadata column = ColumnMetadata.fromRaw(table, raw, DataType.blob()); - IndexMetadata index = IndexMetadata.fromLegacy(column, raw); - assertThat(index) - .isNotNull() - .hasName("cfs_archive_parent_path") - .isNotCustomIndex() - .hasTarget("\"b@706172656e745f70617468\"") - .hasKind(KEYS) - .asCqlQuery(String.format("CREATE INDEX cfs_archive_parent_path ON %s.indexing (\"b@706172656e745f70617468\");", keyspace)); - assertThat(index.getOption(IndexMetadata.INDEX_KEYS_OPTION_NAME)).isNull(); // While the index type is KEYS, since it lacks index_options it does not get considered. - } - - private static ColumnDefinitions.Definition definition(String name, DataType type) { - return new ColumnDefinitions.Definition("ks", "table", name, type); - } - - private static ByteBuffer wrap(String value) { - return ByteBuffer.wrap(value.getBytes()); - } - - private static ByteBuffer wrap(int number) { - return ByteBuffer.wrap(Ints.toByteArray(number)); - } - - private ColumnMetadata getColumn(String name) { - return getColumn(name, true); - } - - private ColumnMetadata getColumn(String name, boolean fromTable) { - AbstractTableMetadata target = fromTable ? 
getTable("indexing") : getMaterializedView("mv1"); - return target.getColumn(name); - } - - private IndexMetadata getIndex(String name) { - return getTable("indexing").getIndex(name); - } - - private TableMetadata getTable(String name) { - return cluster().getMetadata().getKeyspace(keyspace).getTable(name); - } - - private MaterializedViewMetadata getMaterializedView(String name) { - return cluster().getMetadata().getKeyspace(keyspace).getMaterializedView(name); - } + }, + CodecRegistry.DEFAULT_INSTANCE); + + private static final TypeCodec> MAP_CODEC = + TypeCodec.map(TypeCodec.varchar(), TypeCodec.varchar()); + + private ProtocolVersion protocolVersion; + + @Override + public void onTestContextInitialized() { + protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + String createTable = + "CREATE TABLE indexing (" + + "id int," + + "id2 int," + + "map_values map," + + "map_keys map," + + "map_entries map," + + "map_all map," + + "text_column text, " + + "\"MixedCaseColumn\" list," + + + // Frozen collections was introduced only in C* 2.1.3 + (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.1.3")) >= 0 + ? ", map_full frozen>," + + "set_full frozen>," + + "list_full frozen>," + : "") + + "PRIMARY KEY (id, id2));"; + execute(createTable); + } + + @Test(groups = "short") + public void should_create_metadata_for_simple_index() { + String createValuesIndex = + String.format("CREATE INDEX text_column_index ON %s.indexing (text_column);", keyspace); + session().execute(createValuesIndex); + ColumnMetadata column = getColumn("text_column"); + IndexMetadata index = getIndex("text_column_index"); + assertThat(index) + .hasName("text_column_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("text_column") + .hasKind(COMPOSITES) + .asCqlQuery(createValuesIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion( + value = "2.1", + description = + "index names with quoted identifiers and collection indexes not supported until 2.1") + public void should_create_metadata_for_values_index_on_mixed_case_column() { + // 3.0 assumes the 'values' keyword if index on a collection + String createValuesIndex = + ccm().getCassandraVersion().getMajor() > 2 + ? String.format( + "CREATE INDEX \"MixedCaseIndex\" ON %s.indexing (values(\"MixedCaseColumn\"));", + keyspace) + : String.format( + "CREATE INDEX \"MixedCaseIndex\" ON %s.indexing (\"MixedCaseColumn\");", keyspace); + session().execute(createValuesIndex); + ColumnMetadata column = getColumn("\"MixedCaseColumn\""); + IndexMetadata index = getIndex("\"MixedCaseIndex\""); + assertThat(index) + .hasName("MixedCaseIndex") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget( + ccm().getCassandraVersion().getMajor() > 2 + ? "values(\"MixedCaseColumn\")" + : "\"MixedCaseColumn\"") + .hasKind(COMPOSITES) + .asCqlQuery(createValuesIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_create_metadata_for_index_on_map_values() { + // 3.0 assumes the 'values' keyword if index on a collection + String createValuesIndex = + ccm().getCassandraVersion().getMajor() > 2 + ? 
String.format( + "CREATE INDEX map_values_index ON %s.indexing (values(map_values));", keyspace) + : String.format("CREATE INDEX map_values_index ON %s.indexing (map_values);", keyspace); + session().execute(createValuesIndex); + ColumnMetadata column = getColumn("map_values"); + IndexMetadata index = getIndex("map_values_index"); + assertThat(index) + .hasName("map_values_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget(ccm().getCassandraVersion().getMajor() > 2 ? "values(map_values)" : "map_values") + .hasKind(COMPOSITES) + .asCqlQuery(createValuesIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_create_metadata_for_index_on_map_keys() { + String createKeysIndex = + String.format("CREATE INDEX map_keys_index ON %s.indexing (keys(map_keys));", keyspace); + session().execute(createKeysIndex); + ColumnMetadata column = getColumn("map_keys"); + IndexMetadata index = getIndex("map_keys_index"); + assertThat(index) + .hasName("map_keys_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("keys(map_keys)") + .hasKind(COMPOSITES) + .asCqlQuery(createKeysIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.1.3") + public void should_create_metadata_for_full_index_on_map() { + String createFullIndex = + String.format("CREATE INDEX map_full_index ON %s.indexing (full(map_full));", keyspace); + session().execute(createFullIndex); + ColumnMetadata column = getColumn("map_full"); + IndexMetadata index = getIndex("map_full_index"); + assertThat(index) + .hasName("map_full_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("full(map_full)") + .hasKind(COMPOSITES) + .asCqlQuery(createFullIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.1.3") + public void should_create_metadata_for_full_index_on_set() { + String createFullIndex = + String.format("CREATE INDEX set_full_index ON %s.indexing (full(set_full));", keyspace); + session().execute(createFullIndex); + ColumnMetadata column = getColumn("set_full"); + IndexMetadata index = getIndex("set_full_index"); + assertThat(index) + .hasName("set_full_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("full(set_full)") + .hasKind(COMPOSITES) + .asCqlQuery(createFullIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.1.3") + public void should_create_metadata_for_full_index_on_list() { + String createFullIndex = + String.format("CREATE INDEX list_full_index ON %s.indexing (full(list_full));", keyspace); + session().execute(createFullIndex); + ColumnMetadata column = getColumn("list_full"); + IndexMetadata index = getIndex("list_full_index"); + assertThat(index) + .hasName("list_full_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("full(list_full)") + .hasKind(COMPOSITES) + .asCqlQuery(createFullIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_create_metadata_for_index_on_map_entries() { + String createEntriesIndex = + String.format( + "CREATE INDEX map_entries_index ON %s.indexing (entries(map_entries));", keyspace); + 
session().execute(createEntriesIndex); + ColumnMetadata column = getColumn("map_entries"); + IndexMetadata index = getIndex("map_entries_index"); + assertThat(index) + .hasName("map_entries_index") + .hasParent((TableMetadata) column.getParent()) + .isNotCustomIndex() + .hasTarget("entries(map_entries)") + .hasKind(COMPOSITES) + .asCqlQuery(createEntriesIndex); + assertThat((TableMetadata) column.getParent()).hasIndex(index); + } + + @Test(groups = "short") + @CassandraVersion("3.0") + public void should_allow_multiple_indexes_on_map_column() { + String createEntriesIndex = + String.format( + "CREATE INDEX map_all_entries_index ON %s.indexing (entries(map_all));", keyspace); + session().execute(createEntriesIndex); + String createKeysIndex = + String.format("CREATE INDEX map_all_keys_index ON %s.indexing (keys(map_all));", keyspace); + session().execute(createKeysIndex); + String createValuesIndex = + String.format( + "CREATE INDEX map_all_values_index ON %s.indexing (values(map_all));", keyspace); + session().execute(createValuesIndex); + + ColumnMetadata column = getColumn("map_all"); + TableMetadata table = (TableMetadata) column.getParent(); + + assertThat(getIndex("map_all_entries_index")).hasParent(table).asCqlQuery(createEntriesIndex); + + assertThat(getIndex("map_all_keys_index")).hasParent(table).asCqlQuery(createKeysIndex); + + assertThat(getIndex("map_all_values_index")).hasParent(table).asCqlQuery(createValuesIndex); + } + + @Test( + groups = "short", + description = + "This test case builds a ColumnMetadata object programmatically to test custom indices with pre-3.0 layout," + + "otherwise, it would require deploying an actual custom index class into the C* test cluster") + public void should_parse_legacy_custom_index_options() { + TableMetadata table = getTable("indexing"); + List columnData = + ImmutableList.of( + wrap("text_column"), // column name + wrap(0), // component index + wrap("regular"), // column kind + wrap("custom_index"), // index name + wrap("CUSTOM"), // index type + wrap("org.apache.cassandra.db.marshal.UTF8Type"), // validator + wrap("{\"foo\" : \"bar\", \"class_name\" : \"dummy.DummyIndex\"}") // index options + ); + Row columnRow = + ArrayBackedRow.fromData(legacyColumnDefs, M3PToken.FACTORY, protocolVersion, columnData); + Raw columnRaw = Raw.fromRow(columnRow, VersionNumber.parse("2.1")); + ColumnMetadata column = ColumnMetadata.fromRaw(table, columnRaw, DataType.varchar()); + IndexMetadata index = IndexMetadata.fromLegacy(column, columnRaw); + assertThat(index) + .isNotNull() + .hasName("custom_index") + .isCustomIndex() + .hasOption("foo", "bar") + .hasKind(CUSTOM) + .asCqlQuery( + String.format( + "CREATE CUSTOM INDEX custom_index ON %s.indexing (text_column) " + + "USING 'dummy.DummyIndex' WITH OPTIONS = {'foo' : 'bar'};", + keyspace)); + } + + @Test( + groups = "short", + description = + "This test case builds a ColumnMetadata object programmatically to test custom indices with post-3.0 layout," + + "otherwise, it would require deploying an actual custom index class into the C* test cluster") + public void should_parse_custom_index_options() { + TableMetadata table = getTable("indexing"); + List indexData = + ImmutableList.of( + wrap("custom_index"), // index name + wrap("CUSTOM"), // kind + MAP_CODEC.serialize( + ImmutableMap.of( + "foo", + "bar", + IndexMetadata.CUSTOM_INDEX_OPTION_NAME, + "dummy.DummyIndex", + IndexMetadata.TARGET_OPTION_NAME, + "a, b, keys(c)"), + protocolVersion) // options + ); + Row indexRow = + 
ArrayBackedRow.fromData(indexColumnDefs, M3PToken.FACTORY, protocolVersion, indexData); + IndexMetadata index = IndexMetadata.fromRow(table, indexRow); + assertThat(index) + .isNotNull() + .hasName("custom_index") + .isCustomIndex() + .hasOption("foo", "bar") + .hasTarget("a, b, keys(c)") + .hasKind(CUSTOM) + .asCqlQuery( + String.format( + "CREATE CUSTOM INDEX custom_index ON %s.indexing (a, b, keys(c)) " + + "USING 'dummy.DummyIndex' WITH OPTIONS = {'foo' : 'bar'};", + keyspace)); + } + + /** + * Validates a special case where a 'KEYS' index was created using thrift. In this particular case + * the index lacks index_options, however the index_options value is a 'null' string rather then a + * null value. + * + * @test_category metadata + * @expected_result Index properly parsed and is present. + * @jira_ticket JAVA-834 + * @since 2.0.11, 2.1.7 + */ + @Test(groups = "short") + public void should_parse_with_null_string_index_options() { + TableMetadata table = getTable("indexing"); + List data = + ImmutableList.of( + wrap("b@706172656e745f70617468"), // column name 'parent_path' + ByteBuffer.allocate(0), // component index (null) + wrap("regular"), // kind + wrap("cfs_archive_parent_path"), // index name + wrap("KEYS"), // index type + wrap("org.apache.cassandra.db.marshal.BytesType"), // validator + wrap("null") // index options + ); + Row row = + ArrayBackedRow.fromData( + legacyColumnDefs, + M3PToken.FACTORY, + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), + data); + Raw raw = Raw.fromRow(row, VersionNumber.parse("2.1")); + ColumnMetadata column = ColumnMetadata.fromRaw(table, raw, DataType.blob()); + IndexMetadata index = IndexMetadata.fromLegacy(column, raw); + assertThat(index) + .isNotNull() + .hasName("cfs_archive_parent_path") + .isNotCustomIndex() + .hasTarget("\"b@706172656e745f70617468\"") + .hasKind(KEYS) + .asCqlQuery( + String.format( + "CREATE INDEX cfs_archive_parent_path ON %s.indexing (\"b@706172656e745f70617468\");", + keyspace)); + assertThat(index.getOption(IndexMetadata.INDEX_KEYS_OPTION_NAME)) + .isNull(); // While the index type is KEYS, since it lacks index_options it does not get + // considered. + } + + private static ColumnDefinitions.Definition definition(String name, DataType type) { + return new ColumnDefinitions.Definition("ks", "table", name, type); + } + + private static ByteBuffer wrap(String value) { + return ByteBuffer.wrap(value.getBytes()); + } + + private static ByteBuffer wrap(int number) { + return ByteBuffer.wrap(Ints.toByteArray(number)); + } + + private ColumnMetadata getColumn(String name) { + return getColumn(name, true); + } + + private ColumnMetadata getColumn(String name, boolean fromTable) { + AbstractTableMetadata target = fromTable ? 
getTable("indexing") : getMaterializedView("mv1"); + return target.getColumn(name); + } + + private IndexMetadata getIndex(String name) { + return getTable("indexing").getIndex(name); + } + + private TableMetadata getTable(String name) { + return cluster().getMetadata().getKeyspace(keyspace).getTable(name); + } + + private MaterializedViewMetadata getMaterializedView(String name) { + return cluster().getMetadata().getKeyspace(keyspace).getMaterializedView(name); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java index 49df45e6c35..3df510c7546 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,156 +17,181 @@ */ package com.datastax.driver.core; -import io.netty.handler.ssl.SslContextBuilder; -import org.testng.annotations.Test; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static io.netty.handler.ssl.SslProvider.OPENSSL; -import javax.net.ssl.*; +import io.netty.handler.ssl.SslContextBuilder; import java.net.Socket; -import java.security.*; +import java.security.InvalidAlgorithmParameterException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.Provider; +import java.security.SecureRandom; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static io.netty.handler.ssl.SslProvider.OPENSSL; +import javax.net.ssl.ManagerFactoryParameters; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.TrustManagerFactorySpi; +import javax.net.ssl.X509ExtendedTrustManager; +import org.testng.annotations.Test; @CreateCCM(PER_METHOD) @CCMConfig(auth = false) public class Jdk8SSLEncryptionTest extends SSLTestBase { - /** - * Validates that {@link RemoteEndpointAwareSSLOptions} implementations properly pass remote endpoint information - * to the underlying {@link SSLEngine} that is created. This is done by creating a custom {@link TrustManagerFactory} - * that inspects the peer information on the {@link SSLEngine} in - * {@link X509ExtendedTrustManager#checkServerTrusted(X509Certificate[], String, SSLEngine)} and throws a - * {@link CertificateException} if the peer host or port do not match. - *
<p/>
    - * This test is prefixed with 'Jdk8' so it only runs against JDK 8+ runtimes. This is required because - * X509ExtendedTrustManager was added in JDK 7. Technically this would also run against JDK 7, but for simplicity - * we only run it against 8+. - * - * @test_category connection:ssl - * @jira_ticket JAVA-1364 - * @since 3.2.0 - */ - @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) - public void should_pass_peer_address_to_engine(SslImplementation sslImplementation) throws Exception { - String expectedPeerHost = TestUtils.IP_PREFIX + "1"; - int expectedPeerPort = ccm().getBinaryPort(); - - EngineInspectingTrustManagerFactory tmf = new EngineInspectingTrustManagerFactory(expectedPeerHost, expectedPeerPort); - SSLOptions options = null; - switch (sslImplementation) { - case JDK: - SSLContext sslContext = SSLContext.getInstance("TLS"); - sslContext.init(null, tmf.getTrustManagers(), new SecureRandom()); - SSLParameters parameters = sslContext.getDefaultSSLParameters(); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - options = RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(sslContext).build(); - break; - case NETTY_OPENSSL: - SslContextBuilder builder = SslContextBuilder - .forClient() - .sslProvider(OPENSSL) - .trustManager(tmf); - - options = new RemoteEndpointAwareNettySSLOptions(builder.build()); - } - - connectWithSSLOptions(options); + /** + * Validates that {@link RemoteEndpointAwareSSLOptions} implementations properly pass remote + * endpoint information to the underlying {@link SSLEngine} that is created. This is done by + * creating a custom {@link TrustManagerFactory} that inspects the peer information on the {@link + * SSLEngine} in {@link X509ExtendedTrustManager#checkServerTrusted(X509Certificate[], String, + * SSLEngine)} and throws a {@link CertificateException} if the peer host or port do not match. + * + *
<p>
    This test is prefixed with 'Jdk8' so it only runs against JDK 8+ runtimes. This is required + * because X509ExtendedTrustManager was added in JDK 7. Technically this would also run against + * JDK 7, but for simplicity we only run it against 8+. + * + * @test_category connection:ssl + * @jira_ticket JAVA-1364 + * @since 3.2.0 + */ + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) + public void should_pass_peer_address_to_engine(SslImplementation sslImplementation) + throws Exception { + String expectedPeerHost = TestUtils.IP_PREFIX + "1"; + int expectedPeerPort = ccm().getBinaryPort(); + + EngineInspectingTrustManagerFactory tmf = + new EngineInspectingTrustManagerFactory(expectedPeerHost, expectedPeerPort); + SSLOptions options = null; + switch (sslImplementation) { + case JDK: + SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(null, tmf.getTrustManagers(), new SecureRandom()); + SSLParameters parameters = sslContext.getDefaultSSLParameters(); + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + options = RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(sslContext).build(); + break; + case NETTY_OPENSSL: + SslContextBuilder builder = + SslContextBuilder.forClient().sslProvider(OPENSSL).trustManager(tmf); + + options = new RemoteEndpointAwareNettySSLOptions(builder.build()); } - static class EngineInspectingTrustManagerFactory extends TrustManagerFactory { - - private static final Provider provider = new Provider("", 0.0, "") { + connectWithSSLOptions(options); + } - }; + static class EngineInspectingTrustManagerFactory extends TrustManagerFactory { - final EngineInspectingTrustManagerFactorySpi spi; + private static final Provider provider = new Provider("", 0.0, "") {}; - EngineInspectingTrustManagerFactory(String expectedPeerHost, int expectedPeerPort) { - this(new EngineInspectingTrustManagerFactorySpi(expectedPeerHost, expectedPeerPort)); - } + final EngineInspectingTrustManagerFactorySpi spi; - private EngineInspectingTrustManagerFactory(EngineInspectingTrustManagerFactorySpi spi) { - super(spi, provider, "EngineInspectingTrustManagerFactory"); - this.spi = spi; - } + EngineInspectingTrustManagerFactory(String expectedPeerHost, int expectedPeerPort) { + this(new EngineInspectingTrustManagerFactorySpi(expectedPeerHost, expectedPeerPort)); } - static class EngineInspectingTrustManagerFactorySpi extends TrustManagerFactorySpi { - - String expectedPeerHost; - int expectedPeerPort; - - private final TrustManager tm = new X509ExtendedTrustManager() { - - @Override - public void checkServerTrusted(X509Certificate[] certs, String authType, SSLEngine sslEngine) throws CertificateException { - // Capture peer address information and compare it to expectation. 
- String peerHost = sslEngine.getPeerHost(); - int peerPort = sslEngine.getPeerPort(); - if (peerHost == null || !peerHost.equals(expectedPeerHost)) { - throw new CertificateException(String.format("Expected SSLEngine.getPeerHost() (%s) to equal (%s)", peerHost, expectedPeerHost)); - } - if (peerPort != expectedPeerPort) { - throw new CertificateException(String.format("Expected SSLEngine.getPeerPort() (%d) to equal (%d)", peerPort, expectedPeerPort)); - } - } - - @Override - public void checkServerTrusted(X509Certificate[] certs, String authType, Socket socket) throws CertificateException { - // no op - } - - @Override - public void checkServerTrusted(X509Certificate[] certs, String authType) throws CertificateException { - // no op - } - - @Override - public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { - // Since we are doing server trust only, this is a no op. - throw new UnsupportedOperationException("TrustManger is for establishing server trust only."); - - } - - @Override - public void checkClientTrusted(X509Certificate[] certs, String authType, Socket socket) throws CertificateException { - // Since we are doing server trust only, this is a no op. - throw new UnsupportedOperationException("TrustManger is for establishing server trust only."); + private EngineInspectingTrustManagerFactory(EngineInspectingTrustManagerFactorySpi spi) { + super(spi, provider, "EngineInspectingTrustManagerFactory"); + this.spi = spi; + } + } + + static class EngineInspectingTrustManagerFactorySpi extends TrustManagerFactorySpi { + + String expectedPeerHost; + int expectedPeerPort; + + private final TrustManager tm = + new X509ExtendedTrustManager() { + + @Override + public void checkServerTrusted( + X509Certificate[] certs, String authType, SSLEngine sslEngine) + throws CertificateException { + // Capture peer address information and compare it to expectation. + String peerHost = sslEngine.getPeerHost(); + int peerPort = sslEngine.getPeerPort(); + if (peerHost == null || !peerHost.equals(expectedPeerHost)) { + throw new CertificateException( + String.format( + "Expected SSLEngine.getPeerHost() (%s) to equal (%s)", + peerHost, expectedPeerHost)); } - - @Override - public void checkClientTrusted(X509Certificate[] certs, String authType) throws CertificateException { - // Since we are doing server trust only, this is a no op. - throw new UnsupportedOperationException("TrustManger is for establishing server trust only."); + if (peerPort != expectedPeerPort) { + throw new CertificateException( + String.format( + "Expected SSLEngine.getPeerPort() (%d) to equal (%d)", + peerPort, expectedPeerPort)); } + } + @Override + public void checkServerTrusted(X509Certificate[] certs, String authType, Socket socket) + throws CertificateException { + // no op + } - @Override - public X509Certificate[] getAcceptedIssuers() { - return new X509Certificate[0]; - } + @Override + public void checkServerTrusted(X509Certificate[] certs, String authType) + throws CertificateException { + // no op + } + + @Override + public void checkClientTrusted( + X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) + throws CertificateException { + // Since we are doing server trust only, this is a no op. 
+ throw new UnsupportedOperationException( + "TrustManger is for establishing server trust only."); + } + + @Override + public void checkClientTrusted(X509Certificate[] certs, String authType, Socket socket) + throws CertificateException { + // Since we are doing server trust only, this is a no op. + throw new UnsupportedOperationException( + "TrustManger is for establishing server trust only."); + } + + @Override + public void checkClientTrusted(X509Certificate[] certs, String authType) + throws CertificateException { + // Since we are doing server trust only, this is a no op. + throw new UnsupportedOperationException( + "TrustManger is for establishing server trust only."); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } }; - EngineInspectingTrustManagerFactorySpi(String expectedPeerHost, int expectedPeerPort) { - this.expectedPeerHost = expectedPeerHost; - this.expectedPeerPort = expectedPeerPort; - } + EngineInspectingTrustManagerFactorySpi(String expectedPeerHost, int expectedPeerPort) { + this.expectedPeerHost = expectedPeerHost; + this.expectedPeerPort = expectedPeerPort; + } - @Override - protected void engineInit(KeyStore keyStore) throws KeyStoreException { - // no op - } + @Override + protected void engineInit(KeyStore keyStore) throws KeyStoreException { + // no op + } - @Override - protected void engineInit(ManagerFactoryParameters managerFactoryParameters) throws InvalidAlgorithmParameterException { - // no op - } + @Override + protected void engineInit(ManagerFactoryParameters managerFactoryParameters) + throws InvalidAlgorithmParameterException { + // no op + } - @Override - protected TrustManager[] engineGetTrustManagers() { - return new TrustManager[]{tm}; - } + @Override + protected TrustManager[] engineGetTrustManagers() { + return new TrustManager[] {tm}; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/KeyspaceMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/KeyspaceMetadataAssert.java index 77d014a8940..87b5810455e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/KeyspaceMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/KeyspaceMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,28 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; -public class KeyspaceMetadataAssert extends AbstractAssert { - protected KeyspaceMetadataAssert(KeyspaceMetadata actual) { - super(actual, KeyspaceMetadataAssert.class); - } +import org.assertj.core.api.AbstractAssert; + +public class KeyspaceMetadataAssert + extends AbstractAssert { + protected KeyspaceMetadataAssert(KeyspaceMetadata actual) { + super(actual, KeyspaceMetadataAssert.class); + } - public KeyspaceMetadataAssert hasName(String name) { - assertThat(actual.getName()).isEqualTo(name); - return this; - } + public KeyspaceMetadataAssert hasName(String name) { + assertThat(actual.getName()).isEqualTo(name); + return this; + } - public KeyspaceMetadataAssert isDurableWrites() { - assertThat(actual.isDurableWrites()).isTrue(); - return this; - } + public KeyspaceMetadataAssert isDurableWrites() { + assertThat(actual.isDurableWrites()).isTrue(); + return this; + } - public KeyspaceMetadataAssert isNotDurableWrites() { - assertThat(actual.isDurableWrites()).isFalse(); - return this; - } + public KeyspaceMetadataAssert isNotDurableWrites() { + assertThat(actual.isDurableWrites()).isFalse(); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LargeDataTest.java b/driver-core/src/test/java/com/datastax/driver/core/LargeDataTest.java index 94182794d6a..ab1c6c34120 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LargeDataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LargeDataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,275 +17,290 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.querybuilder.Batch; -import com.datastax.driver.core.querybuilder.Insert; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; - import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.batch; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; -/** - * Test limitations when using large amounts of data with the driver - */ +import com.datastax.driver.core.querybuilder.Batch; +import com.datastax.driver.core.querybuilder.Insert; +import com.datastax.driver.core.utils.CassandraVersion; +import java.nio.ByteBuffer; +import org.testng.annotations.Test; + +/** Test limitations when using large amounts of data with the driver */ @CreateCCM(PER_METHOD) public class LargeDataTest extends CCMTestsSupport { - /* - * Test a wide row of size 1,000,000 - * @param c The cluster object - * @param key The key value that will receive the data - * @throws Throwable - */ - private void testWideRows(int key) throws Throwable { - // Write data - for (int i = 0; i < 1000000; ++i) { - session().execute(insertInto("wide_rows").value("k", key).value("i", i).setConsistencyLevel(ConsistencyLevel.QUORUM)); - } - - // Read data - ResultSet rs = session().execute(select("i").from("wide_rows").where(eq("k", key))); - - // Verify data - int i = 0; - for (Row row : rs) { - assertTrue(row.getInt("i") == i++); - } + /* + * Test a wide row of size 1,000,000 + * @param c The cluster object + * @param key The key value that will receive the data + * @throws Throwable + */ + private void testWideRows(int key) throws Throwable { + // Write data + for (int i = 0; i < 1000000; ++i) { + session() + .execute( + insertInto("wide_rows") + .value("k", key) + .value("i", i) + .setConsistencyLevel(ConsistencyLevel.QUORUM)); } - /* - * Test a batch that writes a row of size 4,000 - * @param c The cluster object - * @param key The key value that will receive the data - * @throws Throwable - */ - private void testWideBatchRows(int key) throws Throwable { - // Write data - Batch q = batch(); - for (int i = 0; i < 4000; ++i) { - q = q.add(insertInto("wide_batch_rows").value("k", key).value("i", i)); - } - session().execute(q.setConsistencyLevel(ConsistencyLevel.QUORUM)); + // Read data + ResultSet rs = session().execute(select("i").from("wide_rows").where(eq("k", key))); - // Read data - ResultSet rs = session().execute(select("i").from("wide_batch_rows").where(eq("k", key))); + // Verify data + int i = 0; + for (Row row : rs) { + assertTrue(row.getInt("i") == i++); + } + } - // Verify data - int i = 0; - for (Row row : rs) { - assertTrue(row.getInt("i") == i++); - } + /* + * Test a batch that writes a row of size 4,000 + * 
@param c The cluster object + * @param key The key value that will receive the data + * @throws Throwable + */ + private void testWideBatchRows(int key) throws Throwable { + // Write data + Batch q = batch(); + for (int i = 0; i < 4000; ++i) { + q = q.add(insertInto("wide_batch_rows").value("k", key).value("i", i)); } + session().execute(q.setConsistencyLevel(ConsistencyLevel.QUORUM)); - /* - * Test a wide row of size 1,000,000 consisting of a ByteBuffer - * @param c The cluster object - * @param key The key value that will receive the data - * @throws Throwable - */ - private void testByteRows(int key) throws Throwable { - // Build small ByteBuffer sample - ByteBuffer bb = ByteBuffer.allocate(58); - bb.putShort((short) 0xCAFE); - bb.flip(); + // Read data + ResultSet rs = session().execute(select("i").from("wide_batch_rows").where(eq("k", key))); - // Write data - for (int i = 0; i < 1000000; ++i) { - session().execute(insertInto("wide_byte_rows").value("k", key).value("i", bb).setConsistencyLevel(ConsistencyLevel.QUORUM)); - } + // Verify data + int i = 0; + for (Row row : rs) { + assertTrue(row.getInt("i") == i++); + } + } - // Read data - ResultSet rs = session().execute(select("i").from("wide_byte_rows").where(eq("k", key))); + /* + * Test a wide row of size 1,000,000 consisting of a ByteBuffer + * @param c The cluster object + * @param key The key value that will receive the data + * @throws Throwable + */ + private void testByteRows(int key) throws Throwable { + // Build small ByteBuffer sample + ByteBuffer bb = ByteBuffer.allocate(58); + bb.putShort((short) 0xCAFE); + bb.flip(); - // Verify data - for (Row row : rs) { - assertEquals(row.getBytes("i"), bb); - } + // Write data + for (int i = 0; i < 1000000; ++i) { + session() + .execute( + insertInto("wide_byte_rows") + .value("k", key) + .value("i", bb) + .setConsistencyLevel(ConsistencyLevel.QUORUM)); } - /* - * Test a row with a single extra large text value - * @param c The cluster object - * @param key The key value that will receive the data - * @throws Throwable - */ - private void testLargeText(int key) throws Throwable { - // Write data - StringBuilder b = new StringBuilder(); - for (int i = 0; i < 1000000; ++i) { - // Create ultra-long text - b.append(i); - } - session().execute(insertInto("large_text").value("k", key).value("txt", b.toString()).setConsistencyLevel(ConsistencyLevel.QUORUM)); + // Read data + ResultSet rs = session().execute(select("i").from("wide_byte_rows").where(eq("k", key))); - // Read data - Row row = session().execute(select().all().from("large_text").where(eq("k", key))).one(); + // Verify data + for (Row row : rs) { + assertEquals(row.getBytes("i"), bb); + } + } - // Verify data - assertTrue(b.toString().equals(row.getString("txt"))); + /* + * Test a row with a single extra large text value + * @param c The cluster object + * @param key The key value that will receive the data + * @throws Throwable + */ + private void testLargeText(int key) throws Throwable { + // Write data + StringBuilder b = new StringBuilder(); + for (int i = 0; i < 1000000; ++i) { + // Create ultra-long text + b.append(i); } + session() + .execute( + insertInto("large_text") + .value("k", key) + .value("txt", b.toString()) + .setConsistencyLevel(ConsistencyLevel.QUORUM)); - /* - * Converts an integer to an string of letters - * @param i The integer that maps to letter - * @return - * - * TODO This doesn't offer protection from generating column names that are reserved keywords. 
This does - * work with CQL 3.0, but may break in future specifications. Should fix this to ensure it does not. - */ - private static String createColumnName(int i) { - String[] letters = {"a", "b", "c", "d", "e", "f", "g", "h", "j", "l"}; + // Read data + Row row = session().execute(select().all().from("large_text").where(eq("k", key))).one(); - StringBuilder columnName; - int currentI; + // Verify data + assertTrue(b.toString().equals(row.getString("txt"))); + } - currentI = i; - columnName = new StringBuilder(); - while (true) { - columnName.append(letters[currentI % 10]); - currentI /= 10; - if (currentI == 0) - break; - } + /* + * Converts an integer to an string of letters + * @param i The integer that maps to letter + * @return + * + * TODO This doesn't offer protection from generating column names that are reserved keywords. This does + * work with CQL 3.0, but may break in future specifications. Should fix this to ensure it does not. + */ + private static String createColumnName(int i) { + String[] letters = {"a", "b", "c", "d", "e", "f", "g", "h", "j", "l"}; - return columnName.toString(); - } + StringBuilder columnName; + int currentI; - /* - * Creates a table with 330 columns - * @param c The cluster object - * @param key The key value that will receive the data - * @throws Throwable - */ - private void testWideTable(int key) throws Throwable { - // Write data - Insert insertStatement = insertInto("wide_table").value("k", key); - for (int i = 0; i < 330; ++i) { - insertStatement = insertStatement.value(createColumnName(i), i); - } - session().execute(insertStatement.setConsistencyLevel(ConsistencyLevel.QUORUM)); + currentI = i; + columnName = new StringBuilder(); + while (true) { + columnName.append(letters[currentI % 10]); + currentI /= 10; + if (currentI == 0) break; + } - // Read data - Row row = session().execute(select().all().from("wide_table").where(eq("k", key))).one(); + return columnName.toString(); + } - // Verify data - for (int i = 0; i < 330; ++i) { - assertTrue(row.getInt(createColumnName(i)) == i); - } + /* + * Creates a table with 330 columns + * @param c The cluster object + * @param key The key value that will receive the data + * @throws Throwable + */ + private void testWideTable(int key) throws Throwable { + // Write data + Insert insertStatement = insertInto("wide_table").value("k", key); + for (int i = 0; i < 330; ++i) { + insertStatement = insertStatement.value(createColumnName(i), i); } + session().execute(insertStatement.setConsistencyLevel(ConsistencyLevel.QUORUM)); + // Read data + Row row = session().execute(select().all().from("wide_table").where(eq("k", key))).one(); - /** - * Test a wide row of size 1,000,000 - */ - @Test(groups = "stress") - @CassandraVersion(value = "2.0.0", description = "< 2.0 is skipped as 1.2 does not handle reading wide rows well.") - public void wideRows() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); - session().execute("USE large_data"); - session().execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_rows")); - testWideRows(0); + // Verify data + for (int i = 0; i < 330; ++i) { + assertTrue(row.getInt(createColumnName(i)) == i); } + } - /** - * Test a batch that writes a row of size 4,000 (just below the error threshold for 2.2). 
- */ - @Test(groups = "stress") - @CassandraVersion(value = "2.0.0", description = "< 2.0 is skipped as 1.2 does not handle large batches well.") - public void wideBatchRows() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); - session().execute("USE large_data"); - session().execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_batch_rows")); - testWideBatchRows(0); - } + /** Test a wide row of size 1,000,000 */ + @Test(groups = "stress") + @CassandraVersion( + value = "2.0.0", + description = "< 2.0 is skipped as 1.2 does not handle reading wide rows well.") + public void wideRows() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); + session().execute("USE large_data"); + session() + .execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_rows")); + testWideRows(0); + } - /** - * Test a wide row of size 1,000,000 consisting of a ByteBuffer - */ - @Test(groups = "stress") - public void wideByteRows() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); - session().execute("USE large_data"); - session().execute(String.format("CREATE TABLE %s (k INT, i BLOB, PRIMARY KEY(k, i))", "wide_byte_rows")); - testByteRows(0); - } + /** Test a batch that writes a row of size 4,000 (just below the error threshold for 2.2). */ + @Test(groups = "stress") + @CassandraVersion( + value = "2.0.0", + description = "< 2.0 is skipped as 1.2 does not handle large batches well.") + public void wideBatchRows() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); + session().execute("USE large_data"); + session() + .execute( + String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_batch_rows")); + testWideBatchRows(0); + } - /** - * Test a row with a single extra large text value - */ - @Test(groups = "stress") - public void largeText() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); - session().execute("USE large_data"); - session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, txt text)", "large_text")); - testLargeText(0); - } + /** Test a wide row of size 1,000,000 consisting of a ByteBuffer */ + @Test(groups = "stress") + public void wideByteRows() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); + session().execute("USE large_data"); + session() + .execute( + String.format("CREATE TABLE %s (k INT, i BLOB, PRIMARY KEY(k, i))", "wide_byte_rows")); + testByteRows(0); + } + + /** Test a row with a single extra large text value */ + @Test(groups = "stress") + public void largeText() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); + session().execute("USE large_data"); + session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, txt text)", "large_text")); + testLargeText(0); + } - /** - * Creates a table with 330 columns - */ - @Test(groups = "stress") - public void wideTable() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); - session().execute("USE large_data"); - // Create the extra wide table definition - StringBuilder tableDeclaration = new StringBuilder(); - tableDeclaration.append("CREATE TABLE wide_table ("); - tableDeclaration.append("k INT PRIMARY KEY"); - for (int i = 0; i < 330; ++i) { - 
tableDeclaration.append(String.format(", %s INT", createColumnName(i))); - } - tableDeclaration.append(')'); - session().execute(tableDeclaration.toString()); - testWideTable(0); + /** Creates a table with 330 columns */ + @Test(groups = "stress") + public void wideTable() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 1)); + session().execute("USE large_data"); + // Create the extra wide table definition + StringBuilder tableDeclaration = new StringBuilder(); + tableDeclaration.append("CREATE TABLE wide_table ("); + tableDeclaration.append("k INT PRIMARY KEY"); + for (int i = 0; i < 330; ++i) { + tableDeclaration.append(String.format(", %s INT", createColumnName(i))); } + tableDeclaration.append(')'); + session().execute(tableDeclaration.toString()); + testWideTable(0); + } - /** - * Tests 10 random tests consisting of the other methods in this class - */ - @Test(groups = "duration") - @CCMConfig(numberOfNodes = 3) - public void mixedDurationTest() throws Throwable { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 3)); - session().execute("USE large_data"); - session().execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_rows")); - session().execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_batch_rows")); - session().execute(String.format("CREATE TABLE %s (k INT, i BLOB, PRIMARY KEY(k, i))", "wide_byte_rows")); - session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, txt text)", "large_text")); - // Create the extra wide table definition - StringBuilder tableDeclaration = new StringBuilder(); - tableDeclaration.append("CREATE TABLE wide_table ("); - tableDeclaration.append("k INT PRIMARY KEY"); - for (int i = 0; i < 330; ++i) { - tableDeclaration.append(String.format(", %s INT", createColumnName(i))); - } - tableDeclaration.append(')'); - session().execute(tableDeclaration.toString()); - for (int i = 0; i < 10; ++i) { - switch ((int) (Math.random() * 5)) { - case 0: - testWideRows(0); - break; - case 1: - testWideBatchRows(0); - break; - case 2: - testByteRows(0); - break; - case 3: - testLargeText(0); - break; - case 4: - testWideTable(0); - break; - default: - break; - } - } + /** Tests 10 random tests consisting of the other methods in this class */ + @Test(groups = "duration") + @CCMConfig(numberOfNodes = 3) + public void mixedDurationTest() throws Throwable { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "large_data", 3)); + session().execute("USE large_data"); + session() + .execute(String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_rows")); + session() + .execute( + String.format("CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))", "wide_batch_rows")); + session() + .execute( + String.format("CREATE TABLE %s (k INT, i BLOB, PRIMARY KEY(k, i))", "wide_byte_rows")); + session().execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, txt text)", "large_text")); + // Create the extra wide table definition + StringBuilder tableDeclaration = new StringBuilder(); + tableDeclaration.append("CREATE TABLE wide_table ("); + tableDeclaration.append("k INT PRIMARY KEY"); + for (int i = 0; i < 330; ++i) { + tableDeclaration.append(String.format(", %s INT", createColumnName(i))); + } + tableDeclaration.append(')'); + session().execute(tableDeclaration.toString()); + for (int i = 0; i < 10; ++i) { + switch ((int) (Math.random() * 5)) { + case 0: + testWideRows(0); + break; + case 1: + 
testWideBatchRows(0); + break; + case 2: + testByteRows(0); + break; + case 3: + testLargeText(0); + break; + case 4: + testWideTable(0); + break; + default: + break; + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java index 0395009290b..4d1bd8336f6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,194 +17,210 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.Action.ADD; +import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.Action.DOWN; +import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.Action.INIT; +import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.Action.REMOVE; +import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.Action.UP; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.policies.DelegatingLoadBalancingPolicy; import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.RoundRobinPolicy; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.collect.Lists; +import java.util.Collection; +import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.Test; -import java.util.Collection; -import java.util.List; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.HistoryPolicy.Action.*; -import static com.datastax.driver.core.LoadBalancingPolicyBootstrapTest.HistoryPolicy.entry; -import static org.assertj.core.api.Assertions.assertThat; - @CCMConfig(numberOfNodes = 2, dirtiesContext = true, createCluster = false) @CreateCCM(PER_METHOD) public class LoadBalancingPolicyBootstrapTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(LoadBalancingPolicyBootstrapTest.class); - - /** - * Ensures that when a cluster is initialized that {@link LoadBalancingPolicy#init(Cluster, Collection)} is called - * with each reachable contact point. - * - * @test_category load_balancing:notification - * @expected_result init() is called for each of two contact points. 
- */ - @Test(groups = "short") - public void should_init_policy_with_up_contact_points() throws Exception { - HistoryPolicy policy = new HistoryPolicy(new RoundRobinPolicy()); - - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build()); - - try { - cluster.init(); - - assertThat(policy.history).containsOnly( - entry(INIT, TestUtils.findHost(cluster, 1)), - entry(INIT, TestUtils.findHost(cluster, 2)) - ); - } finally { - cluster.close(); + private static final Logger logger = + LoggerFactory.getLogger(LoadBalancingPolicyBootstrapTest.class); + + /** + * Ensures that when a cluster is initialized that {@link LoadBalancingPolicy#init(Cluster, + * Collection)} is called with each reachable contact point. + * + * @test_category load_balancing:notification + * @expected_result init() is called for each of two contact points. + */ + @Test(groups = "short") + public void should_init_policy_with_up_contact_points() throws Exception { + HistoryPolicy policy = new HistoryPolicy(new RoundRobinPolicy()); + + Cluster cluster = register(createClusterBuilder().withLoadBalancingPolicy(policy).build()); + + try { + cluster.init(); + + // To support astra, only hosts in Metadata#getContactPoints are passed to init() + // TestUtils#configureClusterBuilder only uses the first host as the contact point + // Remaining hosts are learned after connection via onAdd() + assertThat(policy.history) + .containsOnly( + entry(INIT, TestUtils.findHost(cluster, 1)), + entry(ADD, TestUtils.findHost(cluster, 2))); + } finally { + cluster.close(); + } + } + + /** + * Ensures that {@link LoadBalancingPolicy#onDown(Host)} is called for a contact point that + * couldn't be reached while initializing the control connection, but only after {@link + * LoadBalancingPolicy#init(Cluster, Collection)} has been called. + * + * @test_category load_balancing:notification + * @expected_result init() is called with the up host, followed by onDown() for the downed host. + * @jira_ticket JAVA-613 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "long") + public void should_send_down_notifications_after_init_when_contact_points_are_down() + throws Exception { + + // In order to validate this behavior, we need to stop the first node that would be attempted to + // be + // established as the control connection. + // This depends on the internal behavior and will even be made totally random by JAVA-618, + // therefore + // we retry the scenario until we get the desired preconditions. + + int nodeToStop = 1; + int tries = 1, maxTries = 10; + for (; tries <= maxTries; tries++) { + nodeToStop = (nodeToStop == 1) ? 2 : 1; // switch nodes at each try + int activeNode = nodeToStop == 2 ? 1 : 2; + + ccm().stop(nodeToStop); + ccm().waitForDown(nodeToStop); + + // usually only one contact point is used to build the test cluster + // here we explicitly add both endpoints so we can test load + // balancing initial connection when the first connection point is down + HistoryPolicy policy = new HistoryPolicy(new RoundRobinPolicy()); + Cluster cluster = + register( + createClusterBuilder() + .addContactPoints(ccm().getContactPoints().get(1)) + .withLoadBalancingPolicy(policy) + .build()); + + try { + cluster.init(); + + if (policy.history.contains(entry(DOWN, TestUtils.findHost(cluster, nodeToStop)))) { + // This is the situation we're testing, the control connection tried the stopped node + // first. 
+ assertThat(policy.history) + .containsExactly( + entry(INIT, TestUtils.findHost(cluster, activeNode)), + entry(DOWN, TestUtils.findHost(cluster, nodeToStop))); + break; + } else { + assertThat(policy.history) + .containsOnly( + entry(INIT, TestUtils.findHost(cluster, 1)), + entry(INIT, TestUtils.findHost(cluster, 2))); + + logger.info("Could not get first contact point to fail, retrying"); + + cluster.close(); + + ccm().start(nodeToStop); + ccm().waitForUp(nodeToStop); } + } finally { + cluster.close(); + } } - /** - * Ensures that {@link LoadBalancingPolicy#onDown(Host)} is called for a contact point that couldn't - * be reached while initializing the control connection, but only after - * {@link LoadBalancingPolicy#init(Cluster, Collection)} has been called. - * - * @test_category load_balancing:notification - * @expected_result init() is called with the up host, followed by onDown() for the downed host. - * @jira_ticket JAVA-613 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "long") - public void should_send_down_notifications_after_init_when_contact_points_are_down() throws Exception { - - // In order to validate this behavior, we need to stop the first node that would be attempted to be - // established as the control connection. - // This depends on the internal behavior and will even be made totally random by JAVA-618, therefore - // we retry the scenario until we get the desired preconditions. - - int nodeToStop = 1; - int tries = 1, maxTries = 10; - for (; tries <= maxTries; tries++) { - nodeToStop = (nodeToStop == 1) ? 2 : 1; // switch nodes at each try - int activeNode = nodeToStop == 2 ? 1 : 2; - - ccm().stop(nodeToStop); - ccm().waitForDown(nodeToStop); - - HistoryPolicy policy = new HistoryPolicy(new RoundRobinPolicy()); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build()); - - try { - cluster.init(); - - if (policy.history.contains(entry(DOWN, TestUtils.findHost(cluster, nodeToStop)))) { - // This is the situation we're testing, the control connection tried the stopped node first. 
- assertThat(policy.history).containsExactly( - entry(INIT, TestUtils.findHost(cluster, activeNode)), - entry(DOWN, TestUtils.findHost(cluster, nodeToStop)) - ); - break; - } else { - assertThat(policy.history).containsOnly( - entry(INIT, TestUtils.findHost(cluster, 1)), - entry(INIT, TestUtils.findHost(cluster, 2)) - ); - - logger.info("Could not get first contact point to fail, retrying"); - - cluster.close(); - - ccm().start(nodeToStop); - ccm().waitForUp(nodeToStop); - } - } finally { - cluster.close(); - } - } + if (tries == maxTries + 1) + logger.warn("Could not get first contact point to fail after {} tries", maxTries); + } + + enum Action { + INIT, + UP, + DOWN, + ADD, + REMOVE + } + + static Entry entry(Action action, Host host) { + return new Entry(action, host); + } + + static class Entry { + final Action action; + final Host host; + + public Entry(Action action, Host host) { + this.action = action; + this.host = host; + } - if (tries == maxTries + 1) - logger.warn("Could not get first contact point to fail after {} tries", maxTries); + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (other instanceof Entry) { + Entry that = (Entry) other; + return this.action == that.action && this.host.equals(that.host); + } + return false; } - static class HistoryPolicy extends DelegatingLoadBalancingPolicy { - enum Action {INIT, UP, DOWN, ADD, REMOVE} - - static class Entry { - final Action action; - final Host host; - - public Entry(Action action, Host host) { - this.action = action; - this.host = host; - } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (other instanceof Entry) { - Entry that = (Entry) other; - return this.action == that.action && this.host.equals(that.host); - } - return false; - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(action, host); - } - - @Override - public String toString() { - return String.format("Entry(action=%s, host=%s)", action, host); - } - } + @Override + public int hashCode() { + return MoreObjects.hashCode(action, host); + } - static Entry entry(Action action, Host host) { - return new Entry(action, host); - } + @Override + public String toString() { + return String.format("Entry(action=%s, host=%s)", action, host); + } + } - List history = Lists.newArrayList(); + static class HistoryPolicy extends DelegatingLoadBalancingPolicy { - public HistoryPolicy(LoadBalancingPolicy delegate) { - super(delegate); - } + List history = Lists.newArrayList(); - @Override - public void init(Cluster cluster, Collection hosts) { - super.init(cluster, hosts); - for (Host host : hosts) { - history.add(entry(INIT, host)); - } - } + public HistoryPolicy(LoadBalancingPolicy delegate) { + super(delegate); + } - public void onAdd(Host host) { - history.add(entry(ADD, host)); - super.onAdd(host); - } + @Override + public void init(Cluster cluster, Collection hosts) { + super.init(cluster, hosts); + for (Host host : hosts) { + history.add(entry(INIT, host)); + } + } - public void onUp(Host host) { - history.add(entry(UP, host)); - super.onUp(host); - } + public void onAdd(Host host) { + history.add(entry(ADD, host)); + super.onAdd(host); + } - public void onDown(Host host) { - history.add(entry(DOWN, host)); - super.onDown(host); - } + public void onUp(Host host) { + history.add(entry(UP, host)); + super.onUp(host); + } - public void onRemove(Host host) { - history.add(entry(REMOVE, host)); - super.onRemove(host); - } + public void onDown(Host host) { + 
history.add(entry(DOWN, host)); + super.onDown(host); + } + + public void onRemove(Host host) { + history.add(entry(REMOVE, host)); + super.onRemove(host); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyRefreshTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyRefreshTest.java index f8878d5e734..e1829a65558 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyRefreshTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyRefreshTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,92 +17,102 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.testng.Assert.assertTrue; + import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.google.common.collect.Iterators; -import org.testng.annotations.Test; - import java.net.InetAddress; import java.util.Collection; import java.util.Iterator; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; // Test that PoolingOpions.refreshConnectedHosts works as expected (JAVA-309) @CreateCCM(PER_METHOD) public class LoadBalancingPolicyRefreshTest extends CCMTestsSupport { - UpdatablePolicy policy; + UpdatablePolicy policy; - private class UpdatablePolicy implements LoadBalancingPolicy { + private class UpdatablePolicy implements LoadBalancingPolicy { - private Cluster cluster; - private Host theHost; + private Cluster cluster; + private Host theHost; - public void changeTheHost(Host theNewHost) { - this.theHost = theNewHost; - cluster.getConfiguration().getPoolingOptions().refreshConnectedHosts(); - } + public void changeTheHost(Host theNewHost) { + this.theHost = theNewHost; + cluster.getConfiguration().getPoolingOptions().refreshConnectedHosts(); + } - public void init(Cluster cluster, Collection hosts) { - this.cluster = cluster; - try { - for (Host h : hosts) - if (h.getAddress().equals(InetAddress.getByName(TestUtils.IP_PREFIX + '1'))) - this.theHost = h; - } catch (Exception e) { - throw new RuntimeException(e); - } - } + @Override + public void init(Cluster cluster, Collection hosts) { + this.cluster = cluster; + try { + for (Host h : hosts) + if (h.getEndPoint() + .resolve() + .getAddress() + .equals(InetAddress.getByName(TestUtils.IP_PREFIX + '1'))) this.theHost = h; + } catch (Exception e) { + throw new RuntimeException(e); + } + } - public HostDistance distance(Host host) { - return host == theHost ? 
HostDistance.LOCAL : HostDistance.IGNORED; - } + @Override + public HostDistance distance(Host host) { + return host == theHost ? HostDistance.LOCAL : HostDistance.IGNORED; + } - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - return Iterators.singletonIterator(theHost); - } + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + return Iterators.singletonIterator(theHost); + } - public void onAdd(Host h) { - } + @Override + public void onAdd(Host h) {} - public void onRemove(Host h) { - } + @Override + public void onRemove(Host h) {} - public void onUp(Host h) { - } + @Override + public void onUp(Host h) {} - public void onDown(Host h) { - } + @Override + public void onDown(Host h) {} - public void close() { - } - } + @Override + public void close() {} + } - @Test(groups = "short") - @CCMConfig(numberOfNodes = 2, clusterProvider = "updatablePolicy") - public void refreshTest() throws Throwable { - // Ugly - Host[] hosts = new Host[2]; - for (Host h : cluster().getMetadata().getAllHosts()) { - if (h.getAddress().equals(ccm().addressOfNode(1).getAddress())) - hosts[0] = h; - else - hosts[1] = h; - } - - assertTrue(session().getState().getConnectedHosts().contains(hosts[0]), "Connected hosts: " + session().getState().getConnectedHosts()); - assertTrue(!session().getState().getConnectedHosts().contains(hosts[1]), "Connected hosts: " + session().getState().getConnectedHosts()); - - policy.changeTheHost(hosts[1]); - - assertTrue(!session().getState().getConnectedHosts().contains(hosts[0]), "Connected hosts: " + session().getState().getConnectedHosts()); - assertTrue(session().getState().getConnectedHosts().contains(hosts[1]), "Connected hosts: " + session().getState().getConnectedHosts()); + @Test(groups = "short") + @CCMConfig(numberOfNodes = 2, clusterProvider = "updatablePolicy") + public void refreshTest() throws Throwable { + // Ugly + Host[] hosts = new Host[2]; + for (Host h : cluster().getMetadata().getAllHosts()) { + if (h.getEndPoint().resolve().equals(ccm().addressOfNode(1))) hosts[0] = h; + else hosts[1] = h; } - @SuppressWarnings("unused") - private Cluster.Builder updatablePolicy() { - policy = new UpdatablePolicy(); - return Cluster.builder().withLoadBalancingPolicy(policy); - } + assertTrue( + session().getState().getConnectedHosts().contains(hosts[0]), + "Connected hosts: " + session().getState().getConnectedHosts()); + assertTrue( + !session().getState().getConnectedHosts().contains(hosts[1]), + "Connected hosts: " + session().getState().getConnectedHosts()); + + policy.changeTheHost(hosts[1]); + + assertTrue( + !session().getState().getConnectedHosts().contains(hosts[0]), + "Connected hosts: " + session().getState().getConnectedHosts()); + assertTrue( + session().getState().getConnectedHosts().contains(hosts[1]), + "Connected hosts: " + session().getState().getConnectedHosts()); + } + + @SuppressWarnings("unused") + private Cluster.Builder updatablePolicy() { + policy = new UpdatablePolicy(); + return Cluster.builder().withLoadBalancingPolicy(policy); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LocalDateAssert.java b/driver-core/src/test/java/com/datastax/driver/core/LocalDateAssert.java index fb034b43efa..78b3871b8e7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LocalDateAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LocalDateAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,34 +17,34 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; +import org.assertj.core.api.AbstractAssert; + public class LocalDateAssert extends AbstractAssert { - public LocalDateAssert(LocalDate actual) { - super(actual, LocalDateAssert.class); - } + public LocalDateAssert(LocalDate actual) { + super(actual, LocalDateAssert.class); + } - public LocalDateAssert hasDaysSinceEpoch(int expected) { - assertThat(actual.getDaysSinceEpoch()).isEqualTo(expected); - return this; - } + public LocalDateAssert hasDaysSinceEpoch(int expected) { + assertThat(actual.getDaysSinceEpoch()).isEqualTo(expected); + return this; + } - public LocalDateAssert hasMillisSinceEpoch(long expected) { - assertThat(actual.getMillisSinceEpoch()).isEqualTo(expected); - return this; - } + public LocalDateAssert hasMillisSinceEpoch(long expected) { + assertThat(actual.getMillisSinceEpoch()).isEqualTo(expected); + return this; + } - public LocalDateAssert hasYearMonthDay(int expectedYear, int expectedMonth, int expectedDay) { - assertThat(actual.getYear()).isEqualTo(expectedYear); - assertThat(actual.getMonth()).isEqualTo(expectedMonth); - assertThat(actual.getDay()).isEqualTo(expectedDay); - return this; - } + public LocalDateAssert hasYearMonthDay(int expectedYear, int expectedMonth, int expectedDay) { + assertThat(actual.getYear()).isEqualTo(expectedYear); + assertThat(actual.getMonth()).isEqualTo(expectedMonth); + assertThat(actual.getDay()).isEqualTo(expectedDay); + return this; + } - public LocalDateAssert hasToString(String expected) { - assertThat(actual.toString()).isEqualTo(expected); - return this; - } + public LocalDateAssert hasToString(String expected) { + assertThat(actual.toString()).isEqualTo(expected); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LocalDateTest.java b/driver-core/src/test/java/com/datastax/driver/core/LocalDateTest.java index 78a43e9cdb6..474039664af 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LocalDateTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LocalDateTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,180 +17,180 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.LocalDate.fromDaysSinceEpoch; +import static com.datastax.driver.core.LocalDate.fromMillisSinceEpoch; +import static com.datastax.driver.core.LocalDate.fromYearMonthDay; import java.util.Calendar; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.LocalDate.*; +import org.testng.annotations.Test; public class LocalDateTest { - @Test(groups = "unit") - public void should_build_from_days_since_epoch() { - assertThat(fromDaysSinceEpoch(0)) - .hasMillisSinceEpoch(0) - .hasDaysSinceEpoch(0) - .hasYearMonthDay(1970, 1, 1) - .hasToString("1970-01-01"); - - assertThat(fromDaysSinceEpoch(10)) - .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(10)) - .hasDaysSinceEpoch(10) - .hasYearMonthDay(1970, 1, 11) - .hasToString("1970-01-11"); - - assertThat(fromDaysSinceEpoch(-10)) - .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(-10)) - .hasDaysSinceEpoch(-10) - .hasYearMonthDay(1969, 12, 22) - .hasToString("1969-12-22"); - - assertThat(fromDaysSinceEpoch(Integer.MAX_VALUE)) - .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(Integer.MAX_VALUE)) - .hasDaysSinceEpoch(Integer.MAX_VALUE) - .hasYearMonthDay(5881580, 7, 11) - .hasToString("5881580-07-11"); - - assertThat(fromDaysSinceEpoch(Integer.MIN_VALUE)) - .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(Integer.MIN_VALUE)) - .hasDaysSinceEpoch(Integer.MIN_VALUE) - .hasYearMonthDay(-5877641, 6, 23) - .hasToString("-5877641-06-23"); + @Test(groups = "unit") + public void should_build_from_days_since_epoch() { + assertThat(fromDaysSinceEpoch(0)) + .hasMillisSinceEpoch(0) + .hasDaysSinceEpoch(0) + .hasYearMonthDay(1970, 1, 1) + .hasToString("1970-01-01"); + + assertThat(fromDaysSinceEpoch(10)) + .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(10)) + .hasDaysSinceEpoch(10) + .hasYearMonthDay(1970, 1, 11) + .hasToString("1970-01-11"); + + assertThat(fromDaysSinceEpoch(-10)) + .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(-10)) + .hasDaysSinceEpoch(-10) + .hasYearMonthDay(1969, 12, 22) + .hasToString("1969-12-22"); + + assertThat(fromDaysSinceEpoch(Integer.MAX_VALUE)) + .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(Integer.MAX_VALUE)) + .hasDaysSinceEpoch(Integer.MAX_VALUE) + .hasYearMonthDay(5881580, 7, 11) + .hasToString("5881580-07-11"); + + assertThat(fromDaysSinceEpoch(Integer.MIN_VALUE)) + .hasMillisSinceEpoch(TimeUnit.DAYS.toMillis(Integer.MIN_VALUE)) + .hasDaysSinceEpoch(Integer.MIN_VALUE) + .hasYearMonthDay(-5877641, 6, 23) + .hasToString("-5877641-06-23"); + } + + @Test(groups = "unit") + public void should_build_from_millis_since_epoch() { + assertThat(fromMillisSinceEpoch(0)) + .hasMillisSinceEpoch(0) + .hasDaysSinceEpoch(0) + .hasYearMonthDay(1970, 1, 1) + .hasToString("1970-01-01"); + + // Rounding + assertThat(fromMillisSinceEpoch(3600)) + .hasMillisSinceEpoch(0) + .hasDaysSinceEpoch(0) + .hasYearMonthDay(1970, 1, 1) + 
.hasToString("1970-01-01"); + assertThat(fromMillisSinceEpoch(-3600)) + .hasMillisSinceEpoch(0) + .hasDaysSinceEpoch(0) + .hasYearMonthDay(1970, 1, 1) + .hasToString("1970-01-01"); + + // Bound checks + try { + fromMillisSinceEpoch(TimeUnit.DAYS.toMillis((long) Integer.MIN_VALUE - 1)); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_build_from_millis_since_epoch() { - assertThat(fromMillisSinceEpoch(0)) - .hasMillisSinceEpoch(0) - .hasDaysSinceEpoch(0) - .hasYearMonthDay(1970, 1, 1) - .hasToString("1970-01-01"); - - // Rounding - assertThat(fromMillisSinceEpoch(3600)) - .hasMillisSinceEpoch(0) - .hasDaysSinceEpoch(0) - .hasYearMonthDay(1970, 1, 1) - .hasToString("1970-01-01"); - assertThat(fromMillisSinceEpoch(-3600)) - .hasMillisSinceEpoch(0) - .hasDaysSinceEpoch(0) - .hasYearMonthDay(1970, 1, 1) - .hasToString("1970-01-01"); - - // Bound checks - try { - fromMillisSinceEpoch(TimeUnit.DAYS.toMillis((long) Integer.MIN_VALUE - 1)); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - fromMillisSinceEpoch(TimeUnit.DAYS.toMillis((long) Integer.MAX_VALUE + 1)); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } + try { + fromMillisSinceEpoch(TimeUnit.DAYS.toMillis((long) Integer.MAX_VALUE + 1)); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_build_from_year_month_day() { - assertThat(fromYearMonthDay(1970, 1, 1)) - .hasMillisSinceEpoch(0) - .hasDaysSinceEpoch(0) - .hasYearMonthDay(1970, 1, 1) - .hasToString("1970-01-01"); - - // Handling of 0 / negative years - assertThat(fromYearMonthDay(1, 1, 1)) - .hasDaysSinceEpoch(-719162) - .hasYearMonthDay(1, 1, 1) - .hasToString("1-01-01"); - assertThat(fromYearMonthDay(0, 1, 1)) - .hasDaysSinceEpoch(-719162 - 366) - .hasYearMonthDay(0, 1, 1) - .hasToString("0-01-01"); - assertThat(fromYearMonthDay(-1, 1, 1)) - .hasDaysSinceEpoch(-719162 - 366 - 365) - .hasYearMonthDay(-1, 1, 1) - .hasToString("-1-01-01"); - - // Month/day out of bounds - try { - fromYearMonthDay(1970, 0, 1); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - fromYearMonthDay(1970, 13, 1); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - fromYearMonthDay(1970, 1, 0); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - fromYearMonthDay(1970, 1, 32); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - - // Resulting date out of bounds - try { - fromYearMonthDay(6000000, 1, 1); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - fromYearMonthDay(-6000000, 1, 1); - Assertions.fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } + } + + @Test(groups = "unit") + public void should_build_from_year_month_day() { + assertThat(fromYearMonthDay(1970, 1, 1)) + .hasMillisSinceEpoch(0) + .hasDaysSinceEpoch(0) + .hasYearMonthDay(1970, 1, 1) + .hasToString("1970-01-01"); + + // Handling of 0 / negative years + 
assertThat(fromYearMonthDay(1, 1, 1)) + .hasDaysSinceEpoch(-719162) + .hasYearMonthDay(1, 1, 1) + .hasToString("1-01-01"); + assertThat(fromYearMonthDay(0, 1, 1)) + .hasDaysSinceEpoch(-719162 - 366) + .hasYearMonthDay(0, 1, 1) + .hasToString("0-01-01"); + assertThat(fromYearMonthDay(-1, 1, 1)) + .hasDaysSinceEpoch(-719162 - 366 - 365) + .hasYearMonthDay(-1, 1, 1) + .hasToString("-1-01-01"); + + // Month/day out of bounds + try { + fromYearMonthDay(1970, 0, 1); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_add_and_subtract_years() { - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, 1)) - .hasYearMonthDay(1971, 1, 1); - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1)) - .hasYearMonthDay(1969, 1, 1); - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1970)) - .hasYearMonthDay(0, 1, 1); - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1971)) - .hasYearMonthDay(-1, 1, 1); - assertThat(fromYearMonthDay(0, 5, 12).add(Calendar.YEAR, 1)) - .hasYearMonthDay(1, 5, 12); - assertThat(fromYearMonthDay(-1, 5, 12).add(Calendar.YEAR, 1)) - .hasYearMonthDay(0, 5, 12); - assertThat(fromYearMonthDay(-1, 5, 12).add(Calendar.YEAR, 2)) - .hasYearMonthDay(1, 5, 12); + try { + fromYearMonthDay(1970, 13, 1); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_add_and_subtract_months() { - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, 2)) - .hasYearMonthDay(1970, 3, 1); - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, 24)) - .hasYearMonthDay(1972, 1, 1); - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, -5)) - .hasYearMonthDay(1969, 8, 1); - assertThat(fromYearMonthDay(1, 1, 1).add(Calendar.MONTH, -1)) - .hasYearMonthDay(0, 12, 1); - assertThat(fromYearMonthDay(0, 1, 1).add(Calendar.MONTH, -1)) - .hasYearMonthDay(-1, 12, 1); - assertThat(fromYearMonthDay(-1, 12, 1).add(Calendar.MONTH, 1)) - .hasYearMonthDay(0, 1, 1); - assertThat(fromYearMonthDay(0, 12, 1).add(Calendar.MONTH, 1)) - .hasYearMonthDay(1, 1, 1); + try { + fromYearMonthDay(1970, 1, 0); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ + } + try { + fromYearMonthDay(1970, 1, 32); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - @Test(groups = "unit") - public void should_add_and_subtract_days() { - assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.DAY_OF_MONTH, 12)) - .hasYearMonthDay(1970, 1, 13); - assertThat(fromYearMonthDay(1970, 3, 28).add(Calendar.DAY_OF_MONTH, -40)) - .hasYearMonthDay(1970, 2, 16); - assertThat(fromYearMonthDay(1, 1, 1).add(Calendar.DAY_OF_MONTH, -2)) - .hasYearMonthDay(0, 12, 30); - assertThat(fromYearMonthDay(0, 1, 1).add(Calendar.DAY_OF_MONTH, -2)) - .hasYearMonthDay(-1, 12, 30); - assertThat(fromYearMonthDay(-1, 12, 31).add(Calendar.DAY_OF_MONTH, 4)) - .hasYearMonthDay(0, 1, 4); - assertThat(fromYearMonthDay(0, 12, 25).add(Calendar.DAY_OF_MONTH, 14)) - .hasYearMonthDay(1, 1, 8); + // Resulting date out of bounds + try { + fromYearMonthDay(6000000, 1, 1); + Assertions.fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ + } + try { + fromYearMonthDay(-6000000, 1, 1); + Assertions.fail("Expected an IllegalArgumentException"); + } 
catch (IllegalArgumentException e) { + /*expected*/ } + } + + @Test(groups = "unit") + public void should_add_and_subtract_years() { + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, 1)).hasYearMonthDay(1971, 1, 1); + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1)).hasYearMonthDay(1969, 1, 1); + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1970)).hasYearMonthDay(0, 1, 1); + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.YEAR, -1971)).hasYearMonthDay(-1, 1, 1); + assertThat(fromYearMonthDay(0, 5, 12).add(Calendar.YEAR, 1)).hasYearMonthDay(1, 5, 12); + assertThat(fromYearMonthDay(-1, 5, 12).add(Calendar.YEAR, 1)).hasYearMonthDay(0, 5, 12); + assertThat(fromYearMonthDay(-1, 5, 12).add(Calendar.YEAR, 2)).hasYearMonthDay(1, 5, 12); + } + + @Test(groups = "unit") + public void should_add_and_subtract_months() { + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, 2)).hasYearMonthDay(1970, 3, 1); + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, 24)).hasYearMonthDay(1972, 1, 1); + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.MONTH, -5)).hasYearMonthDay(1969, 8, 1); + assertThat(fromYearMonthDay(1, 1, 1).add(Calendar.MONTH, -1)).hasYearMonthDay(0, 12, 1); + assertThat(fromYearMonthDay(0, 1, 1).add(Calendar.MONTH, -1)).hasYearMonthDay(-1, 12, 1); + assertThat(fromYearMonthDay(-1, 12, 1).add(Calendar.MONTH, 1)).hasYearMonthDay(0, 1, 1); + assertThat(fromYearMonthDay(0, 12, 1).add(Calendar.MONTH, 1)).hasYearMonthDay(1, 1, 1); + } + + @Test(groups = "unit") + public void should_add_and_subtract_days() { + assertThat(fromYearMonthDay(1970, 1, 1).add(Calendar.DAY_OF_MONTH, 12)) + .hasYearMonthDay(1970, 1, 13); + assertThat(fromYearMonthDay(1970, 3, 28).add(Calendar.DAY_OF_MONTH, -40)) + .hasYearMonthDay(1970, 2, 16); + assertThat(fromYearMonthDay(1, 1, 1).add(Calendar.DAY_OF_MONTH, -2)).hasYearMonthDay(0, 12, 30); + assertThat(fromYearMonthDay(0, 1, 1).add(Calendar.DAY_OF_MONTH, -2)) + .hasYearMonthDay(-1, 12, 30); + assertThat(fromYearMonthDay(-1, 12, 31).add(Calendar.DAY_OF_MONTH, 4)).hasYearMonthDay(0, 1, 4); + assertThat(fromYearMonthDay(0, 12, 25).add(Calendar.DAY_OF_MONTH, 14)).hasYearMonthDay(1, 1, 8); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenFactoryTest.java b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenFactoryTest.java index 4e26fd74ac8..2e8c79f13be 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenFactoryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenFactoryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,79 +17,76 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class M3PTokenFactoryTest { - Token.Factory factory = Token.M3PToken.FACTORY; - - @Test(groups = "unit") - public void should_split_range() { - List splits = factory.split(factory.fromString("-9223372036854775808"), factory.fromString("4611686018427387904"), 3); - assertThat(splits).containsExactly( - factory.fromString("-4611686018427387904"), - factory.fromString("0") - ); - } + Token.Factory factory = Token.M3PToken.FACTORY; - @Test(groups = "unit") - public void should_split_range_that_wraps_around_the_ring() { - List splits = factory.split(factory.fromString("4611686018427387904"), factory.fromString("0"), 3); - assertThat(splits).containsExactly( - factory.fromString("-9223372036854775807"), - factory.fromString("-4611686018427387903") - ); - } + @Test(groups = "unit") + public void should_split_range() { + List splits = + factory.split( + factory.fromString("-9223372036854775808"), + factory.fromString("4611686018427387904"), + 3); + assertThat(splits) + .containsExactly(factory.fromString("-4611686018427387904"), factory.fromString("0")); + } - @Test(groups = "unit") - public void should_split_range_when_division_not_integral() { - List splits = factory.split(factory.fromString("0"), factory.fromString("11"), 3); - assertThat(splits).containsExactly( - factory.fromString("4"), - factory.fromString("8") - ); - } + @Test(groups = "unit") + public void should_split_range_that_wraps_around_the_ring() { + List splits = + factory.split(factory.fromString("4611686018427387904"), factory.fromString("0"), 3); + assertThat(splits) + .containsExactly( + factory.fromString("-9223372036854775807"), factory.fromString("-4611686018427387903")); + } - @Test(groups = "unit") - public void should_split_range_producing_empty_splits() { - List splits = factory.split(factory.fromString("0"), factory.fromString("2"), 5); - assertThat(splits).containsExactly( - factory.fromString("1"), - factory.fromString("2"), - factory.fromString("2"), - factory.fromString("2") - ); - } + @Test(groups = "unit") + public void should_split_range_when_division_not_integral() { + List splits = factory.split(factory.fromString("0"), factory.fromString("11"), 3); + assertThat(splits).containsExactly(factory.fromString("4"), factory.fromString("8")); + } - @Test(groups = "unit") - public void should_split_range_producing_empty_splits_near_ring_end() { - Token minToken = factory.fromString("-9223372036854775808"); - Token maxToken = factory.fromString("9223372036854775807"); + @Test(groups = "unit") + public void should_split_range_producing_empty_splits() { + List splits = factory.split(factory.fromString("0"), factory.fromString("2"), 5); + assertThat(splits) + .containsExactly( + factory.fromString("1"), + factory.fromString("2"), + factory.fromString("2"), + factory.fromString("2")); + } - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] range (which is the whole ring) - List splits = factory.split(maxToken, minToken, 3); - 
assertThat(splits).containsExactly( - maxToken, - maxToken - ); + @Test(groups = "unit") + public void should_split_range_producing_empty_splits_near_ring_end() { + Token minToken = factory.fromString("-9223372036854775808"); + Token maxToken = factory.fromString("9223372036854775807"); - splits = factory.split(minToken, factory.fromString("-9223372036854775807"), 3); - assertThat(splits).containsExactly( - factory.fromString("-9223372036854775807"), - factory.fromString("-9223372036854775807") - ); - } + // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] + // range (which is the whole ring) + List splits = factory.split(maxToken, minToken, 3); + assertThat(splits).containsExactly(maxToken, maxToken); - @Test(groups = "unit") - public void should_split_whole_ring() { - List splits = factory.split(factory.fromString("-9223372036854775808"), factory.fromString("-9223372036854775808"), 3); - assertThat(splits).containsExactly( - factory.fromString("-3074457345618258603"), - factory.fromString("3074457345618258602") - ); - } + splits = factory.split(minToken, factory.fromString("-9223372036854775807"), 3); + assertThat(splits) + .containsExactly( + factory.fromString("-9223372036854775807"), factory.fromString("-9223372036854775807")); + } -} \ No newline at end of file + @Test(groups = "unit") + public void should_split_whole_ring() { + List splits = + factory.split( + factory.fromString("-9223372036854775808"), + factory.fromString("-9223372036854775808"), + 3); + assertThat(splits) + .containsExactly( + factory.fromString("-3074457345618258603"), factory.fromString("3074457345618258602")); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenIntegrationTest.java index 4bed18eef75..78ae9019b01 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,12 +21,12 @@ public class M3PTokenIntegrationTest extends TokenIntegrationTest { - public M3PTokenIntegrationTest() { - super(DataType.bigint(), false); - } + public M3PTokenIntegrationTest() { + super(DataType.bigint(), false); + } - @Override - protected Token.Factory tokenFactory() { - return M3PToken.FACTORY; - } + @Override + protected Token.Factory tokenFactory() { + return M3PToken.FACTORY; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenVnodeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenVnodeIntegrationTest.java index 82734bb92e0..9d5b027588a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/M3PTokenVnodeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/M3PTokenVnodeIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +20,12 @@ @CCMConfig(options = "--vnodes") public class M3PTokenVnodeIntegrationTest extends TokenIntegrationTest { - public M3PTokenVnodeIntegrationTest() { - super(DataType.bigint(), true); - } + public M3PTokenVnodeIntegrationTest() { + super(DataType.bigint(), true); + } - @Override - protected Token.Factory tokenFactory() { - return Token.M3PToken.FACTORY; - } + @Override + protected Token.Factory tokenFactory() { + return Token.M3PToken.FACTORY; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MappingCodec.java b/driver-core/src/test/java/com/datastax/driver/core/MappingCodec.java index 6271dc78ee3..e734d85b572 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MappingCodec.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MappingCodec.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,48 +19,50 @@ import com.datastax.driver.core.exceptions.InvalidTypeException; import com.google.common.reflect.TypeToken; - import java.nio.ByteBuffer; /** - * This class is a copy of MappingCodec declared in the extras module, - * to avoid circular dependencies between Maven modules. + * This class is a copy of MappingCodec declared in the extras module, to avoid circular + * dependencies between Maven modules. */ public abstract class MappingCodec extends TypeCodec { - protected final TypeCodec innerCodec; - - public MappingCodec(TypeCodec innerCodec, Class javaType) { - this(innerCodec, TypeToken.of(javaType)); - } + protected final TypeCodec innerCodec; - public MappingCodec(TypeCodec innerCodec, TypeToken javaType) { - super(innerCodec.getCqlType(), javaType); - this.innerCodec = innerCodec; - } + public MappingCodec(TypeCodec innerCodec, Class javaType) { + this(innerCodec, TypeToken.of(javaType)); + } - @Override - public ByteBuffer serialize(O value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return innerCodec.serialize(serialize(value), protocolVersion); - } + public MappingCodec(TypeCodec innerCodec, TypeToken javaType) { + super(innerCodec.getCqlType(), javaType); + this.innerCodec = innerCodec; + } - @Override - public O deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - return deserialize(innerCodec.deserialize(bytes, protocolVersion)); - } + @Override + public ByteBuffer serialize(O value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return innerCodec.serialize(serialize(value), protocolVersion); + } - @Override - public O parse(String value) throws InvalidTypeException { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? null : deserialize(innerCodec.parse(value)); - } + @Override + public O deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return deserialize(innerCodec.deserialize(bytes, protocolVersion)); + } - @Override - public String format(O value) throws InvalidTypeException { - return value == null ? null : innerCodec.format(serialize(value)); - } + @Override + public O parse(String value) throws InvalidTypeException { + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") + ? null + : deserialize(innerCodec.parse(value)); + } - protected abstract O deserialize(I value); + @Override + public String format(O value) throws InvalidTypeException { + return value == null ? null : innerCodec.format(serialize(value)); + } - protected abstract I serialize(O value); + protected abstract O deserialize(I value); + protected abstract I serialize(O value); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataAssert.java index d1356afaf14..7df2630e59e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,29 +17,29 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; -public class MaterializedViewMetadataAssert extends AbstractAssert { +import org.assertj.core.api.AbstractAssert; - public MaterializedViewMetadataAssert(MaterializedViewMetadata actual) { - super(actual, MaterializedViewMetadataAssert.class); - } +public class MaterializedViewMetadataAssert + extends AbstractAssert { - public MaterializedViewMetadataAssert hasName(String name) { - assertThat(actual.getName()).isEqualTo(name); - return this; - } + public MaterializedViewMetadataAssert(MaterializedViewMetadata actual) { + super(actual, MaterializedViewMetadataAssert.class); + } - public MaterializedViewMetadataAssert hasBaseTable(TableMetadata table) { - assertThat(actual.getBaseTable()).isEqualTo(table); - return this; - } + public MaterializedViewMetadataAssert hasName(String name) { + assertThat(actual.getName()).isEqualTo(name); + return this; + } - public MaterializedViewMetadataAssert hasNumberOfColumns(int expected) { - assertThat(actual.getColumns().size()).isEqualTo(expected); - return this; - } + public MaterializedViewMetadataAssert hasBaseTable(TableMetadata table) { + assertThat(actual.getBaseTable()).isEqualTo(table); + return this; + } + public MaterializedViewMetadataAssert hasNumberOfColumns(int expected) { + assertThat(actual.getColumns().size()).isEqualTo(expected); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataTest.java index 51eb73e00ec..404a6b5f7d6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MaterializedViewMetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,110 +17,149 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.ClusteringOrder.DESC; import static com.datastax.driver.core.DataType.cint; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + @CassandraVersion("3.0") public class MaterializedViewMetadataTest extends CCMTestsSupport { - /** - * Validates that a materialized view is properly retrieved and parsed. - * - * @test_category metadata, materialized_view - * @jira_ticket JAVA-825 - */ - @Test(groups = "short") - public void should_create_view_metadata() { - - // given - String createTable = String.format( - "CREATE TABLE %s.scores(" - + "user TEXT," - + "game TEXT," - + "year INT," - + "month INT," - + "day INT," - + "score INT," - + "PRIMARY KEY (user, game, year, month, day)" - + ")", - keyspace); - String createMV = String.format( - "CREATE MATERIALIZED VIEW %s.monthlyhigh AS " - + "SELECT game, year, month, score, user, day FROM %s.scores " - + "WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL " - + "PRIMARY KEY ((game, year, month), score, user, day) " - + "WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)", - keyspace, keyspace); + /** + * Validates that a materialized view is properly retrieved and parsed. 
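For context, the assertions in this test map directly onto the driver's public schema metadata API. A minimal standalone sketch of that flow (not part of this patch; the contact point `127.0.0.1`, the keyspace name `game_ks`, and the pre-existing `scores`/`monthlyhigh` schema are assumptions for illustration) could look like:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.MaterializedViewMetadata;
import com.datastax.driver.core.TableMetadata;

public class ViewMetadataSketch {
  public static void main(String[] args) {
    // Contact point and keyspace name are placeholders; the schema is assumed to already exist.
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      cluster.connect(); // populates the schema metadata
      KeyspaceMetadata ks = cluster.getMetadata().getKeyspace("game_ks");
      TableMetadata scores = ks.getTable("scores");
      MaterializedViewMetadata view = ks.getMaterializedView("monthlyhigh");
      // A view knows its base table, exposes its own column layout,
      // and can be re-exported as a CREATE MATERIALIZED VIEW statement.
      System.out.println("base table: " + view.getBaseTable().getName());
      System.out.println("columns:    " + view.getColumns().size());
      System.out.println(view.asCQLQuery());
      System.out.println("views on scores: " + scores.getViews().size());
    } finally {
      cluster.close();
    }
  }
}
```

The quoted-identifier variant later in this file exercises the same calls with case-sensitive names, which is why its metadata lookups pass escaped quotes such as `"\"T1\""`.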
+ * + * @test_category metadata, materialized_view + * @jira_ticket JAVA-825 + */ + @Test(groups = "short") + public void should_create_view_metadata() { - // when - session().execute(createTable); - session().execute(createMV); + // given + String createTable = + String.format( + "CREATE TABLE %s.scores(" + + "user TEXT," + + "game TEXT," + + "year INT," + + "month INT," + + "day INT," + + "score INT," + + "PRIMARY KEY (user, game, year, month, day)" + + ")", + keyspace); + String createMV = + String.format( + "CREATE MATERIALIZED VIEW %s.monthlyhigh AS " + + "SELECT game, year, month, score, user, day FROM %s.scores " + + "WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL " + + "PRIMARY KEY ((game, year, month), score, user, day) " + + "WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)", + keyspace, keyspace); - // then - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("scores"); - MaterializedViewMetadata mv = cluster().getMetadata().getKeyspace(keyspace).getMaterializedView("monthlyhigh"); + // when + session().execute(createTable); + session().execute(createMV); - assertThat(table).isNotNull().hasName("scores").hasMaterializedView(mv).hasNumberOfColumns(6); - assertThat(table.getColumns().get(0)).isNotNull().hasName("user").isPartitionKey(); - assertThat(table.getColumns().get(1)).isNotNull().hasName("game").isClusteringColumn(); - assertThat(table.getColumns().get(2)).isNotNull().hasName("year").isClusteringColumn(); - assertThat(table.getColumns().get(3)).isNotNull().hasName("month").isClusteringColumn(); - assertThat(table.getColumns().get(4)).isNotNull().hasName("day").isClusteringColumn(); - assertThat(table.getColumns().get(5)).isNotNull().hasName("score").isRegularColumn(); + // then + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("scores"); + MaterializedViewMetadata mv = + cluster().getMetadata().getKeyspace(keyspace).getMaterializedView("monthlyhigh"); - assertThat(mv).isNotNull().hasName("monthlyhigh").hasBaseTable(table).hasNumberOfColumns(6).isEqualTo(table.getView("monthlyhigh")); - assertThat(mv.getColumns().get(0)).isNotNull().hasName("game").isPartitionKey(); - assertThat(mv.getColumns().get(1)).isNotNull().hasName("year").isPartitionKey(); - assertThat(mv.getColumns().get(2)).isNotNull().hasName("month").isPartitionKey(); - assertThat(mv.getColumns().get(3)).isNotNull().hasName("score").isClusteringColumn().hasClusteringOrder(DESC); - assertThat(mv.getColumns().get(4)).isNotNull().hasName("user").isClusteringColumn(); - assertThat(mv.getColumns().get(5)).isNotNull().hasName("day").isClusteringColumn(); - assertThat(mv.asCQLQuery(false)).contains(createMV); - } + assertThat(table).isNotNull().hasName("scores").hasMaterializedView(mv).hasNumberOfColumns(6); + assertThat(table.getColumns().get(0)).isNotNull().hasName("user").isPartitionKey(); + assertThat(table.getColumns().get(1)).isNotNull().hasName("game").isClusteringColumn(); + assertThat(table.getColumns().get(2)).isNotNull().hasName("year").isClusteringColumn(); + assertThat(table.getColumns().get(3)).isNotNull().hasName("month").isClusteringColumn(); + assertThat(table.getColumns().get(4)).isNotNull().hasName("day").isClusteringColumn(); + assertThat(table.getColumns().get(5)).isNotNull().hasName("score").isRegularColumn(); - /** - * Validates that a materialized view is properly retrieved and parsed when using quoted identifiers. 
- * - * @test_category metadata, materialized_view - * @jira_ticket JAVA-825 - */ - @Test(groups = "short") - public void should_create_view_metadata_with_quoted_identifiers() { - // given - String createTable = String.format( - "CREATE TABLE %s.\"T1\" (" - + "\"theKey\" int, " - + "\"the;Clustering\" int, " - + "\"the Value\" int, " - + "PRIMARY KEY (\"theKey\", \"the;Clustering\"))", - keyspace); - String createMV = String.format( - "CREATE MATERIALIZED VIEW %s.\"Mv1\" AS " - + "SELECT \"theKey\", \"the;Clustering\", \"the Value\" " - + "FROM %s.\"T1\" " - + "WHERE \"theKey\" IS NOT NULL AND \"the;Clustering\" IS NOT NULL AND \"the Value\" IS NOT NULL " - + "PRIMARY KEY (\"theKey\", \"the;Clustering\")", - keyspace, keyspace); - // when - session().execute(createTable); - session().execute(createMV); - // then - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("\"T1\""); - MaterializedViewMetadata mv = cluster().getMetadata().getKeyspace(keyspace).getMaterializedView("\"Mv1\""); - assertThat(table).isNotNull().hasName("T1").hasMaterializedView(mv).hasNumberOfColumns(3); - assertThat(table.getViews()).hasSize(1).containsOnly(mv); - assertThat(table.getColumns().get(0)).isNotNull().hasName("theKey").isPartitionKey().hasType(cint()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("the;Clustering").isClusteringColumn().hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("the Value").isRegularColumn().hasType(cint()); - assertThat(mv).isNotNull().hasName("Mv1").hasBaseTable(table).hasNumberOfColumns(3); - assertThat(mv.getColumns().get(0)).isNotNull().hasName("theKey").isPartitionKey().hasType(cint()); - assertThat(mv.getColumns().get(1)).isNotNull().hasName("the;Clustering").isClusteringColumn().hasType(cint()); - assertThat(mv.getColumns().get(2)).isNotNull().hasName("the Value").isRegularColumn().hasType(cint()); - assertThat(mv.asCQLQuery(false)).contains(createMV); - } + assertThat(mv) + .isNotNull() + .hasName("monthlyhigh") + .hasBaseTable(table) + .hasNumberOfColumns(6) + .isEqualTo(table.getView("monthlyhigh")); + assertThat(mv.getColumns().get(0)).isNotNull().hasName("game").isPartitionKey(); + assertThat(mv.getColumns().get(1)).isNotNull().hasName("year").isPartitionKey(); + assertThat(mv.getColumns().get(2)).isNotNull().hasName("month").isPartitionKey(); + assertThat(mv.getColumns().get(3)) + .isNotNull() + .hasName("score") + .isClusteringColumn() + .hasClusteringOrder(DESC); + assertThat(mv.getColumns().get(4)).isNotNull().hasName("user").isClusteringColumn(); + assertThat(mv.getColumns().get(5)).isNotNull().hasName("day").isClusteringColumn(); + assertThat(mv.asCQLQuery(false)).contains(createMV); + } + /** + * Validates that a materialized view is properly retrieved and parsed when using quoted + * identifiers. 
+ * + * @test_category metadata, materialized_view + * @jira_ticket JAVA-825 + */ + @Test(groups = "short") + public void should_create_view_metadata_with_quoted_identifiers() { + // given + String createTable = + String.format( + "CREATE TABLE %s.\"T1\" (" + + "\"theKey\" int, " + + "\"the;Clustering\" int, " + + "\"the Value\" int, " + + "PRIMARY KEY (\"theKey\", \"the;Clustering\"))", + keyspace); + String createMV = + String.format( + "CREATE MATERIALIZED VIEW %s.\"Mv1\" AS " + + "SELECT \"theKey\", \"the;Clustering\", \"the Value\" " + + "FROM %s.\"T1\" " + + "WHERE \"theKey\" IS NOT NULL AND \"the;Clustering\" IS NOT NULL AND \"the Value\" IS NOT NULL " + + "PRIMARY KEY (\"theKey\", \"the;Clustering\")", + keyspace, keyspace); + // when + session().execute(createTable); + session().execute(createMV); + // then + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("\"T1\""); + MaterializedViewMetadata mv = + cluster().getMetadata().getKeyspace(keyspace).getMaterializedView("\"Mv1\""); + assertThat(table).isNotNull().hasName("T1").hasMaterializedView(mv).hasNumberOfColumns(3); + assertThat(table.getViews()).hasSize(1).containsOnly(mv); + assertThat(table.getColumns().get(0)) + .isNotNull() + .hasName("theKey") + .isPartitionKey() + .hasType(cint()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("the;Clustering") + .isClusteringColumn() + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("the Value") + .isRegularColumn() + .hasType(cint()); + assertThat(mv).isNotNull().hasName("Mv1").hasBaseTable(table).hasNumberOfColumns(3); + assertThat(mv.getColumns().get(0)) + .isNotNull() + .hasName("theKey") + .isPartitionKey() + .hasType(cint()); + assertThat(mv.getColumns().get(1)) + .isNotNull() + .hasName("the;Clustering") + .isClusteringColumn() + .hasType(cint()); + assertThat(mv.getColumns().get(2)) + .isNotNull() + .hasName("the Value") + .isRegularColumn() + .hasType(cint()); + assertThat(mv.asCQLQuery(false)).contains(createMV); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MemoryAppender.java b/driver-core/src/test/java/com/datastax/driver/core/MemoryAppender.java index cc4bfbdf5b3..ecdad298355 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MemoryAppender.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MemoryAppender.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,130 +17,123 @@ */ package com.datastax.driver.core; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; -import org.apache.log4j.spi.LoggingEvent; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import java.io.StringWriter; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.WriterAppender; +import org.apache.log4j.spi.LoggingEvent; /** * Simple Log4J appender that captures logs to memory in order to inspect them in unit tests. - *

    - * There is no purging mechanism, so make sure it doesn't stay enabled for too long (this is best + * + *

    There is no purging mechanism, so make sure it doesn't stay enabled for too long (this is best * done with an {@code @After} method that removes it). */ public class MemoryAppender extends WriterAppender { - private final Lock appendLock = new ReentrantLock(); - - private final Condition append = appendLock.newCondition(); + private final Lock appendLock = new ReentrantLock(); - public final StringWriter writer = new StringWriter(); + private final Condition append = appendLock.newCondition(); - private int nextLogIdx = 0; - - public MemoryAppender() { - setWriter(writer); - setLayout(new PatternLayout("%m%n")); - } + public final StringWriter writer = new StringWriter(); - @Override - protected void subAppend(LoggingEvent event) { - appendLock.lock(); - try { - super.subAppend(event); - append.signal(); - } finally { - appendLock.unlock(); - } - } + private int nextLogIdx = 0; - public String get() { - return writer.toString(); - } + public MemoryAppender() { + setWriter(writer); + setLayout(new PatternLayout("%m%n")); + } - /** - * Wait until at least one log event is appended to the current appender, - * or a timeout occurs, whichever happens first, - * then return the appender contents. - * Not thread safe. - * Useful when asynchronous code needs to wait until - * the appender is actually invoked at least once. - * - * @param timeoutMillis timeout in milliseconds - * @return The appender contents. Not thread safe. - */ - public String waitAndGet(long timeoutMillis) throws InterruptedException { - long nanos = MILLISECONDS.toNanos(timeoutMillis); - appendLock.lock(); - try { - while (get().isEmpty()) { - if (nanos <= 0L) break; // timeout - nanos = append.awaitNanos(nanos); - } - return get(); - } finally { - appendLock.unlock(); - } + @Override + protected void subAppend(LoggingEvent event) { + appendLock.lock(); + try { + super.subAppend(event); + append.signal(); + } finally { + appendLock.unlock(); } - - /** - * @return The next set of logs after getNext was last called. Not thread safe. - */ - public String getNext() { - String next = get().substring(nextLogIdx); - nextLogIdx += next.length(); - return next; + } + + public String get() { + return writer.toString(); + } + + /** + * Wait until at least one log event is appended to the current appender, or a timeout occurs, + * whichever happens first, then return the appender contents. Not thread safe. Useful when + * asynchronous code needs to wait until the appender is actually invoked at least once. + * + * @param timeoutMillis timeout in milliseconds + * @return The appender contents. Not thread safe. + */ + public String waitAndGet(long timeoutMillis) throws InterruptedException { + long nanos = MILLISECONDS.toNanos(timeoutMillis); + appendLock.lock(); + try { + while (get().isEmpty()) { + if (nanos <= 0L) break; // timeout + nanos = append.awaitNanos(nanos); + } + return get(); + } finally { + appendLock.unlock(); } - - public MemoryAppender enableFor(Class... loggers) { - for (Class logger : loggers) { - enableFor(logger.getName()); - } - return this; + } + + /** @return The next set of logs after getNext was last called. Not thread safe. */ + public String getNext() { + String next = get().substring(nextLogIdx); + nextLogIdx += next.length(); + return next; + } + + public MemoryAppender enableFor(Class... loggers) { + for (Class logger : loggers) { + enableFor(logger.getName()); } + return this; + } - public MemoryAppender enableFor(org.slf4j.Logger... 
loggers) { - for (org.slf4j.Logger logger : loggers) { - enableFor(logger.getName()); - } - return this; + public MemoryAppender enableFor(org.slf4j.Logger... loggers) { + for (org.slf4j.Logger logger : loggers) { + enableFor(logger.getName()); } + return this; + } - public MemoryAppender enableFor(String... loggers) { - for (String logger : loggers) { - Logger log4jLogger = Logger.getLogger(logger); - log4jLogger.addAppender(this); - } - return this; + public MemoryAppender enableFor(String... loggers) { + for (String logger : loggers) { + Logger log4jLogger = Logger.getLogger(logger); + log4jLogger.addAppender(this); } + return this; + } - public MemoryAppender disableFor(Class... loggers) { - for (Class logger : loggers) { - disableFor(logger.getName()); - } - return this; + public MemoryAppender disableFor(Class... loggers) { + for (Class logger : loggers) { + disableFor(logger.getName()); } + return this; + } - public MemoryAppender disableFor(org.slf4j.Logger... loggers) { - for (org.slf4j.Logger logger : loggers) { - disableFor(logger.getName()); - } - return this; + public MemoryAppender disableFor(org.slf4j.Logger... loggers) { + for (org.slf4j.Logger logger : loggers) { + disableFor(logger.getName()); } + return this; + } - public MemoryAppender disableFor(String... loggers) { - for (String logger : loggers) { - Logger log4jLogger = Logger.getLogger(logger); - log4jLogger.removeAppender(this); - } - return this; + public MemoryAppender disableFor(String... loggers) { + for (String logger : loggers) { + Logger log4jLogger = Logger.getLogger(logger); + log4jLogger.removeAppender(this); } - + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java index d101791c543..d6a5fccf783 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,150 +17,175 @@ */ package com.datastax.driver.core; -import com.google.common.collect.Maps; -import org.testng.annotations.Test; - -import java.util.Map; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static com.datastax.driver.core.TestUtils.waitForUp; +import com.google.common.collect.Maps; +import java.util.Map; +import org.testng.annotations.Test; + @CreateCCM(PER_METHOD) public class MetadataTest extends CCMTestsSupport { - /** - *

    - * Validates that when the topology of the cluster changes that Cluster Metadata is properly updated.
    - *
    - * This test does the following:
    - *
    - * 1. Creates a 3 node cluster and capture token range data from the {@link Cluster}'s {@link Metadata}
    - * 2. Decommission node 3.
    - * 3. Validates that the token range data was updated to reflect node 3 leaving and the other nodes
    - *    taking on its token range.
    - * 4. Adds a new node, node 4.
    - * 5. Validates that the token range data was updated to reflect node 4 joining the {@link Cluster} and
    - *    the token ranges reflecting this.
    - * - * @test_category metadata:token - * @expected_result cluster metadata is properly updated in response to node remove and add events. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "long") - @CCMConfig(numberOfNodes = 3, dirtiesContext = true, createCluster = false) - public void should_update_metadata_on_topology_change() { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build()); - Session session = cluster.connect(); - - String keyspace = "test"; - session.execute("CREATE KEYSPACE " + keyspace + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - Metadata metadata = cluster.getMetadata(); - - // Capture all Token data. - assertThat(metadata.getTokenRanges()).hasSize(3); - Map tokensForHost = getTokenForHosts(metadata); - - // Capture host3s token and range before we take it down. - Host host3 = TestUtils.findHost(cluster, 3); - Token host3Token = tokensForHost.get(host3); - - ccm().decommission(3); - ccm().remove(3); - - // Ensure that the token ranges were updated, there should only be 2 ranges now. - assertThat(metadata.getTokenRanges()).hasSize(2); - - // The token should not be present for any Host. - assertThat(getTokenForHosts(metadata)).doesNotContainValue(host3Token); - - // The ring should be fully accounted for. - assertThat(cluster).hasValidTokenRanges("test"); - assertThat(cluster).hasValidTokenRanges(); - - // Add an additional node. - ccm().add(4); - ccm().start(4); - waitForUp(TestUtils.IP_PREFIX + '4', cluster); - - // Ensure that the token ranges were updated, there should only be 3 ranges now. - assertThat(metadata.getTokenRanges()).hasSize(3); - - Host host4 = TestUtils.findHost(cluster, 4); - TokenRange host4Range = metadata.getTokenRanges(keyspace, host4).iterator().next(); - - // Ensure no host token range intersects with node 4. - for (Host host : metadata.getAllHosts()) { - if (!host.equals(host4)) { - TokenRange hostRange = metadata.getTokenRanges(keyspace, host).iterator().next(); - assertThat(host4Range).doesNotIntersect(hostRange); - } - } - - // The ring should be fully accounted for. - assertThat(cluster).hasValidTokenRanges("test"); - assertThat(cluster).hasValidTokenRanges(); - } - - /** - * @return A mapping of Host -> Token for each Host in the given {@link Metadata} - */ - private Map getTokenForHosts(Metadata metadata) { - Map tokensByHost = Maps.newHashMap(); - for (Host host : metadata.getAllHosts()) { - tokensByHost.put(host, host.getTokens().iterator().next()); - } - return tokensByHost; + /** + * Validates that when the topology of the cluster changes that Cluster Metadata is properly + * updated. + * + *

+ * <p>This test does the following:
+ *
+ * <ol>
+ *   <li>Creates a 3 node cluster and capture token range data from the {@link Cluster}'s {@link
+ *       Metadata}
+ *   <li>Decommission node 3.
+ *   <li>Validates that the token range data was updated to reflect node 3 leaving and the other
+ *       nodes taking on its token range.
+ *   <li>Adds a new node, node 4.
+ *   <li>Validates that the token range data was updated to reflect node 4 joining the {@link
+ *       Cluster} and the token ranges reflecting this.
+ * </ol>
    + * + * @test_category metadata:token + * @expected_result cluster metadata is properly updated in response to node remove and add + * events. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "long") + @CCMConfig(numberOfNodes = 3, dirtiesContext = true, createCluster = false) + public void should_update_metadata_on_topology_change() { + Cluster cluster = register(createClusterBuilderNoDebouncing().build()); + Session session = cluster.connect(); + + String keyspace = "test"; + session.execute( + "CREATE KEYSPACE " + + keyspace + + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + Metadata metadata = cluster.getMetadata(); + + // Capture all Token data. + assertThat(metadata.getTokenRanges()).hasSize(3); + Map tokensForHost = getTokenForHosts(metadata); + + // Capture host3s token and range before we take it down. + Host host3 = TestUtils.findHost(cluster, 3); + Token host3Token = tokensForHost.get(host3); + + ccm().decommission(3); + ccm().remove(3); + + // Ensure that the token ranges were updated, there should only be 2 ranges now. + assertThat(metadata.getTokenRanges()).hasSize(2); + + // The token should not be present for any Host. + assertThat(getTokenForHosts(metadata)).doesNotContainValue(host3Token); + + // The ring should be fully accounted for. + assertThat(cluster).hasValidTokenRanges("test"); + assertThat(cluster).hasValidTokenRanges(); + + // Add an additional node. + ccm().add(4); + ccm().start(4); + waitForUp(TestUtils.IP_PREFIX + '4', cluster); + + // Ensure that the token ranges were updated, there should only be 3 ranges now. + assertThat(metadata.getTokenRanges()).hasSize(3); + + Host host4 = TestUtils.findHost(cluster, 4); + TokenRange host4Range = metadata.getTokenRanges(keyspace, host4).iterator().next(); + + // Ensure no host token range intersects with node 4. + for (Host host : metadata.getAllHosts()) { + if (!host.equals(host4)) { + TokenRange hostRange = metadata.getTokenRanges(keyspace, host).iterator().next(); + assertThat(host4Range).doesNotIntersect(hostRange); + } } - @Test(groups = "unit") - public void handleId_should_lowercase_unquoted_alphanumeric_identifiers() { - assertThat(Metadata.handleId("FooBar1")).isEqualTo("foobar1"); - assertThat(Metadata.handleId("Foo_Bar_1")).isEqualTo("foo_bar_1"); - } - - @Test(groups = "unit") - public void handleId_should_unquote_and_preserve_case_of_quoted_identifiers() { - assertThat(Metadata.handleId("\"FooBar1\"")).isEqualTo("FooBar1"); - assertThat(Metadata.handleId("\"Foo_Bar_1\"")).isEqualTo("Foo_Bar_1"); - assertThat(Metadata.handleId("\"Foo Bar 1\"")).isEqualTo("Foo Bar 1"); - } - - @Test(groups = "unit") - public void handleId_should_unescape_duplicate_double_quotes_in_quoted_identifiers() { - assertThat(Metadata.handleId("\"Foo\"\"Bar\"")).isEqualTo("Foo\"Bar"); - } - - @Test(groups = "unit") - public void handleId_should_preserve_unquoted_non_alphanumeric_identifiers() { - assertThat(Metadata.handleId("Foo Bar")).isEqualTo("Foo Bar"); - } - - @Test(groups = "unit") - public void escapeId_should_not_quote_lowercase_identifiers() { - String id = "this_does_not_need_quoting_0123456789abcdefghijklmnopqrstuvwxyz"; - assertThat(Metadata.quoteIfNecessary(id)).isEqualTo(id); - } + // The ring should be fully accounted for. 
+ assertThat(cluster).hasValidTokenRanges("test"); + assertThat(cluster).hasValidTokenRanges(); + } - @Test(groups = "unit") - public void escapeId_should_quote_non_lowercase_identifiers() { - assertThat(Metadata.quoteIfNecessary("This_Needs_Quoting_1234")).isEqualTo("\"This_Needs_Quoting_1234\""); - assertThat(Metadata.quoteIfNecessary("This Needs Quoting 1234!!")).isEqualTo("\"This Needs Quoting 1234!!\""); + /** @return A mapping of Host -> Token for each Host in the given {@link Metadata} */ + private Map getTokenForHosts(Metadata metadata) { + Map tokensByHost = Maps.newHashMap(); + for (Host host : metadata.getAllHosts()) { + tokensByHost.put(host, host.getTokens().iterator().next()); } - - @Test(groups = "unit") - public void escapeId_should_quote_reserved_cql_keywords() { - assertThat(Metadata.quoteIfNecessary("columnfamily")).isEqualTo("\"columnfamily\""); - } - + return tokensByHost; + } + + @Test(groups = "unit") + public void handleId_should_lowercase_unquoted_alphanumeric_identifiers() { + assertThat(Metadata.handleId("FooBar1")).isEqualTo("foobar1"); + assertThat(Metadata.handleId("Foo_Bar_1")).isEqualTo("foo_bar_1"); + assertThat(Metadata.handleId("foo_bar_1")).isEqualTo("foo_bar_1"); + } + + @Test(groups = "unit") + public void handleId_should_unquote_and_preserve_case_of_quoted_identifiers() { + assertThat(Metadata.handleId("\"FooBar1\"")).isEqualTo("FooBar1"); + assertThat(Metadata.handleId("\"Foo_Bar_1\"")).isEqualTo("Foo_Bar_1"); + assertThat(Metadata.handleId("\"Foo Bar 1\"")).isEqualTo("Foo Bar 1"); + } + + @Test(groups = "unit") + public void handleId_should_unescape_duplicate_double_quotes_in_quoted_identifiers() { + assertThat(Metadata.handleId("\"Foo\"\"Bar\"")).isEqualTo("Foo\"Bar"); + } + + @Test(groups = "unit") + public void handleId_should_preserve_unquoted_non_alphanumeric_identifiers() { + assertThat(Metadata.handleId("Foo Bar")).isEqualTo("Foo Bar"); + } + + @Test(groups = "unit") + public void escapeId_should_not_quote_lowercase_identifiers() { + String id = "this_does_not_need_quoting_0123456789abcdefghijklmnopqrstuvwxyz"; + assertThat(Metadata.quoteIfNecessary(id)).isEqualTo(id); + } + + @Test(groups = "unit") + public void escapeId_should_quote_non_lowercase_identifiers() { + assertThat(Metadata.quoteIfNecessary("This_Needs_Quoting_1234")) + .isEqualTo("\"This_Needs_Quoting_1234\""); + assertThat(Metadata.quoteIfNecessary("This Needs Quoting 1234!!")) + .isEqualTo("\"This Needs Quoting 1234!!\""); + } + + @Test(groups = "unit") + public void escapeId_should_quote_reserved_cql_keywords() { + assertThat(Metadata.quoteIfNecessary("columnfamily")).isEqualTo("\"columnfamily\""); + } + + /** @jira_ticket JAVA-2174 */ + @Test(groups = "unit") + public void escapeId_should_quote_empty_keyword() { + assertThat(Metadata.quoteIfNecessary("")).isEqualTo("\"\""); + } + + @Test(groups = "unit") + public void should_detect_reserved_keywords_in_upper_case() { + assertThat(Metadata.isReservedCqlKeyword("COLUMNFAMILY")).isTrue(); + assertThat(Metadata.isReservedCqlKeyword("TEST_COLUMNFAMILY")).isFalse(); + } + + @Test(groups = "unit") + public void should_detect_reserved_keywords_in_lower_case() { + assertThat(Metadata.isReservedCqlKeyword("columnfamily")).isTrue(); + assertThat(Metadata.isReservedCqlKeyword("test_columnfamily")).isFalse(); + } + + @Test(groups = "unit") + public void should_detect_reserved_keywords_in_mixed_case() { + assertThat(Metadata.isReservedCqlKeyword("ColumnFamily")).isTrue(); + 
assertThat(Metadata.isReservedCqlKeyword("Test_ColumnFamily")).isFalse(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MetricsInFlightTest.java b/driver-core/src/test/java/com/datastax/driver/core/MetricsInFlightTest.java new file mode 100644 index 00000000000..2bb72a36710 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/MetricsInFlightTest.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.assertj.core.api.Assertions.assertThat; +import static org.scassandra.http.client.PrimingRequest.then; + +import org.scassandra.Scassandra; +import org.scassandra.http.client.PrimingRequest; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class MetricsInFlightTest { + private ScassandraCluster sCluster; + + @BeforeMethod(groups = "short") + public void setUp() { + sCluster = ScassandraCluster.builder().withNodes(1).build(); + sCluster.init(); + } + + @AfterMethod(groups = "short") + public void tearDown() { + clearActivityLog(); + sCluster.stop(); + } + + public void clearActivityLog() { + for (Scassandra node : sCluster.nodes()) { + node.activityClient().clearAllRecordedActivity(); + } + } + + public Cluster.Builder builder() { + // Note: nonQuietClusterCloseOptions is used to speed up tests + return Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions); + } + + @Test(groups = "short") + public void should_count_inflight_requests_metrics() { + sCluster + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(100000L)) + .build()); + + Cluster cluster = null; + try { + cluster = builder().build(); + Session session = cluster.connect(); + + assertThat(cluster.getMetrics().getInFlightRequests().getValue()).isEqualTo(0); + session.executeAsync("mock query"); + session.executeAsync("mock query"); + assertThat(cluster.getMetrics().getInFlightRequests().getValue()).isEqualTo(2); + + } finally { + if (cluster != null) { + cluster.close(); + } + } + } + + @Test(groups = "short") + public void should_countdown_inflight_requests_metrics() { + sCluster + .node(1) + .primingClient() + .prime(PrimingRequest.queryBuilder().withQuery("mock query").withThen(then()).build()); + + Cluster cluster = null; + try { + cluster = builder().build(); + Session session = cluster.connect(); + + assertThat(cluster.getMetrics().getInFlightRequests().getValue()).isEqualTo(0); + session.executeAsync("mock 
query").getUninterruptibly(); + session.executeAsync("mock query").getUninterruptibly(); + assertThat(cluster.getMetrics().getInFlightRequests().getValue()).isEqualTo(0); + + } finally { + if (cluster != null) { + cluster.close(); + } + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java b/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java index 0fce918f6c1..bed176f9206 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,152 +17,165 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static org.testng.Assert.assertEquals; + import com.datastax.driver.core.Metrics.Errors; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.RetryPolicy.RetryDecision; -import org.testng.annotations.Test; - +import java.lang.management.ManagementFactory; import javax.management.InstanceNotFoundException; import javax.management.MBeanInfo; import javax.management.MBeanServer; import javax.management.ObjectName; -import java.lang.management.ManagementFactory; - -import static com.datastax.driver.core.Assertions.assertThat; -import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; public class MetricsTest extends CCMTestsSupport { - private volatile RetryDecision retryDecision; - - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withRetryPolicy(new RetryPolicy() { - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + private volatile RetryDecision retryDecision; + + MBeanServer server = ManagementFactory.getPlatformMBeanServer(); + + @Override + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder() + .withRetryPolicy( + new RetryPolicy() { + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { return retryDecision; - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + } + + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + 
WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { return retryDecision; - } - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + } + + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { return retryDecision; - } + } - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { return retryDecision; - } - - @Override - public void init(Cluster cluster) { - } - - @Override - public void close() { - } - }); - } - - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test (k int primary key, v int)", - "INSERT INTO test (k, v) VALUES (1, 1)"); + } + + @Override + public void init(Cluster cluster) {} + + @Override + public void close() {} + }); + } + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE test (k int primary key, v int)", "INSERT INTO test (k, v) VALUES (1, 1)"); + } + + @Test(groups = "short") + public void retriesTest() { + retryDecision = RetryDecision.retry(ConsistencyLevel.ONE); + + // We only have one node, this will throw an unavailable exception + Statement statement = + new SimpleStatement("SELECT v FROM test WHERE k = 1") + .setConsistencyLevel(ConsistencyLevel.TWO); + session().execute(statement); + + Errors errors = cluster().getMetrics().getErrorMetrics(); + assertEquals(errors.getUnavailables().getCount(), 1); + assertEquals(errors.getRetries().getCount(), 1); + assertEquals(errors.getRetriesOnUnavailable().getCount(), 1); + + retryDecision = RetryDecision.ignore(); + session().execute(statement); + + assertEquals(errors.getUnavailables().getCount(), 2); + assertEquals(errors.getIgnores().getCount(), 1); + assertEquals(errors.getIgnoresOnUnavailable().getCount(), 1); + } + + /** + * Validates that metrics are enabled and exposed by JMX by default by checking that {@link + * Cluster#getMetrics()} is not null and 'clusterName-metrics:name=connected-to' MBean is present. + * + * @test_category metrics + */ + @Test(groups = "short") + public void should_enable_metrics_and_jmx_by_default() throws Exception { + assertThat(cluster().getMetrics()).isNotNull(); + ObjectName clusterMetricsON = + ObjectName.getInstance(cluster().getClusterName() + "-metrics:name=connected-to"); + MBeanInfo mBean = server.getMBeanInfo(clusterMetricsON); + assertThat(mBean).isNotNull(); + + assertThat(cluster().getConfiguration().getMetricsOptions().isEnabled()).isTrue(); + assertThat(cluster().getConfiguration().getMetricsOptions().isJMXReportingEnabled()).isTrue(); + } + + /** + * Validates that when metrics are disabled using {@link Cluster.Builder#withoutMetrics()} that + * {@link Cluster#getMetrics()} returns null and 'clusterName-metrics:name=connected-to' MBean is + * not present. 
+ * + * @test_category metrics + */ + @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) + public void metrics_should_be_null_when_metrics_disabled() throws Exception { + Cluster cluster = register(createClusterBuilder().withoutMetrics().build()); + try { + cluster.init(); + assertThat(cluster.getMetrics()).isNull(); + assertThat(cluster.getConfiguration().getMetricsOptions().isEnabled()).isFalse(); + ObjectName clusterMetricsON = + ObjectName.getInstance(cluster.getClusterName() + "-metrics:name=connected-to"); + server.getMBeanInfo(clusterMetricsON); + } finally { + cluster.close(); } - - @Test(groups = "short") - public void retriesTest() { - retryDecision = RetryDecision.retry(ConsistencyLevel.ONE); - - // We only have one node, this will throw an unavailable exception - Statement statement = new SimpleStatement("SELECT v FROM test WHERE k = 1").setConsistencyLevel(ConsistencyLevel.TWO); - session().execute(statement); - - Errors errors = cluster().getMetrics().getErrorMetrics(); - assertEquals(errors.getUnavailables().getCount(), 1); - assertEquals(errors.getRetries().getCount(), 1); - assertEquals(errors.getRetriesOnUnavailable().getCount(), 1); - - retryDecision = RetryDecision.ignore(); - session().execute(statement); - - assertEquals(errors.getUnavailables().getCount(), 2); - assertEquals(errors.getIgnores().getCount(), 1); - assertEquals(errors.getIgnoresOnUnavailable().getCount(), 1); - } - - /** - * Validates that metrics are enabled and exposed by JMX by default by checking that - * {@link Cluster#getMetrics()} is not null and 'clusterName-metrics:name=connected-to' - * MBean is present. - * - * @test_category metrics - */ - @Test(groups = "short") - public void should_enable_metrics_and_jmx_by_default() throws Exception { - assertThat(cluster().getMetrics()).isNotNull(); - ObjectName clusterMetricsON = ObjectName.getInstance(cluster().getClusterName() + "-metrics:name=connected-to"); - MBeanInfo mBean = server.getMBeanInfo(clusterMetricsON); - assertThat(mBean).isNotNull(); - - assertThat(cluster().getConfiguration().getMetricsOptions().isEnabled()).isTrue(); - assertThat(cluster().getConfiguration().getMetricsOptions().isJMXReportingEnabled()).isTrue(); - } - - /** - * Validates that when metrics are disabled using {@link Cluster.Builder#withoutMetrics()} - * that {@link Cluster#getMetrics()} returns null and 'clusterName-metrics:name=connected-to' - * MBean is not present. - * - * @test_category metrics - */ - @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) - public void metrics_should_be_null_when_metrics_disabled() throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withoutMetrics() - .build()); - try { - cluster.init(); - assertThat(cluster.getMetrics()).isNull(); - assertThat(cluster.getConfiguration().getMetricsOptions().isEnabled()).isFalse(); - ObjectName clusterMetricsON = ObjectName.getInstance(cluster.getClusterName() + "-metrics:name=connected-to"); - server.getMBeanInfo(clusterMetricsON); - } finally { - cluster.close(); - } - } - - /** - * Validates that when metrics are enabled but JMX reporting is disabled via - * {@link Cluster.Builder#withoutJMXReporting()} that {@link Cluster#getMetrics()} - * is not null and 'clusterName-metrics:name=connected-to' MBean is present. 
- * - * @test_category metrics - */ - @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) - public void should_be_no_jmx_mbean_when_jmx_is_disabled() throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withoutJMXReporting() - .build()); - try { - cluster.init(); - assertThat(cluster.getMetrics()).isNotNull(); - assertThat(cluster.getConfiguration().getMetricsOptions().isEnabled()).isTrue(); - assertThat(cluster.getConfiguration().getMetricsOptions().isJMXReportingEnabled()).isFalse(); - ObjectName clusterMetricsON = ObjectName.getInstance(cluster.getClusterName() + "-metrics:name=connected-to"); - server.getMBeanInfo(clusterMetricsON); - } finally { - cluster.close(); - } + } + + /** + * Validates that when metrics are enabled but JMX reporting is disabled via {@link + * Cluster.Builder#withoutJMXReporting()} that {@link Cluster#getMetrics()} is not null and + * 'clusterName-metrics:name=connected-to' MBean is present. + * + * @test_category metrics + */ + @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) + public void should_be_no_jmx_mbean_when_jmx_is_disabled() throws Exception { + Cluster cluster = register(createClusterBuilder().withoutJMXReporting().build()); + try { + cluster.init(); + assertThat(cluster.getMetrics()).isNotNull(); + assertThat(cluster.getConfiguration().getMetricsOptions().isEnabled()).isTrue(); + assertThat(cluster.getConfiguration().getMetricsOptions().isJMXReportingEnabled()).isFalse(); + ObjectName clusterMetricsON = + ObjectName.getInstance(cluster.getClusterName() + "-metrics:name=connected-to"); + server.getMBeanInfo(clusterMetricsON); + } finally { + cluster.close(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java b/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java index 56253d9080b..0695553fca1 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,57 +17,65 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; + import com.datastax.driver.core.policies.Policies; import com.datastax.driver.core.policies.WhiteListPolicy; import com.google.common.collect.Lists; -import org.testng.annotations.Test; - import java.net.InetSocketAddress; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNull; +import org.testng.annotations.Test; /** - * Tests the behavior of the driver when some hosts have no rpc_address in the control host's system tables (JAVA-428). - *

- * <p/>
- * This can happen because of gossip bugs. We want to ignore these hosts because this is most likely indicative of an error state.
+ * Tests the behavior of the driver when some hosts have no rpc_address in the control host's system
+ * tables (JAVA-428).
+ *
+ * <p>
    This can happen because of gossip bugs. We want to ignore these hosts because this is most + * likely indicative of an error state. */ @CreateCCM(PER_METHOD) @CCMConfig(numberOfNodes = 2, dirtiesContext = true, createCluster = false) public class MissingRpcAddressTest extends CCMTestsSupport { - @Test(groups = "short") - public void testMissingRpcAddressAtStartup() throws Exception { - deleteNode2RpcAddressFromNode1(); - // Use only one contact point to make sure that the control connection is on node1 - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); - cluster.connect(); + @Test(groups = "short") + public void testMissingRpcAddressAtStartup() throws Exception { + deleteNode2RpcAddressFromNode1(); + // Use only one contact point to make sure that the control connection is on node1 + Cluster cluster = register(createClusterBuilder().build()); + cluster.connect(); - // Since node2's RPC address is unknown on our control host, it should have been ignored - assertEquals(cluster.getMetrics().getConnectedToHosts().getValue().intValue(), 1); - assertNull(cluster.getMetadata().getHost(getContactPointsWithPorts().get(1))); - } + // Since node2's RPC address is unknown on our control host, it should have been ignored + assertEquals(cluster.getMetrics().getConnectedToHosts().getValue().intValue(), 1); + assertNull(cluster.getMetadata().getHost(getContactPointsWithPorts().get(1))); + } - // Artificially modify the system tables to simulate the missing rpc_address. - private void deleteNode2RpcAddressFromNode1() throws Exception { - InetSocketAddress firstHost = ccm().addressOfNode(1); - Cluster cluster = register(Cluster.builder() - .addContactPoints(firstHost.getAddress()) - .withPort(ccm().getBinaryPort()) + // Artificially modify the system tables to simulate the missing rpc_address. 
+ private void deleteNode2RpcAddressFromNode1() throws Exception { + InetSocketAddress firstHost = ccm().addressOfNode(1); + Cluster cluster = + register( + createClusterBuilder() // ensure we will only connect to node1 - .withLoadBalancingPolicy(new WhiteListPolicy(Policies.defaultLoadBalancingPolicy(), - Lists.newArrayList(firstHost))) + .withLoadBalancingPolicy( + new WhiteListPolicy( + Policies.defaultLoadBalancingPolicy(), Lists.newArrayList(firstHost))) .build()); - Session session = cluster.connect(); - String deleteStmt = String.format("DELETE rpc_address FROM system.peers WHERE peer = '%s'", - ccm().addressOfNode(2).getHostName()); - session.execute(deleteStmt); - session.close(); - cluster.close(); + Session session = cluster.connect(); + String deleteStmt = + String.format( + "DELETE rpc_address FROM system.peers WHERE peer = '%s'", + ccm().addressOfNode(2).getHostName()); + session.execute(deleteStmt); + // For Cassandra 4.0, we also need to remove the info from peers_v2 + if (ccm().getCassandraVersion().nextStable().compareTo(VersionNumber.parse("4.0")) >= 0) { + deleteStmt = + String.format( + "DELETE native_address, native_port FROM system.peers_v2 WHERE peer = '%s' and peer_port = %d", + ccm().addressOfNode(2).getHostName(), ccm().getStoragePort()); + session.execute(deleteStmt); } - + session.close(); + cluster.close(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/MockClocks.java b/driver-core/src/test/java/com/datastax/driver/core/MockClocks.java index 9d988e5f5f4..03ad9702ffb 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MockClocks.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MockClocks.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,26 +18,26 @@ package com.datastax.driver.core; class MockClocks { - static class BackInTimeClock implements Clock { - final long arbitraryTimeStamp = 1412610226270L; - int calls; + static class BackInTimeClock implements Clock { + final long arbitraryTimeStamp = 1412610226270L; + int calls; - @Override - public long currentTimeMicros() { - return arbitraryTimeStamp - calls++; - } + @Override + public long currentTimeMicros() { + return arbitraryTimeStamp - calls++; } + } - static class FixedTimeClock implements Clock { - final long fixedTime; + static class FixedTimeClock implements Clock { + final long fixedTime; - public FixedTimeClock(long fixedTime) { - this.fixedTime = fixedTime; - } + public FixedTimeClock(long fixedTime) { + this.fixedTime = fixedTime; + } - @Override - public long currentTimeMicros() { - return fixedTime; - } + @Override + public long currentTimeMicros() { + return fixedTime; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/NativeTest.java b/driver-core/src/test/java/com/datastax/driver/core/NativeTest.java new file mode 100644 index 00000000000..cc0398319f4 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/NativeTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.testng.annotations.Test; + +public class NativeTest { + + /** + * Verifies that {@link Native#processId()} returns a non-zero process id. + * + * @jira_ticket JAVA-1797 + */ + @Test(groups = "unit") + public void should_return_non_zero_pid() { + assertThat(Native.processId()).isNotZero(); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java index 520cf79f8f0..65db81694a8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,16 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.mockito.Answers.CALLS_REAL_METHODS; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + import io.netty.bootstrap.Bootstrap; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; @@ -23,81 +35,81 @@ import io.netty.channel.socket.SocketChannel; import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; +import java.util.concurrent.ThreadFactory; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.testng.annotations.Test; -import java.util.concurrent.ThreadFactory; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.mockito.Answers.CALLS_REAL_METHODS; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; - @CreateCCM(PER_METHOD) @CCMConfig(createCluster = false) public class NettyOptionsTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_invoke_netty_options_hooks_single_node() throws Exception { - should_invoke_netty_options_hooks(1, 1); - } + @Test(groups = "short") + public void should_invoke_netty_options_hooks_single_node() throws Exception { + should_invoke_netty_options_hooks(1, 1); + } - @CCMConfig(numberOfNodes = 3) - @Test(groups = "short") - public void should_invoke_netty_options_hooks_multi_node() throws Exception { - should_invoke_netty_options_hooks(3, 4); - } + @CCMConfig(numberOfNodes = 3) + @Test(groups = "short") + public void should_invoke_netty_options_hooks_multi_node() throws Exception { + should_invoke_netty_options_hooks(3, 4); + } - private void should_invoke_netty_options_hooks(int hosts, int coreConnections) throws Exception { - NettyOptions nettyOptions = mock(NettyOptions.class, CALLS_REAL_METHODS.get()); - EventLoopGroup eventLoopGroup = new NioEventLoopGroup(); - Timer timer = new HashedWheelTimer(); - doReturn(eventLoopGroup).when(nettyOptions).eventLoopGroup(any(ThreadFactory.class)); - doReturn(timer).when(nettyOptions).timer(any(ThreadFactory.class)); - final ChannelHandler handler = mock(ChannelHandler.class); - doAnswer(new Answer() { - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { + private void should_invoke_netty_options_hooks(int hosts, int coreConnections) throws Exception { + NettyOptions nettyOptions = mock(NettyOptions.class, CALLS_REAL_METHODS.get()); + EventLoopGroup eventLoopGroup = new NioEventLoopGroup(); + Timer timer = new HashedWheelTimer(); + doReturn(eventLoopGroup).when(nettyOptions).eventLoopGroup(any(ThreadFactory.class)); + doReturn(timer).when(nettyOptions).timer(any(ThreadFactory.class)); + final ChannelHandler handler = mock(ChannelHandler.class); + doAnswer( + new 
Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { SocketChannel channel = (SocketChannel) invocation.getArguments()[0]; channel.pipeline().addLast("test-handler", handler); return null; - } - }).when(nettyOptions).afterChannelInitialized(any(SocketChannel.class)); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withPoolingOptions(new PoolingOptions() - .setConnectionsPerHost(HostDistance.LOCAL, coreConnections, coreConnections) - ) + } + }) + .when(nettyOptions) + .afterChannelInitialized(any(SocketChannel.class)); + Cluster cluster = + register( + createClusterBuilder() + .withPoolingOptions( + new PoolingOptions() + .setConnectionsPerHost( + HostDistance.LOCAL, coreConnections, coreConnections)) .withNettyOptions(nettyOptions) .build()); - // when - cluster.connect();// force session creation to populate pools + // when + cluster.connect(); // force session creation to populate pools - int expectedNumberOfCalls = TestUtils.numberOfLocalCoreConnections(cluster) * hosts + 1; - // If the driver supports a more recent protocol version than C*, the negotiation at startup - // will open an additional connection for each protocol version tried. - ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; - ProtocolVersion usedVersion = ccm().getProtocolVersion(); - while (version != usedVersion && version != null) { - version = version.getLowerSupported(); - expectedNumberOfCalls++; - } - - cluster.close(); - // then - verify(nettyOptions, times(1)).eventLoopGroup(any(ThreadFactory.class)); - verify(nettyOptions, times(1)).channelClass(); - verify(nettyOptions, times(1)).timer(any(ThreadFactory.class)); - // per-connection hooks will be called coreConnections * hosts + 1 times: - // the extra call is for the control connection - verify(nettyOptions, times(expectedNumberOfCalls)).afterBootstrapInitialized(any(Bootstrap.class)); - verify(nettyOptions, times(expectedNumberOfCalls)).afterChannelInitialized(any(SocketChannel.class)); - verify(handler, times(expectedNumberOfCalls)).handlerAdded(any(ChannelHandlerContext.class)); - verify(handler, times(expectedNumberOfCalls)).handlerRemoved(any(ChannelHandlerContext.class)); - verify(nettyOptions, times(1)).onClusterClose(eventLoopGroup); - verify(nettyOptions, times(1)).onClusterClose(timer); - verifyNoMoreInteractions(nettyOptions); + int expectedNumberOfCalls = TestUtils.numberOfLocalCoreConnections(cluster) * hosts + 1; + // If the driver supports a more recent protocol version than C*, the negotiation at startup + // will open an additional connection for each protocol version tried. 
+ ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; + ProtocolVersion usedVersion = ccm().getProtocolVersion(); + while (version != usedVersion && version != null) { + version = version.getLowerSupported(); + expectedNumberOfCalls++; } + + cluster.close(); + // then + verify(nettyOptions, times(1)).eventLoopGroup(any(ThreadFactory.class)); + verify(nettyOptions, times(1)).channelClass(); + verify(nettyOptions, times(1)).timer(any(ThreadFactory.class)); + // per-connection hooks will be called coreConnections * hosts + 1 times: + // the extra call is for the control connection + verify(nettyOptions, times(expectedNumberOfCalls)) + .afterBootstrapInitialized(any(Bootstrap.class)); + verify(nettyOptions, times(expectedNumberOfCalls)) + .afterChannelInitialized(any(SocketChannel.class)); + verify(handler, times(expectedNumberOfCalls)).handlerAdded(any(ChannelHandlerContext.class)); + verify(handler, times(expectedNumberOfCalls)).handlerRemoved(any(ChannelHandlerContext.class)); + verify(nettyOptions, times(1)).onClusterClose(eventLoopGroup); + verify(nettyOptions, times(1)).onClusterClose(timer); + verifyNoMoreInteractions(nettyOptions); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/NetworkTopologyStrategyTest.java b/driver-core/src/test/java/com/datastax/driver/core/NetworkTopologyStrategyTest.java index 483c3a4d370..ec895922d59 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NetworkTopologyStrategyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NetworkTopologyStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,115 +17,316 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap.Builder; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.testng.annotations.Test; - import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.Set; - -import static org.assertj.core.api.Assertions.assertThat; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.testng.annotations.Test; public class NetworkTopologyStrategyTest extends AbstractReplicationStrategyTest { - private static class ReplicationFactorDefinition { - public final String dc; - public final int replicationFactor; + private static class ReplicationFactorDefinition { + public final String dc; + public final int replicationFactor; - public ReplicationFactorDefinition(String dc, int replicationFactor) { - this.dc = dc; - this.replicationFactor = replicationFactor; - } + public ReplicationFactorDefinition(String dc, int replicationFactor) { + this.dc = dc; + this.replicationFactor = replicationFactor; } - - private static ReplicationFactorDefinition rf(String dc, int replicationFactor) { - return new ReplicationFactorDefinition(dc, replicationFactor); + } + + private static ReplicationFactorDefinition rf(String dc, int replicationFactor) { + return new ReplicationFactorDefinition(dc, replicationFactor); + } + + private static ReplicationStrategy networkTopologyStrategy(ReplicationFactorDefinition... rfs) { + Builder builder = + ImmutableMap.builder().put("class", "NetworkTopologyStrategy"); + + for (ReplicationFactorDefinition rf : rfs) + builder.put(rf.dc, String.valueOf(rf.replicationFactor)); + + return ReplicationStrategy.create(builder.build()); + } + + /* + * --------------------------------------------------------------------------- + * Ring, replication, etc... setup. These are reusable for the tests + * This data is based on a real ring topology. Most tests are using + * smaller and more specific topologies instead. 
+ * --------------------------------------------------------------------------- + */ + + private static final String DC1 = "DC1"; + private static final String DC2 = "DC2"; + private static final String DC3 = "DC3"; + private static final String RACK11 = "RACK11"; + private static final String RACK12 = "RACK12"; + private static final String RACK21 = "RACK21"; + private static final String RACK22 = "RACK22"; + private static final String RACK31 = "RACK31"; + + private static final Token TOKEN01 = token("-9000000000000000000"); + private static final Token TOKEN02 = token("-8000000000000000000"); + private static final Token TOKEN03 = token("-7000000000000000000"); + private static final Token TOKEN04 = token("-6000000000000000000"); + private static final Token TOKEN05 = token("-5000000000000000000"); + private static final Token TOKEN06 = token("-4000000000000000000"); + private static final Token TOKEN07 = token("-3000000000000000000"); + private static final Token TOKEN08 = token("-2000000000000000000"); + private static final Token TOKEN09 = token("-1000000000000000000"); + private static final Token TOKEN10 = token("0"); + private static final Token TOKEN11 = token("1000000000000000000"); + private static final Token TOKEN12 = token("2000000000000000000"); + private static final Token TOKEN13 = token("3000000000000000000"); + private static final Token TOKEN14 = token("4000000000000000000"); + private static final Token TOKEN15 = token("5000000000000000000"); + private static final Token TOKEN16 = token("6000000000000000000"); + private static final Token TOKEN17 = token("7000000000000000000"); + private static final Token TOKEN18 = token("8000000000000000000"); + private static final Token TOKEN19 = token("9000000000000000000"); + + private static final InetSocketAddress IP1 = socketAddress("127.0.0.101"); + private static final InetSocketAddress IP2 = socketAddress("127.0.0.102"); + private static final InetSocketAddress IP3 = socketAddress("127.0.0.103"); + private static final InetSocketAddress IP4 = socketAddress("127.0.0.104"); + private static final InetSocketAddress IP5 = socketAddress("127.0.0.105"); + private static final InetSocketAddress IP6 = socketAddress("127.0.0.106"); + private static final InetSocketAddress IP7 = socketAddress("127.0.0.107"); + private static final InetSocketAddress IP8 = socketAddress("127.0.0.108"); + + private static final ReplicationStrategy exampleStrategy = + networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); + + private static final ReplicationStrategy exampleStrategyTooManyReplicas = + networkTopologyStrategy(rf(DC1, 4), rf(DC2, 4)); + + private static final List largeRing = Lists.newArrayList(); + private static final Map largeRingTokenToPrimary = Maps.newHashMap(); + + private static final String keyspace = "Excelsior"; + + static { + for (int i = 0; i < 100; i++) { + InetSocketAddress address = socketAddress("127.0.0." 
+ i); + for (int vnodes = 0; vnodes < 256; vnodes++) { + Token token = token("" + ((i * 256) + vnodes)); + largeRing.add(token); + largeRingTokenToPrimary.put(token, host(address, DC1, RACK11)); + } } + } + + private static final List exampleRing = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .add(TOKEN05) + .add(TOKEN06) + .add(TOKEN07) + .add(TOKEN08) + .add(TOKEN09) + .add(TOKEN10) + .add(TOKEN11) + .add(TOKEN12) + .add(TOKEN13) + .add(TOKEN14) + .add(TOKEN15) + .add(TOKEN16) + .add(TOKEN17) + .add(TOKEN18) + .build(); + + private static final Map exampleTokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN02, host(IP1, DC1, RACK11)) + .put(TOKEN03, host(IP5, DC1, RACK12)) + .put(TOKEN04, host(IP3, DC1, RACK11)) + .put(TOKEN05, host(IP1, DC1, RACK11)) + .put(TOKEN06, host(IP5, DC1, RACK12)) + .put(TOKEN07, host(IP2, DC2, RACK21)) + .put(TOKEN08, host(IP6, DC2, RACK22)) + .put(TOKEN09, host(IP3, DC1, RACK11)) + .put(TOKEN10, host(IP4, DC2, RACK21)) + .put(TOKEN11, host(IP5, DC1, RACK12)) + .put(TOKEN12, host(IP4, DC2, RACK21)) + .put(TOKEN13, host(IP4, DC2, RACK21)) + .put(TOKEN14, host(IP2, DC2, RACK21)) + .put(TOKEN15, host(IP6, DC2, RACK22)) + .put(TOKEN16, host(IP3, DC1, RACK11)) + .put(TOKEN17, host(IP2, DC2, RACK21)) + .put(TOKEN18, host(IP6, DC2, RACK22)) + .build(); + + /* + * -------------- + * Tests + * -------------- + */ + + @Test(groups = "unit") + public void networkTopologyWithSimpleDCLayoutTest1() { + List ring = + ImmutableList.builder().add(TOKEN01).add(TOKEN04).add(TOKEN14).add(TOKEN19).build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN04, host(IP2, DC2, RACK21)) + .put(TOKEN14, host(IP1, DC1, RACK11)) + .put(TOKEN19, host(IP2, DC2, RACK21)) + .build(); - private static ReplicationStrategy networkTopologyStrategy(ReplicationFactorDefinition... rfs) { - Builder builder = ImmutableMap.builder() - .put("class", "NetworkTopologyStrategy"); + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1)); - for (ReplicationFactorDefinition rf : rfs) - builder.put(rf.dc, String.valueOf(rf.replicationFactor)); + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - return ReplicationStrategy.create(builder.build()); - } + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN04, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN14, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); + } - /* - * --------------------------------------------------------------------------- - * Ring, replication, etc... setup. These are reusable for the tests - * This data is based on a real ring topology. Most tests are using - * smaller and more specific topologies instead. 
- * --------------------------------------------------------------------------- - */ - - private static final String DC1 = "DC1"; - private static final String DC2 = "DC2"; - private static final String DC3 = "DC3"; - private static final String RACK11 = "RACK11"; - private static final String RACK12 = "RACK12"; - private static final String RACK21 = "RACK21"; - private static final String RACK22 = "RACK22"; - private static final String RACK31 = "RACK31"; - - private static final Token TOKEN01 = token("-9000000000000000000"); - private static final Token TOKEN02 = token("-8000000000000000000"); - private static final Token TOKEN03 = token("-7000000000000000000"); - private static final Token TOKEN04 = token("-6000000000000000000"); - private static final Token TOKEN05 = token("-5000000000000000000"); - private static final Token TOKEN06 = token("-4000000000000000000"); - private static final Token TOKEN07 = token("-3000000000000000000"); - private static final Token TOKEN08 = token("-2000000000000000000"); - private static final Token TOKEN09 = token("-1000000000000000000"); - private static final Token TOKEN10 = token("0"); - private static final Token TOKEN11 = token("1000000000000000000"); - private static final Token TOKEN12 = token("2000000000000000000"); - private static final Token TOKEN13 = token("3000000000000000000"); - private static final Token TOKEN14 = token("4000000000000000000"); - private static final Token TOKEN15 = token("5000000000000000000"); - private static final Token TOKEN16 = token("6000000000000000000"); - private static final Token TOKEN17 = token("7000000000000000000"); - private static final Token TOKEN18 = token("8000000000000000000"); - private static final Token TOKEN19 = token("9000000000000000000"); - - private static final InetSocketAddress IP1 = socketAddress("127.0.0.101"); - private static final InetSocketAddress IP2 = socketAddress("127.0.0.102"); - private static final InetSocketAddress IP3 = socketAddress("127.0.0.103"); - private static final InetSocketAddress IP4 = socketAddress("127.0.0.104"); - private static final InetSocketAddress IP5 = socketAddress("127.0.0.105"); - private static final InetSocketAddress IP6 = socketAddress("127.0.0.106"); - private static final InetSocketAddress IP7 = socketAddress("127.0.0.107"); - private static final InetSocketAddress IP8 = socketAddress("127.0.0.108"); - - private static final ReplicationStrategy exampleStrategy = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); - - private static final ReplicationStrategy exampleStrategyTooManyReplicas = networkTopologyStrategy(rf(DC1, 4), rf(DC2, 4)); - - private static final List largeRing = Lists.newArrayList(); - private static final Map largeRingTokenToPrimary = Maps.newHashMap(); - - private static final String keyspace = "Excelsior"; - - static { - for (int i = 0; i < 100; i++) { - InetSocketAddress address = socketAddress("127.0.0." 
+ i); - for (int vnodes = 0; vnodes < 256; vnodes++) { - Token token = token("" + ((i * 256) + vnodes)); - largeRing.add(token); - largeRingTokenToPrimary.put(token, host(address, DC1, RACK11)); - } - } - } + @Test(groups = "unit") + public void networkTopologyWithSimpleDCLayoutTest2() { + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN03) + .add(TOKEN05) + .add(TOKEN07) + .add(TOKEN13) + .add(TOKEN15) + .add(TOKEN17) + .add(TOKEN19) + .build(); - private static final List exampleRing = ImmutableList.builder() + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN03, host(IP2, DC2, RACK21)) + .put(TOKEN05, host(IP3, DC1, RACK11)) + .put(TOKEN07, host(IP4, DC2, RACK21)) + .put(TOKEN13, host(IP1, DC1, RACK11)) + .put(TOKEN15, host(IP2, DC2, RACK21)) + .put(TOKEN17, host(IP3, DC1, RACK11)) + .put(TOKEN19, host(IP4, DC2, RACK21)) + .build(); + + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN03, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN05, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN07, IP4, IP1); + assertReplicaPlacement(replicaMap, TOKEN13, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN17, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN19, IP4, IP1); + } + + @Test(groups = "unit") + public void networkTopologyWithSimple3DCLayoutTest() { + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN05) + .add(TOKEN09) + .add(TOKEN11) + .add(TOKEN15) + .add(TOKEN19) + .build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN05, host(IP2, DC2, RACK21)) + .put(TOKEN09, host(IP3, DC3, RACK31)) + .put(TOKEN11, host(IP1, DC1, RACK11)) + .put(TOKEN15, host(IP2, DC2, RACK21)) + .put(TOKEN19, host(IP3, DC3, RACK31)) + .build(); + + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1), rf(DC3, 1)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN05, IP2, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN11, IP1, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN19, IP3, IP1, IP2); + } + + @Test(groups = "unit") + public void networkTopologyWithUnbalancedRingTest() { + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN03) + .add(TOKEN05) + .add(TOKEN07) + .add(TOKEN09) + .add(TOKEN11) + .add(TOKEN13) + .add(TOKEN15) + .add(TOKEN17) + .add(TOKEN19) + .build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN03, host(IP1, DC1, RACK11)) + .put(TOKEN05, host(IP2, DC2, RACK21)) + .put(TOKEN07, host(IP3, DC1, RACK11)) + .put(TOKEN09, host(IP4, DC2, RACK21)) + .put(TOKEN11, host(IP1, DC1, RACK11)) + .put(TOKEN13, host(IP1, DC1, RACK11)) + .put(TOKEN15, host(IP2, DC2, RACK21)) + .put(TOKEN17, host(IP3, DC1, RACK11)) + .put(TOKEN19, host(IP4, DC2, RACK21)) + .build(); + + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); + + Map> replicaMap = + 
strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN03, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN05, IP2, IP3, IP4, IP1); + assertReplicaPlacement(replicaMap, TOKEN07, IP3, IP4, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN09, IP4, IP1, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN11, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN13, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3, IP4, IP1); + assertReplicaPlacement(replicaMap, TOKEN17, IP3, IP4, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN19, IP4, IP1, IP2, IP3); + } + + @Test(groups = "unit") + public void networkTopologyWithDCMultirackLayoutTest() { + List ring = + ImmutableList.builder() .add(TOKEN01) .add(TOKEN02) .add(TOKEN03) @@ -132,9 +335,6 @@ private static ReplicationStrategy networkTopologyStrategy(ReplicationFactorDefi .add(TOKEN06) .add(TOKEN07) .add(TOKEN08) - .add(TOKEN09) - .add(TOKEN10) - .add(TOKEN11) .add(TOKEN12) .add(TOKEN13) .add(TOKEN14) @@ -142,538 +342,372 @@ private static ReplicationStrategy networkTopologyStrategy(ReplicationFactorDefi .add(TOKEN16) .add(TOKEN17) .add(TOKEN18) + .add(TOKEN19) .build(); - private static final Map exampleTokenToPrimary = ImmutableMap.builder() + Map tokenToPrimary = + ImmutableMap.builder() .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP1, DC1, RACK11)) - .put(TOKEN03, host(IP5, DC1, RACK12)) - .put(TOKEN04, host(IP3, DC1, RACK11)) - .put(TOKEN05, host(IP1, DC1, RACK11)) - .put(TOKEN06, host(IP5, DC1, RACK12)) - .put(TOKEN07, host(IP2, DC2, RACK21)) - .put(TOKEN08, host(IP6, DC2, RACK22)) - .put(TOKEN09, host(IP3, DC1, RACK11)) - .put(TOKEN10, host(IP4, DC2, RACK21)) - .put(TOKEN11, host(IP5, DC1, RACK12)) - .put(TOKEN12, host(IP4, DC2, RACK21)) - .put(TOKEN13, host(IP4, DC2, RACK21)) - .put(TOKEN14, host(IP2, DC2, RACK21)) - .put(TOKEN15, host(IP6, DC2, RACK22)) - .put(TOKEN16, host(IP3, DC1, RACK11)) - .put(TOKEN17, host(IP2, DC2, RACK21)) - .put(TOKEN18, host(IP6, DC2, RACK22)) + .put(TOKEN02, host(IP2, DC2, RACK21)) + .put(TOKEN03, host(IP3, DC1, RACK12)) + .put(TOKEN04, host(IP4, DC2, RACK22)) + .put(TOKEN05, host(IP5, DC1, RACK11)) + .put(TOKEN06, host(IP6, DC2, RACK21)) + .put(TOKEN07, host(IP7, DC1, RACK12)) + .put(TOKEN08, host(IP8, DC2, RACK22)) + .put(TOKEN12, host(IP1, DC1, RACK11)) + .put(TOKEN13, host(IP2, DC2, RACK21)) + .put(TOKEN14, host(IP3, DC1, RACK12)) + .put(TOKEN15, host(IP4, DC2, RACK22)) + .put(TOKEN16, host(IP5, DC1, RACK11)) + .put(TOKEN17, host(IP6, DC2, RACK21)) + .put(TOKEN18, host(IP7, DC1, RACK12)) + .put(TOKEN19, host(IP8, DC2, RACK22)) .build(); - /* - * -------------- - * Tests - * -------------- - */ - - @Test(groups = "unit") - public void networkTopologyWithSimpleDCLayoutTest1() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN04) - .add(TOKEN14) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN04, host(IP2, DC2, RACK21)) - .put(TOKEN14, host(IP1, DC1, RACK11)) - .put(TOKEN19, host(IP2, DC2, RACK21)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN04, IP2, IP1); - 
assertReplicaPlacement(replicaMap, TOKEN14, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); - } - - @Test(groups = "unit") - public void networkTopologyWithSimpleDCLayoutTest2() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN03) - .add(TOKEN05) - .add(TOKEN07) - .add(TOKEN13) - .add(TOKEN15) - .add(TOKEN17) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN03, host(IP2, DC2, RACK21)) - .put(TOKEN05, host(IP3, DC1, RACK11)) - .put(TOKEN07, host(IP4, DC2, RACK21)) - .put(TOKEN13, host(IP1, DC1, RACK11)) - .put(TOKEN15, host(IP2, DC2, RACK21)) - .put(TOKEN17, host(IP3, DC1, RACK11)) - .put(TOKEN19, host(IP4, DC2, RACK21)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN03, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN05, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN07, IP4, IP1); - assertReplicaPlacement(replicaMap, TOKEN13, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN17, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN19, IP4, IP1); - } - - @Test(groups = "unit") - public void networkTopologyWithSimple3DCLayoutTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN05) - .add(TOKEN09) - .add(TOKEN11) - .add(TOKEN15) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN05, host(IP2, DC2, RACK21)) - .put(TOKEN09, host(IP3, DC3, RACK31)) - .put(TOKEN11, host(IP1, DC1, RACK11)) - .put(TOKEN15, host(IP2, DC2, RACK21)) - .put(TOKEN19, host(IP3, DC3, RACK31)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 1), rf(DC2, 1), rf(DC3, 1)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN05, IP2, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN11, IP1, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN19, IP3, IP1, IP2); - } - - @Test(groups = "unit") - public void networkTopologyWithUnbalancedRingTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN03) - .add(TOKEN05) - .add(TOKEN07) - .add(TOKEN09) - .add(TOKEN11) - .add(TOKEN13) - .add(TOKEN15) - .add(TOKEN17) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN03, host(IP1, DC1, RACK11)) - .put(TOKEN05, host(IP2, DC2, RACK21)) - .put(TOKEN07, host(IP3, DC1, RACK11)) - .put(TOKEN09, host(IP4, DC2, RACK21)) - .put(TOKEN11, host(IP1, DC1, RACK11)) - .put(TOKEN13, host(IP1, DC1, RACK11)) - .put(TOKEN15, host(IP2, DC2, RACK21)) - .put(TOKEN17, host(IP3, DC1, RACK11)) - .put(TOKEN19, host(IP4, DC2, RACK21)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN03, IP1, IP2, IP3, IP4); - 
assertReplicaPlacement(replicaMap, TOKEN05, IP2, IP3, IP4, IP1); - assertReplicaPlacement(replicaMap, TOKEN07, IP3, IP4, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN09, IP4, IP1, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN11, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN13, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN15, IP2, IP3, IP4, IP1); - assertReplicaPlacement(replicaMap, TOKEN17, IP3, IP4, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN19, IP4, IP1, IP2, IP3); - } - - @Test(groups = "unit") - public void networkTopologyWithDCMultirackLayoutTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP2, DC2, RACK21)) - .put(TOKEN03, host(IP3, DC1, RACK12)) - .put(TOKEN04, host(IP4, DC2, RACK22)) - .put(TOKEN05, host(IP5, DC1, RACK11)) - .put(TOKEN06, host(IP6, DC2, RACK21)) - .put(TOKEN07, host(IP7, DC1, RACK12)) - .put(TOKEN08, host(IP8, DC2, RACK22)) - .put(TOKEN12, host(IP1, DC1, RACK11)) - .put(TOKEN13, host(IP2, DC2, RACK21)) - .put(TOKEN14, host(IP3, DC1, RACK12)) - .put(TOKEN15, host(IP4, DC2, RACK22)) - .put(TOKEN16, host(IP5, DC1, RACK11)) - .put(TOKEN17, host(IP6, DC2, RACK21)) - .put(TOKEN18, host(IP7, DC1, RACK12)) - .put(TOKEN19, host(IP8, DC2, RACK22)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN02, IP2, IP3, IP4, IP5); - assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP4, IP5, IP6); - assertReplicaPlacement(replicaMap, TOKEN04, IP4, IP5, IP6, IP7); - assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP6, IP7, IP8); - assertReplicaPlacement(replicaMap, TOKEN06, IP6, IP7, IP8, IP1); - assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP8, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN08, IP8, IP1, IP2, IP3); - assertReplicaPlacement(replicaMap, TOKEN12, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP3, IP4, IP5); - assertReplicaPlacement(replicaMap, TOKEN14, IP3, IP4, IP5, IP6); - assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP5, IP6, IP7); - assertReplicaPlacement(replicaMap, TOKEN16, IP5, IP6, IP7, IP8); - assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP7, IP8, IP1); - assertReplicaPlacement(replicaMap, TOKEN18, IP7, IP8, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP2, IP3); - } - - @Test(groups = "unit") - public void networkTopologyWithMultirackHostSkippingTest1() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .add(TOKEN19) - .build(); - - //this is to simulate when we hit the same rack in a DC first as a second replica - //so that'll get skipped and re-added later as a third - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP2, DC2, RACK21)) - 
.put(TOKEN03, host(IP3, DC1, RACK11)) - .put(TOKEN04, host(IP4, DC2, RACK21)) - .put(TOKEN05, host(IP5, DC1, RACK12)) - .put(TOKEN06, host(IP6, DC2, RACK22)) - .put(TOKEN07, host(IP7, DC1, RACK12)) - .put(TOKEN08, host(IP8, DC2, RACK22)) - .put(TOKEN12, host(IP1, DC1, RACK11)) - .put(TOKEN13, host(IP2, DC2, RACK21)) - .put(TOKEN14, host(IP3, DC1, RACK11)) - .put(TOKEN15, host(IP4, DC2, RACK21)) - .put(TOKEN16, host(IP5, DC1, RACK12)) - .put(TOKEN17, host(IP6, DC2, RACK22)) - .put(TOKEN18, host(IP7, DC1, RACK12)) - .put(TOKEN19, host(IP8, DC2, RACK22)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 3), rf(DC2, 3)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP5, IP3, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN02, IP2, IP3, IP5, IP6, IP4, IP7); - assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP4, IP5, IP6, IP7, IP8); - assertReplicaPlacement(replicaMap, TOKEN04, IP4, IP5, IP6, IP8, IP1, IP7); - assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP6, IP1, IP7, IP2, IP8); - assertReplicaPlacement(replicaMap, TOKEN06, IP6, IP7, IP1, IP2, IP8, IP3); - assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP8, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN08, IP8, IP1, IP2, IP4, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN12, IP1, IP2, IP5, IP3, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP3, IP5, IP6, IP4, IP7); - assertReplicaPlacement(replicaMap, TOKEN14, IP3, IP4, IP5, IP6, IP7, IP8); - assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP5, IP6, IP8, IP1, IP7); - assertReplicaPlacement(replicaMap, TOKEN16, IP5, IP6, IP1, IP7, IP2, IP8); - assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP7, IP1, IP2, IP8, IP3); - assertReplicaPlacement(replicaMap, TOKEN18, IP7, IP8, IP1, IP2, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP2, IP4, IP5, IP3); - - } + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN02, IP2, IP3, IP4, IP5); + assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP4, IP5, IP6); + assertReplicaPlacement(replicaMap, TOKEN04, IP4, IP5, IP6, IP7); + assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP6, IP7, IP8); + assertReplicaPlacement(replicaMap, TOKEN06, IP6, IP7, IP8, IP1); + assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP8, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN08, IP8, IP1, IP2, IP3); + assertReplicaPlacement(replicaMap, TOKEN12, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP3, IP4, IP5); + assertReplicaPlacement(replicaMap, TOKEN14, IP3, IP4, IP5, IP6); + assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP5, IP6, IP7); + assertReplicaPlacement(replicaMap, TOKEN16, IP5, IP6, IP7, IP8); + assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP7, IP8, IP1); + assertReplicaPlacement(replicaMap, TOKEN18, IP7, IP8, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP2, IP3); + } + + @Test(groups = "unit") + public void networkTopologyWithMultirackHostSkippingTest1() { + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .add(TOKEN05) + .add(TOKEN06) + .add(TOKEN07) + .add(TOKEN08) + .add(TOKEN12) + .add(TOKEN13) + .add(TOKEN14) + 
.add(TOKEN15) + .add(TOKEN16) + .add(TOKEN17) + .add(TOKEN18) + .add(TOKEN19) + .build(); - @Test(groups = "unit") - public void networkTopologyWithMultirackHostSkippingTest2() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP1, DC1, RACK11)) - .put(TOKEN03, host(IP3, DC1, RACK11)) - .put(TOKEN04, host(IP3, DC1, RACK11)) - .put(TOKEN05, host(IP5, DC1, RACK12)) - .put(TOKEN06, host(IP5, DC1, RACK12)) - .put(TOKEN07, host(IP7, DC1, RACK12)) - .put(TOKEN08, host(IP7, DC1, RACK12)) - .put(TOKEN12, host(IP2, DC2, RACK21)) - .put(TOKEN13, host(IP2, DC2, RACK21)) - .put(TOKEN14, host(IP4, DC2, RACK21)) - .put(TOKEN15, host(IP4, DC2, RACK21)) - .put(TOKEN16, host(IP6, DC2, RACK22)) - .put(TOKEN17, host(IP6, DC2, RACK22)) - .put(TOKEN18, host(IP8, DC2, RACK22)) - .put(TOKEN19, host(IP8, DC2, RACK22)) - .build(); - - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 3), rf(DC2, 3)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP5, IP7, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP7, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP2, IP6, IP4, IP1, IP7); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP4, IP1, IP7); - assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP2, IP6, IP4, IP1, IP3); - assertReplicaPlacement(replicaMap, TOKEN08, IP7, IP2, IP6, IP4, IP1, IP3); - assertReplicaPlacement(replicaMap, TOKEN12, IP2, IP6, IP4, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP6, IP4, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN14, IP4, IP6, IP8, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP6, IP8, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN16, IP6, IP1, IP5, IP3, IP2, IP8); - assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP1, IP5, IP3, IP2, IP8); - assertReplicaPlacement(replicaMap, TOKEN18, IP8, IP1, IP5, IP3, IP2, IP4); - assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP5, IP3, IP2, IP4); - } + // this is to simulate when we hit the same rack in a DC first as a second replica + // so that'll get skipped and re-added later as a third + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN02, host(IP2, DC2, RACK21)) + .put(TOKEN03, host(IP3, DC1, RACK11)) + .put(TOKEN04, host(IP4, DC2, RACK21)) + .put(TOKEN05, host(IP5, DC1, RACK12)) + .put(TOKEN06, host(IP6, DC2, RACK22)) + .put(TOKEN07, host(IP7, DC1, RACK12)) + .put(TOKEN08, host(IP8, DC2, RACK22)) + .put(TOKEN12, host(IP1, DC1, RACK11)) + .put(TOKEN13, host(IP2, DC2, RACK21)) + .put(TOKEN14, host(IP3, DC1, RACK11)) + .put(TOKEN15, host(IP4, DC2, RACK21)) + .put(TOKEN16, host(IP5, DC1, RACK12)) + .put(TOKEN17, host(IP6, DC2, RACK22)) + .put(TOKEN18, host(IP7, DC1, RACK12)) + .put(TOKEN19, host(IP8, DC2, RACK22)) + .build(); - @Test(groups = "unit") - public void networkTopologyWithMultirackHostSkippingTest3() { - //this is the same 
topology as in the previous test, but with different rfs - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP1, DC1, RACK11)) - .put(TOKEN03, host(IP3, DC1, RACK11)) - .put(TOKEN04, host(IP3, DC1, RACK11)) - .put(TOKEN05, host(IP5, DC1, RACK12)) - .put(TOKEN06, host(IP5, DC1, RACK12)) - .put(TOKEN07, host(IP7, DC1, RACK12)) - .put(TOKEN08, host(IP7, DC1, RACK12)) - .put(TOKEN12, host(IP2, DC2, RACK21)) - .put(TOKEN13, host(IP2, DC2, RACK21)) - .put(TOKEN14, host(IP4, DC2, RACK21)) - .put(TOKEN15, host(IP4, DC2, RACK21)) - .put(TOKEN16, host(IP6, DC2, RACK22)) - .put(TOKEN17, host(IP6, DC2, RACK22)) - .put(TOKEN18, host(IP8, DC2, RACK22)) - .put(TOKEN19, host(IP8, DC2, RACK22)) - .build(); - - //all nodes will contain all data, question is the replica order - ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 4), rf(DC2, 4)); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP7, IP2, IP6, IP4, IP8); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP7, IP2, IP6, IP4, IP8); - assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP5, IP7, IP2, IP6, IP4, IP8, IP1); - assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP7, IP2, IP6, IP4, IP8, IP1); - assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP2, IP6, IP4, IP8, IP1, IP7, IP3); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP4, IP8, IP1, IP7, IP3); - assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP2, IP6, IP4, IP8, IP1, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN08, IP7, IP2, IP6, IP4, IP8, IP1, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN12, IP2, IP6, IP4, IP8, IP1, IP5, IP3, IP7); - assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP6, IP4, IP8, IP1, IP5, IP3, IP7); - assertReplicaPlacement(replicaMap, TOKEN14, IP4, IP6, IP8, IP1, IP5, IP3, IP7, IP2); - assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP6, IP8, IP1, IP5, IP3, IP7, IP2); - assertReplicaPlacement(replicaMap, TOKEN16, IP6, IP1, IP5, IP3, IP7, IP2, IP8, IP4); - assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP1, IP5, IP3, IP7, IP2, IP8, IP4); - assertReplicaPlacement(replicaMap, TOKEN18, IP8, IP1, IP5, IP3, IP7, IP2, IP4, IP6); - assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP5, IP3, IP7, IP2, IP4, IP6); - } + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 3), rf(DC2, 3)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2, IP5, IP3, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN02, IP2, IP3, IP5, IP6, IP4, IP7); + assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP4, IP5, IP6, IP7, IP8); + assertReplicaPlacement(replicaMap, TOKEN04, IP4, IP5, IP6, IP8, IP1, IP7); + assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP6, IP1, IP7, IP2, IP8); + assertReplicaPlacement(replicaMap, TOKEN06, IP6, IP7, IP1, IP2, IP8, IP3); + assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP8, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN08, IP8, IP1, IP2, IP4, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN12, IP1, IP2, IP5, 
IP3, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP3, IP5, IP6, IP4, IP7); + assertReplicaPlacement(replicaMap, TOKEN14, IP3, IP4, IP5, IP6, IP7, IP8); + assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP5, IP6, IP8, IP1, IP7); + assertReplicaPlacement(replicaMap, TOKEN16, IP5, IP6, IP1, IP7, IP2, IP8); + assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP7, IP1, IP2, IP8, IP3); + assertReplicaPlacement(replicaMap, TOKEN18, IP7, IP8, IP1, IP2, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP2, IP4, IP5, IP3); + } + + @Test(groups = "unit") + public void networkTopologyWithMultirackHostSkippingTest2() { + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .add(TOKEN05) + .add(TOKEN06) + .add(TOKEN07) + .add(TOKEN08) + .add(TOKEN12) + .add(TOKEN13) + .add(TOKEN14) + .add(TOKEN15) + .add(TOKEN16) + .add(TOKEN17) + .add(TOKEN18) + .add(TOKEN19) + .build(); - @Test(groups = "unit") - public void networkTopologyStrategyExampleTopologyTest() { - Map> replicaMap = exampleStrategy.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); - - //105 and 106 will appear as replica for all as they're in separate racks - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3); - assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5); - assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP6); - assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP6, IP3); - assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP6, IP3); - assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP6, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP6, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, IP5); - assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP5); - assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP5); - assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5); - assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP2); - } + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN02, host(IP1, DC1, RACK11)) + .put(TOKEN03, host(IP3, DC1, RACK11)) + .put(TOKEN04, host(IP3, DC1, RACK11)) + .put(TOKEN05, host(IP5, DC1, RACK12)) + .put(TOKEN06, host(IP5, DC1, RACK12)) + .put(TOKEN07, host(IP7, DC1, RACK12)) + .put(TOKEN08, host(IP7, DC1, RACK12)) + .put(TOKEN12, host(IP2, DC2, RACK21)) + .put(TOKEN13, host(IP2, DC2, RACK21)) + .put(TOKEN14, host(IP4, DC2, RACK21)) + .put(TOKEN15, host(IP4, DC2, RACK21)) + .put(TOKEN16, host(IP6, DC2, RACK22)) + .put(TOKEN17, host(IP6, DC2, RACK22)) + .put(TOKEN18, host(IP8, DC2, RACK22)) + .put(TOKEN19, host(IP8, DC2, RACK22)) + .build(); - @Test(groups = "unit") - public void networkTopologyStrategyNoNodesInDCTest() { - long t1 = System.currentTimeMillis(); - Map> replicaMap = networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)) - .computeTokenToReplicaMap(keyspace, largeRingTokenToPrimary, largeRing); - assertThat(System.currentTimeMillis() - t1).isLessThan(10000); - - InetSocketAddress currNode = null; - InetSocketAddress nextNode; - for (int node 
= 0; node < 99; node++) { // 100th wraps so doesn't match this, check after - if (currNode == null) { - currNode = socketAddress("127.0.0." + node); - } - nextNode = socketAddress("127.0.0." + (node + 1)); - for (int vnodes = 0; vnodes < 256; vnodes++) { - Token token = token("" + ((node * 256) + vnodes)); - assertReplicaPlacement(replicaMap, token, currNode, nextNode); - } - currNode = nextNode; - } - assertReplicaPlacement(replicaMap, token("" + 99 * 256), currNode, socketAddress("127.0.0.0")); - } + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 3), rf(DC2, 3)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP5, IP7, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP7, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP2, IP6, IP4, IP1, IP7); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP4, IP1, IP7); + assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP2, IP6, IP4, IP1, IP3); + assertReplicaPlacement(replicaMap, TOKEN08, IP7, IP2, IP6, IP4, IP1, IP3); + assertReplicaPlacement(replicaMap, TOKEN12, IP2, IP6, IP4, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP6, IP4, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN14, IP4, IP6, IP8, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP6, IP8, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN16, IP6, IP1, IP5, IP3, IP2, IP8); + assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP1, IP5, IP3, IP2, IP8); + assertReplicaPlacement(replicaMap, TOKEN18, IP8, IP1, IP5, IP3, IP2, IP4); + assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP5, IP3, IP2, IP4); + } + + @Test(groups = "unit") + public void networkTopologyWithMultirackHostSkippingTest3() { + // this is the same topology as in the previous test, but with different rfs + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .add(TOKEN05) + .add(TOKEN06) + .add(TOKEN07) + .add(TOKEN08) + .add(TOKEN12) + .add(TOKEN13) + .add(TOKEN14) + .add(TOKEN15) + .add(TOKEN16) + .add(TOKEN17) + .add(TOKEN18) + .add(TOKEN19) + .build(); + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN02, host(IP1, DC1, RACK11)) + .put(TOKEN03, host(IP3, DC1, RACK11)) + .put(TOKEN04, host(IP3, DC1, RACK11)) + .put(TOKEN05, host(IP5, DC1, RACK12)) + .put(TOKEN06, host(IP5, DC1, RACK12)) + .put(TOKEN07, host(IP7, DC1, RACK12)) + .put(TOKEN08, host(IP7, DC1, RACK12)) + .put(TOKEN12, host(IP2, DC2, RACK21)) + .put(TOKEN13, host(IP2, DC2, RACK21)) + .put(TOKEN14, host(IP4, DC2, RACK21)) + .put(TOKEN15, host(IP4, DC2, RACK21)) + .put(TOKEN16, host(IP6, DC2, RACK22)) + .put(TOKEN17, host(IP6, DC2, RACK22)) + .put(TOKEN18, host(IP8, DC2, RACK22)) + .put(TOKEN19, host(IP8, DC2, RACK22)) + .build(); - @Test(groups = "unit") - public void networkTopologyStrategyExampleTopologyTooManyReplicasTest() { - Map> replicaMap = exampleStrategyTooManyReplicas.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1, IP2, IP6, IP4); - 
assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP1, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3, IP4, IP1); - assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP4, IP5, IP1); - assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP6, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP6, IP2, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP6, IP2, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP6, IP2, IP3, IP5, IP1); - assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP6, IP2, IP3, IP5, IP1); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, IP5, IP1, IP4); - assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP5, IP1, IP4); - assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP5, IP1, IP4); - assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP3, IP2, IP4); + // all nodes will contain all data, question is the replica order + ReplicationStrategy strategy = networkTopologyStrategy(rf(DC1, 4), rf(DC2, 4)); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP7, IP2, IP6, IP4, IP8); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP7, IP2, IP6, IP4, IP8); + assertReplicaPlacement(replicaMap, TOKEN03, IP3, IP5, IP7, IP2, IP6, IP4, IP8, IP1); + assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP7, IP2, IP6, IP4, IP8, IP1); + assertReplicaPlacement(replicaMap, TOKEN05, IP5, IP2, IP6, IP4, IP8, IP1, IP7, IP3); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP4, IP8, IP1, IP7, IP3); + assertReplicaPlacement(replicaMap, TOKEN07, IP7, IP2, IP6, IP4, IP8, IP1, IP3, IP5); + assertReplicaPlacement(replicaMap, TOKEN08, IP7, IP2, IP6, IP4, IP8, IP1, IP3, IP5); + assertReplicaPlacement(replicaMap, TOKEN12, IP2, IP6, IP4, IP8, IP1, IP5, IP3, IP7); + assertReplicaPlacement(replicaMap, TOKEN13, IP2, IP6, IP4, IP8, IP1, IP5, IP3, IP7); + assertReplicaPlacement(replicaMap, TOKEN14, IP4, IP6, IP8, IP1, IP5, IP3, IP7, IP2); + assertReplicaPlacement(replicaMap, TOKEN15, IP4, IP6, IP8, IP1, IP5, IP3, IP7, IP2); + assertReplicaPlacement(replicaMap, TOKEN16, IP6, IP1, IP5, IP3, IP7, IP2, IP8, IP4); + assertReplicaPlacement(replicaMap, TOKEN17, IP6, IP1, IP5, IP3, IP7, IP2, IP8, IP4); + assertReplicaPlacement(replicaMap, TOKEN18, IP8, IP1, IP5, IP3, IP7, IP2, IP4, IP6); + assertReplicaPlacement(replicaMap, TOKEN19, IP8, IP1, IP5, IP3, IP7, IP2, IP4, IP6); + } + + @Test(groups = "unit") + public void networkTopologyStrategyExampleTopologyTest() { + Map> replicaMap = + exampleStrategy.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); + + // 105 and 106 will appear as replica for all as they're in separate racks + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3); + assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP5); + 
assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5); + assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP6); + assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP6, IP3); + assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP6, IP3); + assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP6, IP3, IP5); + assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP6, IP3, IP5); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, IP5); + assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP5); + assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP5); + assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5); + assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP2); + } + + @Test(groups = "unit") + public void networkTopologyStrategyNoNodesInDCTest() { + long t1 = System.currentTimeMillis(); + Map> replicaMap = + networkTopologyStrategy(rf(DC1, 2), rf(DC2, 2)) + .computeTokenToReplicaMap(keyspace, largeRingTokenToPrimary, largeRing); + assertThat(System.currentTimeMillis() - t1).isLessThan(10000); + + InetSocketAddress currNode = null; + InetSocketAddress nextNode; + for (int node = 0; node < 99; node++) { // 100th wraps so doesn't match this, check after + if (currNode == null) { + currNode = socketAddress("127.0.0." + node); + } + nextNode = socketAddress("127.0.0." + (node + 1)); + for (int vnodes = 0; vnodes < 256; vnodes++) { + Token token = token("" + ((node * 256) + vnodes)); + assertReplicaPlacement(replicaMap, token, currNode, nextNode); + } + currNode = nextNode; } - - @Test(groups = "unit") - public void should_warn_if_replication_factor_cannot_be_met() { - Logger logger = Logger.getLogger(ReplicationStrategy.NetworkTopologyStrategy.class); - MemoryAppender logs = new MemoryAppender(); - Level originalLevel = logger.getLevel(); - try { - logger.setLevel(Level.WARN); - logger.addAppender(logs); - - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1, DC1, RACK11)) - .put(TOKEN02, host(IP2, DC1, RACK12)) - .put(TOKEN03, host(IP3, DC2, RACK21)) - .put(TOKEN04, host(IP4, DC2, RACK22)) - .build(); - - // Wrong configuration: impossible replication factor for DC2 - networkTopologyStrategy(rf(DC1, 2), rf(DC2, 3)) - .computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - assertThat(logs.getNext()) - .contains(String.format("Error while computing token map for keyspace %s with datacenter %s", keyspace, DC2)); - - // Wrong configuration: non-existing datacenter - networkTopologyStrategy(rf(DC1, 2), rf("does_not_exist", 2)) - .computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - assertThat(logs.getNext()) - .contains(String.format("Error while computing token map for keyspace %s with datacenter %s", keyspace, "does_not_exist")); - } finally { - logger.setLevel(originalLevel); - logger.removeAppender(logs); - } + assertReplicaPlacement(replicaMap, token("" + 99 * 256), currNode, socketAddress("127.0.0.0")); + } + + @Test(groups = "unit") + public void networkTopologyStrategyExampleTopologyTooManyReplicasTest() { + Map> replicaMap = + exampleStrategyTooManyReplicas.computeTokenToReplicaMap( + keyspace, exampleTokenToPrimary, exampleRing); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1, IP2, IP6, IP4); + 
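Editor's aside (not part of this patch): the assertions in these tests exercise NetworkTopologyStrategy replica placement directly against the driver's internal token map. A minimal sketch of how an application can read the same replica sets at runtime through the public Metadata API follows; the contact point, keyspace name and key value are placeholders, and the cluster is assumed to already have the keyspace defined.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Set;

public class ReplicaLookupSketch {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
      // init() opens the control connection and computes the token/replica map.
      cluster.init();
      Metadata metadata = cluster.getMetadata();
      // Serialized routing key for a single text partition key column.
      ByteBuffer key = ByteBuffer.wrap("user-42".getBytes(StandardCharsets.UTF_8));
      Set<Host> replicas = metadata.getReplicas("my_keyspace", key);
      for (Host replica : replicas) {
        System.out.printf(
            "%s (dc=%s, rack=%s)%n",
            replica.getAddress(), replica.getDatacenter(), replica.getRack());
      }
    }
  }
}
```

For a keyspace using NetworkTopologyStrategy, the hosts returned this way correspond to the per-datacenter placements that these unit tests pin down.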
assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP5, IP1, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3, IP4, IP1); + assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP4, IP5, IP1); + assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP6, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP6, IP2, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP6, IP2, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP6, IP2, IP3, IP5, IP1); + assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP6, IP2, IP3, IP5, IP1); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, IP5, IP1, IP4); + assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP5, IP1, IP4); + assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP5, IP1, IP4); + assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP3, IP2, IP4); + } + + @Test(groups = "unit") + public void should_warn_if_replication_factor_cannot_be_met() { + Logger logger = Logger.getLogger(ReplicationStrategy.NetworkTopologyStrategy.class); + MemoryAppender logs = new MemoryAppender(); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.WARN); + logger.addAppender(logs); + + List ring = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1, DC1, RACK11)) + .put(TOKEN02, host(IP2, DC1, RACK12)) + .put(TOKEN03, host(IP3, DC2, RACK21)) + .put(TOKEN04, host(IP4, DC2, RACK22)) + .build(); + + // Wrong configuration: impossible replication factor for DC2 + networkTopologyStrategy(rf(DC1, 2), rf(DC2, 3)) + .computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + assertThat(logs.getNext()) + .contains( + String.format( + "Error while computing token map for keyspace %s with datacenter %s", + keyspace, DC2)); + + // Wrong configuration: non-existing datacenter + networkTopologyStrategy(rf(DC1, 2), rf("does_not_exist", 2)) + .computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + assertThat(logs.getNext()) + .contains( + String.format( + "Error while computing token map for keyspace %s with datacenter %s", + keyspace, "does_not_exist")); + } finally { + logger.setLevel(originalLevel); + logger.removeAppender(logs); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java index 12dbaca03f8..99f99ccd86a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,59 +17,58 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.mockito.Mockito.*; - @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createKeyspace = false) public class NodeListRefreshDebouncerTest extends CCMTestsSupport { - private static final int DEBOUNCE_TIME = 2000; - - private Cluster cluster2; + private static final int DEBOUNCE_TIME = 2000; - // Control Connection to be spied. - private ControlConnection controlConnection; + private Cluster cluster2; - @BeforeMethod(groups = "short") - public void setup() { - QueryOptions queryOptions = new QueryOptions(); - queryOptions.setRefreshNodeListIntervalMillis(DEBOUNCE_TIME); - queryOptions.setMaxPendingRefreshNodeListRequests(5); - queryOptions.setRefreshSchemaIntervalMillis(0); - // Create a separate cluster that will receive the schema events on its control connection. - cluster2 = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); + // Control Connection to be spied. + private ControlConnection controlConnection; - cluster2.init(); + @BeforeMethod(groups = "short") + public void setup() { + QueryOptions queryOptions = new QueryOptions(); + queryOptions.setRefreshNodeListIntervalMillis(DEBOUNCE_TIME); + queryOptions.setMaxPendingRefreshNodeListRequests(5); + queryOptions.setRefreshSchemaIntervalMillis(0); + // Create a separate cluster that will receive the schema events on its control connection. + cluster2 = register(createClusterBuilder().withQueryOptions(queryOptions).build()); - // Create a spy of the Cluster's control connection and replace it with the spy. - controlConnection = spy(cluster2.manager.controlConnection); - cluster2.manager.controlConnection = controlConnection; - reset(controlConnection); - } + cluster2.init(); - /** - * Ensures that when enough refreshNodeList requests have been received - * to reach {@link QueryOptions#getMaxPendingRefreshNodeListRequests()} that a - * node list refresh is submitted right away. - * - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_refresh_when_max_pending_requests_reached() { - for (int i = 0; i < 5; i++) { - cluster2.manager.submitNodeListRefresh(); - } + // Create a spy of the Cluster's control connection and replace it with the spy. + controlConnection = spy(cluster2.manager.controlConnection); + cluster2.manager.controlConnection = controlConnection; + reset(controlConnection); + } - // add delay to account for executor submit. 
- verify(controlConnection, timeout(DEBOUNCE_TIME)).refreshNodeListAndTokenMap(); + /** + * Ensures that when enough refreshNodeList requests have been received to reach {@link + * QueryOptions#getMaxPendingRefreshNodeListRequests()} that a node list refresh is submitted + * right away. + * + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_refresh_when_max_pending_requests_reached() { + for (int i = 0; i < 5; i++) { + cluster2.manager.submitNodeListRefresh(); } + + // add delay to account for executor submit. + verify(controlConnection, timeout(DEBOUNCE_TIME)).refreshNodeListAndTokenMap(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java index 3ac7c0ec8f9..0fe4b3d5145 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,56 +17,64 @@ */ package com.datastax.driver.core; -import org.mockito.ArgumentCaptor; -import org.testng.annotations.Test; - -import java.util.concurrent.TimeUnit; - import static com.datastax.driver.core.Assertions.assertThat; import static org.mockito.ArgumentCaptor.forClass; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import org.mockito.ArgumentCaptor; +import org.testng.annotations.Test; @CCMConfig(dirtiesContext = true, createCluster = false) public class NodeRefreshDebouncerTest extends CCMTestsSupport { - /** - * Ensures that when a new node is bootstrapped into the cluster, stopped, and then subsequently - * started within {@link QueryOptions#setRefreshNodeIntervalMillis(int)} that an 'onAdd' event - * event is the only one processed and that the {@link Host} is marked up. - *
    - * Since NEW_NODE_DELAY_SECONDS is typically configured with a high value (60 seconds default - * in the maven profile) this test can take a very long time. - * - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "long") - public void should_call_onAdd_with_bootstrap_stop_start() { - int refreshNodeInterval = 30000; - QueryOptions queryOptions = new QueryOptions().setRefreshNodeIntervalMillis(refreshNodeInterval); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); - cluster.connect(); - Host.StateListener listener = mock(Host.StateListener.class); - cluster.register(listener); - ccm().add(2); - ccm().start(2); - ccm().stop(2); - ccm().start(2); + /** + * Ensures that when a new node is bootstrapped into the cluster, stopped, and then subsequently + * started within {@link QueryOptions#setRefreshNodeIntervalMillis(int)}, then an 'onAdd' event is + * emitted and the {@link Host} is marked up. + * + *
    Since NEW_NODE_DELAY_SECONDS is typically configured with a high value (60 seconds default + * in the maven profile) this test can take a very long time. + * + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "long") + public void should_call_onAdd_with_bootstrap_stop_start() { + int refreshNodeInterval = 30000; + QueryOptions queryOptions = + new QueryOptions().setRefreshNodeIntervalMillis(refreshNodeInterval); + Cluster cluster = register(createClusterBuilder().withQueryOptions(queryOptions).build()); + cluster.connect(); + Host.StateListener listener = mock(Host.StateListener.class); + cluster.register(listener); + ccm().add(2); + ccm().start(2); + ccm().stop(2); + ccm().start(2); - ArgumentCaptor captor = forClass(Host.class); + ArgumentCaptor captor = forClass(Host.class); - // Only register and onAdd should be called, since stop and start should be discarded. - verify(listener).onRegister(cluster); - long addDelay = refreshNodeInterval + TimeUnit.MILLISECONDS.convert(Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); - verify(listener, timeout(addDelay)).onAdd(captor.capture()); - verifyNoMoreInteractions(listener); + verify(listener).onRegister(cluster); + long addDelay = + refreshNodeInterval + + TimeUnit.MILLISECONDS.convert(Cluster.NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); + verify(listener, timeout(addDelay)).onAdd(captor.capture()); - // The hosts state should be UP. - assertThat(captor.getValue().getState()).isEqualTo("UP"); - assertThat(cluster).host(2).hasState(Host.State.UP); - } + // The host should eventually come UP + final Host host = captor.getValue(); + ConditionChecker.check() + .that( + new Callable() { + @Override + public Boolean call() { + return host.getState().equals("UP"); + } + }) + .becomesTrue(); + assertThat(cluster).host(2).hasState(Host.State.UP); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/NowInSecondsTest.java b/driver-core/src/test/java/com/datastax/driver/core/NowInSecondsTest.java new file mode 100644 index 00000000000..9568479678c --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/NowInSecondsTest.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@CassandraVersion("4.0") +public class NowInSecondsTest extends CCMTestsSupport { + + private static final String WRITE_QUERY = "INSERT INTO test (k,v) VALUES (1,1) USING TTL 20"; + private static final Statement READ_STATEMENT = + new SimpleStatement("SELECT TTL(v) FROM test WHERE k = 1"); + + @Override + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder().allowBetaProtocolVersion(); + } + + @BeforeMethod(groups = "short") + public void setup() { + execute("DROP TABLE IF EXISTS test", "CREATE TABLE test(k int PRIMARY KEY, v int)"); + } + + @Test(groups = "short") + public void should_use_now_in_seconds_with_simple_statement() { + should_use_now_in_seconds(new SimpleStatement(WRITE_QUERY)); + } + + @Test(groups = "short") + public void should_use_now_in_seconds_with_bound_statement() { + PreparedStatement preparedStatement = session().prepare(WRITE_QUERY); + should_use_now_in_seconds(preparedStatement.bind()); + } + + @Test(groups = "short") + public void should_use_now_in_seconds_with_batch_statement() { + should_use_now_in_seconds( + new BatchStatement(BatchStatement.Type.LOGGED).add(new SimpleStatement(WRITE_QUERY))); + } + + private void should_use_now_in_seconds(Statement writeStatement) { + // When + // insert at t = 0 with TTL 20 + session().execute(writeStatement.setNowInSeconds(0)); + // read TTL at t = 10 + ResultSet rs = session().execute(READ_STATEMENT.setNowInSeconds(10)); + int remainingTtl = rs.one().getInt(0); + + // Then + assertThat(remainingTtl).isEqualTo(10); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenFactoryTest.java b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenFactoryTest.java index 33a325ade46..acad71557e3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenFactoryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenFactoryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,88 +17,75 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.utils.Bytes; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class OPPTokenFactoryTest { - private static final Token.Factory factory = Token.OPPToken.FACTORY; - - private static final Token minToken = token(ByteBuffer.allocate(0)); - - @Test(groups = "unit") - public void should_split_range() { - List splits = factory.split(token('a'), token('d'), 3); - assertThat(splits).containsExactly( - token('b'), - token('c') - ); - } - - @Test(groups = "unit") - public void should_split_range_producing_empty_splits_near_ring_end() { - // the first token following min - ByteBuffer buffer = ByteBuffer.wrap(new byte[]{0}); - Token zero = token(buffer); - - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] range (which is the whole ring) - List splits = factory.split(minToken, zero, 3); - assertThat(splits).containsExactly( - zero, - zero - ); + private static final Token.Factory factory = Token.OPPToken.FACTORY; + + private static final Token minToken = token(ByteBuffer.allocate(0)); + + @Test(groups = "unit") + public void should_split_range() { + List splits = factory.split(token('a'), token('d'), 3); + assertThat(splits).containsExactly(token('b'), token('c')); + } + + @Test(groups = "unit") + public void should_split_range_producing_empty_splits_near_ring_end() { + // the first token following min + ByteBuffer buffer = ByteBuffer.wrap(new byte[] {0}); + Token zero = token(buffer); + + // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] + // range (which is the whole ring) + List splits = factory.split(minToken, zero, 3); + assertThat(splits).containsExactly(zero, zero); + } + + @Test(groups = "unit") + public void should_split_range_where_start_as_int_equals_end_as_int() { + Token start = token(Bytes.fromHexString("0x11")); + Token end = token(Bytes.fromHexString("0x1100")); + + List splits = factory.split(start, end, 3); + assertThat(splits).containsExactly(end, end); + } + + @Test(groups = "unit") + public void should_split_range_that_wraps_around_the_ring() { + for (int start = 1; start < 128; start++) { + for (int end = start; end < start; end++) { + // Take the midpoint of the ring and offset by the midpoint of start+end. 
+ long expected = 0x81 + ((start + end) / 2); + TokenRange tr = new TokenRange(token(start), token(end), factory); + assertThat(factory.split(tr.getStart(), tr.getEnd(), 2)) + .as("Expected 0x%X for start: %d, end: %d", expected, start, end) + .containsExactly(token(expected)); + } } + } - @Test(groups = "unit") - public void should_strip_trailing_0_bytes() { - Token with0Bytes = token(ByteBuffer.wrap(new byte[]{4, 0, 0, 0})); - Token without0Bytes = token(ByteBuffer.wrap(new byte[]{4})); - Token fromStringWith0Bytes = factory.fromString("040000"); + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_not_be_allowed_to_split_with_min_token() { + factory.split(minToken, minToken, 1); + } - assertThat(with0Bytes) - .isEqualTo(without0Bytes) - .isEqualTo(fromStringWith0Bytes); + private static Token token(char c) { + return new Token.OPPToken(ByteBuffer.wrap(new byte[] {(byte) c})); + } - Token withMixed0Bytes = factory.fromString("0004000400"); - Token withoutMixed0Bytes = factory.fromString("00040004"); + private static Token token(long i) { + return new Token.OPPToken(ByteBuffer.wrap(BigInteger.valueOf(i).toByteArray())); + } - assertThat(withMixed0Bytes) - .isEqualTo(withoutMixed0Bytes); - } - - @Test(groups = "unit") - public void should_split_range_that_wraps_around_the_ring() { - for (int start = 1; start < 128; start++) { - for (int end = start; end < start; end++) { - // Take the midpoint of the ring and offset by the midpoint of start+end. - long expected = 0x81 + ((start + end) / 2); - TokenRange tr = new TokenRange(token(start), token(end), factory); - assertThat(factory.split(tr.getStart(), tr.getEnd(), 2)) - .as("Expected 0x%X for start: %d, end: %d", expected, start, end) - .containsExactly(token(expected)); - } - } - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_not_be_allowed_to_split_with_min_token() { - factory.split(minToken, minToken, 1); - } - - private static Token token(char c) { - return new Token.OPPToken(ByteBuffer.wrap(new byte[]{(byte) c})); - } - - private static Token token(long i) { - return new Token.OPPToken(ByteBuffer.wrap(BigInteger.valueOf(i).toByteArray())); - } - - private static Token token(ByteBuffer buffer) { - // note - protocol version is ignored in OPPFactory, so we actually don't care about the value - return factory.deserialize(buffer, ProtocolVersion.NEWEST_SUPPORTED); - } + private static Token token(ByteBuffer buffer) { + // note - protocol version is ignored in OPPFactory, so we actually don't care about the value + return factory.deserialize(buffer, ProtocolVersion.NEWEST_SUPPORTED); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenIntegrationTest.java index cd89a27b909..e1098bddb2d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
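Editor's aside (not part of this patch): the OPPToken factory tests above cover ByteOrderedPartitioner edge cases such as wrap-around ranges and the min token. A hedged sketch of the same token arithmetic through the public API, when scanning a table one token range at a time, is shown below; the keyspace, table and column names are placeholders.

```
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TokenRange;

public class TokenRangeScanSketch {
  public static void main(String[] args) {
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("my_keyspace")) {
      PreparedStatement ps =
          session.prepare("SELECT k, v FROM my_table WHERE token(k) > ? AND token(k) <= ?");
      for (TokenRange range : cluster.getMetadata().getTokenRanges()) {
        // A range that wraps around the ring must be unwrapped into two
        // contiguous ranges before it can be expressed as a token() predicate;
        // a non-wrapping range unwraps to itself.
        for (TokenRange subRange : range.unwrap()) {
          BoundStatement bs =
              ps.bind().setToken(0, subRange.getStart()).setToken(1, subRange.getEnd());
          for (Row row : session.execute(bs)) {
            // process row
          }
        }
      }
    }
  }
}
```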
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +22,18 @@ @CCMConfig(options = "-p ByteOrderedPartitioner") public class OPPTokenIntegrationTest extends TokenIntegrationTest { - public OPPTokenIntegrationTest() { - super(DataType.blob(), false); - } + public OPPTokenIntegrationTest() { + super(DataType.blob(), false); + } + + @Override + protected Token.Factory tokenFactory() { + return OPPToken.FACTORY; + } - @Override - protected Token.Factory tokenFactory() { - return OPPToken.FACTORY; - } + @Override + public void beforeTestClass(Object testInstance) throws Exception { + skipTestWithCassandraVersionOrHigher("4.0.0", "ByteOrderedPartitioner"); + super.beforeTestClass(testInstance); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenVnodeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenVnodeIntegrationTest.java index 5875bcbb8a4..9d630e81c16 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/OPPTokenVnodeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/OPPTokenVnodeIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +20,18 @@ @CCMConfig(options = {"-p ByteOrderedPartitioner", "--vnodes"}) public class OPPTokenVnodeIntegrationTest extends TokenIntegrationTest { - public OPPTokenVnodeIntegrationTest() { - super(DataType.blob(), true); - } + public OPPTokenVnodeIntegrationTest() { + super(DataType.blob(), true); + } + + @Override + protected Token.Factory tokenFactory() { + return Token.OPPToken.FACTORY; + } - @Override - protected Token.Factory tokenFactory() { - return Token.OPPToken.FACTORY; - } + @Override + public void beforeTestClass(Object testInstance) throws Exception { + skipTestWithCassandraVersionOrHigher("4.0.0", "ByteOrderedPartitioner"); + super.beforeTestClass(testInstance); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PagingStateTest.java b/driver-core/src/test/java/com/datastax/driver/core/PagingStateTest.java index 27873fa1e2b..336ba4b51a9 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PagingStateTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PagingStateTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,240 +17,261 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.PagingStateException; import com.datastax.driver.core.utils.CassandraVersion; +import java.util.Iterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.Test; -import java.util.Iterator; - -import static org.assertj.core.api.Assertions.assertThat; - @CassandraVersion("2.0.0") public class PagingStateTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(PagingStateTest.class); - - public static final String KEY = "paging_test"; - - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); - for (int i = 0; i < 100; i++) { - execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); - } - } - - /** - * Validates that {@link PagingState} can be reused with the same Statement. - * - * @test_category paging - * @expected_result {@link ResultSet} from the query with the provided {@link PagingState} starts from the - * subsequent row from the first query. - */ - @Test(groups = "short") - public void should_complete_when_using_paging_state() { - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - ResultSet result = session().execute(st.setFetchSize(20)); - int pageSize = result.getAvailableWithoutFetching(); - String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); - - st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - result = session().execute(st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); - - //We have the result starting from the next page we stopped - assertThat(result.one().getInt("v")).isEqualTo(pageSize); - } - - /** - *

- * Validates that if the {@link PagingState} is altered in any way that it may not be reused.
- * The paging state is altered in the following ways:
- * <ol>
- * <li>Altering a byte in the paging state raw bytes.</li>
- * <li>Setting the {@link PagingState} on a different Statement. (should fail hash validation)</li>
- * </ol>
    - * - * @test_category paging - * @expected_result {@link PagingState} refused to be reused if it is changed or used on a different statement. - */ - @Test(groups = "short") - public void should_fail_if_paging_state_altered() { - boolean setWithFalseContent = false; - boolean setWithWrongStatement = false; - - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - ResultSet result = session().execute(st.setFetchSize(20)); - - PagingState savedPagingState = result.getExecutionInfo().getPagingState(); - byte[] savedPagingStateBuffer = savedPagingState.toBytes(); - String savedPagingStateString = savedPagingState.toString(); - - // corrupting the paging state - savedPagingStateBuffer[6] = (byte) 42; - - try { - st.setFetchSize(20).setPagingState(PagingState.fromBytes(savedPagingStateBuffer)); - } catch (PagingStateException e) { - setWithFalseContent = true; - logger.debug(e.getMessage()); - } finally { - assertThat(setWithFalseContent).isTrue(); - assertThat(st.getPagingState()).isNull(); - } - - // Changing the statement - st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", "paging")); - try { - st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString)); - } catch (PagingStateException e) { - setWithWrongStatement = true; - logger.debug(e.getMessage()); - } finally { - assertThat(setWithWrongStatement).isTrue(); - assertThat(st.getPagingState()).isNull(); - } - } + private static final Logger logger = LoggerFactory.getLogger(PagingStateTest.class); - /** - * Validates that {@link PagingState} can be reused with a wrapped Statement. - * - * @test_category paging - * @expected_result {@link ResultSet} from the query with the provided {@link PagingState} starts from the - * subsequent row from the first query. - */ - @Test(groups = "short") - public void should_use_state_with_wrapped_statement() { - Statement st = new TestWrapper(new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY))); - ResultSet result = session().execute(st.setFetchSize(20)); - int pageSize = result.getAvailableWithoutFetching(); - String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); - - st = new TestWrapper(new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY))); - result = session().execute(st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); - - //We have the result starting from the next page we stopped - assertThat(result.one().getInt("v")).isEqualTo(pageSize); - } + public static final String KEY = "paging_test"; - /** - * Validates that {@link PagingState} can be reused with the same {@link BoundStatement}. - * - * @test_category paging - * @expected_result {@link ResultSet} from the query with the provided paging state starts from the subsequent row - * from the first query. 
- */ - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_be_able_to_use_state_with_bound_statement() { - PreparedStatement prepared = session().prepare("SELECT v from test where k=?"); - BoundStatement bs = prepared.bind(KEY); - - ResultSet result = session().execute(bs.setFetchSize(20)); - int pageSize = result.getAvailableWithoutFetching(); - PagingState pagingState = result.getExecutionInfo().getPagingState(); - - result = session().execute(bs.setFetchSize(20).setPagingState(pagingState)); - - //We have the result starting from the next page we stopped - assertThat(result.one().getInt("v")).isEqualTo(pageSize); + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); + for (int i = 0; i < 100; i++) { + execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); } - - /** - * Validates that {@link PagingState} cannot be reused with a different {@link BoundStatement} than the original, - * even if its source {@link PreparedStatement} was the same. - * - * @test_category paging - * @expected_result A failure is thrown when setting paging state on a different {@link BoundStatement}. - */ - @Test(groups = "short", expectedExceptions = {PagingStateException.class}) - @CassandraVersion("2.0.0") - public void should_not_be_able_to_use_state_with_different_bound_statement() { - PreparedStatement prepared = session().prepare("SELECT v from test where k=?"); - BoundStatement bs0 = prepared.bind(KEY); - - ResultSet result = session().execute(bs0.setFetchSize(20)); - PagingState pagingState = result.getExecutionInfo().getPagingState(); - - BoundStatement bs1 = prepared.bind("different_key"); - session().execute(bs1.setFetchSize(20).setPagingState(pagingState)); + } + + /** + * Validates that {@link PagingState} can be reused with the same Statement. + * + * @test_category paging + * @expected_result {@link ResultSet} from the query with the provided {@link PagingState} starts + * from the subsequent row from the first query. + */ + @Test(groups = "short") + public void should_complete_when_using_paging_state() { + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + ResultSet result = session().execute(st.setFetchSize(20)); + int pageSize = result.getAvailableWithoutFetching(); + String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); + + st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + result = + session() + .execute( + st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); + + // We have the result starting from the next page we stopped + assertThat(result.one().getInt("v")).isEqualTo(pageSize); + } + + /** + * Validates that if the {@link PagingState} is altered in any way that it may not be reused. The + * paging state is altered in the following ways: + * + *

+ * <ol>
+ *   <li>Altering a byte in the paging state raw bytes.
+ *   <li>Setting the {@link PagingState} on a different Statement. (should fail hash validation)
+ * </ol>
    + * + * @test_category paging + * @expected_result {@link PagingState} refused to be reused if it is changed or used on a + * different statement. + */ + @Test(groups = "short") + public void should_fail_if_paging_state_altered() { + boolean setWithFalseContent = false; + boolean setWithWrongStatement = false; + + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + ResultSet result = session().execute(st.setFetchSize(20)); + + PagingState savedPagingState = result.getExecutionInfo().getPagingState(); + byte[] savedPagingStateBuffer = savedPagingState.toBytes(); + String savedPagingStateString = savedPagingState.toString(); + + // corrupting the paging state + savedPagingStateBuffer[6] = (byte) 42; + + try { + st.setFetchSize(20).setPagingState(PagingState.fromBytes(savedPagingStateBuffer)); + } catch (PagingStateException e) { + setWithFalseContent = true; + logger.debug(e.getMessage()); + } finally { + assertThat(setWithFalseContent).isTrue(); + assertThat(st.getPagingState()).isNull(); } - /** - * Validates if all results of a query are paged in through a queries result set that the {@link PagingState} it - * returns will return an empty set when queried with. - * - * @test_category paging - * @expected_result Query with the {@link PagingState} returns 0 rows. - */ - @Test(groups = "short") - public void should_return_no_rows_when_paged_to_end() { - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - ResultSet result = session().execute(st.setFetchSize(20)); - - // Consume enough of the iterator to cause all the results to be paged in. - Iterator rowIt = result.iterator(); - for (int i = 0; i < 83; i++) { - rowIt.next().getInt("v"); - } - - String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); - - st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - result = session().execute(st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); - - assertThat(result.one()).isNull(); + // Changing the statement + st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", "paging")); + try { + st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString)); + } catch (PagingStateException e) { + setWithWrongStatement = true; + logger.debug(e.getMessage()); + } finally { + assertThat(setWithWrongStatement).isTrue(); + assertThat(st.getPagingState()).isNull(); } - - @Test(groups = "unit", expectedExceptions = {PagingStateException.class}) - public void should_fail_when_given_invalid_string() { - // An invalid string cannot be parsed and a PagingStateException is thrown. - PagingState.fromString("0101"); + } + + /** + * Validates that {@link PagingState} can be reused with a wrapped Statement. + * + * @test_category paging + * @expected_result {@link ResultSet} from the query with the provided {@link PagingState} starts + * from the subsequent row from the first query. 
+ */ + @Test(groups = "short") + public void should_use_state_with_wrapped_statement() { + Statement st = + new TestWrapper(new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY))); + ResultSet result = session().execute(st.setFetchSize(20)); + int pageSize = result.getAvailableWithoutFetching(); + String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); + + st = + new TestWrapper(new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY))); + result = + session() + .execute( + st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); + + // We have the result starting from the next page we stopped + assertThat(result.one().getInt("v")).isEqualTo(pageSize); + } + + /** + * Validates that {@link PagingState} can be reused with the same {@link BoundStatement}. + * + * @test_category paging + * @expected_result {@link ResultSet} from the query with the provided paging state starts from + * the subsequent row from the first query. + */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_be_able_to_use_state_with_bound_statement() { + PreparedStatement prepared = session().prepare("SELECT v from test where k=?"); + BoundStatement bs = prepared.bind(KEY); + + ResultSet result = session().execute(bs.setFetchSize(20)); + int pageSize = result.getAvailableWithoutFetching(); + PagingState pagingState = result.getExecutionInfo().getPagingState(); + + result = session().execute(bs.setFetchSize(20).setPagingState(pagingState)); + + // We have the result starting from the next page we stopped + assertThat(result.one().getInt("v")).isEqualTo(pageSize); + } + + /** + * Validates that {@link PagingState} cannot be reused with a different {@link BoundStatement} + * than the original, even if its source {@link PreparedStatement} was the same. + * + * @test_category paging + * @expected_result A failure is thrown when setting paging state on a different {@link + * BoundStatement}. + */ + @Test( + groups = "short", + expectedExceptions = {PagingStateException.class}) + @CassandraVersion("2.0.0") + public void should_not_be_able_to_use_state_with_different_bound_statement() { + PreparedStatement prepared = session().prepare("SELECT v from test where k=?"); + BoundStatement bs0 = prepared.bind(KEY); + + ResultSet result = session().execute(bs0.setFetchSize(20)); + PagingState pagingState = result.getExecutionInfo().getPagingState(); + + BoundStatement bs1 = prepared.bind("different_key"); + session().execute(bs1.setFetchSize(20).setPagingState(pagingState)); + } + + /** + * Validates if all results of a query are paged in through a queries result set that the {@link + * PagingState} it returns will return an empty set when queried with. + * + * @test_category paging + * @expected_result Query with the {@link PagingState} returns 0 rows. + */ + @Test(groups = "short") + public void should_return_no_rows_when_paged_to_end() { + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + ResultSet result = session().execute(st.setFetchSize(20)); + + // Consume enough of the iterator to cause all the results to be paged in. + Iterator rowIt = result.iterator(); + for (int i = 0; i < 83; i++) { + rowIt.next().getInt("v"); } - @Test(groups = "unit", expectedExceptions = {PagingStateException.class}) - public void should_fail_when_given_invalid_byte_array() { - // Given an expected page state of size 1 and hash of size 1, we should expect 6 bytes, but only receive 5. 
- byte[] complete = {0x00, 0x01, 0x00, 0x01, 0x00}; - PagingState.fromBytes(complete); - } - - @Test(groups = "unit", expectedExceptions = {UnsupportedOperationException.class}) - public void should_fail_when_setting_paging_state_on_batch_statement() { - // Should not be able to set paging state on a batch statement. - PagingState emptyStatement = PagingState.fromString("00000000"); - - BatchStatement batch = new BatchStatement(); - batch.setPagingState(emptyStatement); - } - - /** - * Validates that the "unsafe" paging state can be reused with the same Statement. - * - * @test_category paging - * @expected_result {@link ResultSet} from the query with the provided raw paging state starts from the - * subsequent row from the first query. - */ - @Test(groups = "short") - public void should_complete_when_using_unsafe_paging_state() { - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - ResultSet result = session().execute(st.setFetchSize(20)); - int pageSize = result.getAvailableWithoutFetching(); - byte[] savedPagingState = result.getExecutionInfo().getPagingStateUnsafe(); - - st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - result = session().execute(st.setFetchSize(20).setPagingStateUnsafe(savedPagingState)); - - //We have the result starting from the next page we stopped - assertThat(result.one().getInt("v")).isEqualTo(pageSize); - } - - static class TestWrapper extends StatementWrapper { - TestWrapper(Statement wrapped) { - super(wrapped); - } + String savedPagingStateString = result.getExecutionInfo().getPagingState().toString(); + + st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + result = + session() + .execute( + st.setFetchSize(20).setPagingState(PagingState.fromString(savedPagingStateString))); + + assertThat(result.one()).isNull(); + } + + @Test( + groups = "unit", + expectedExceptions = {PagingStateException.class}) + public void should_fail_when_given_invalid_string() { + // An invalid string cannot be parsed and a PagingStateException is thrown. + PagingState.fromString("0101"); + } + + @Test( + groups = "unit", + expectedExceptions = {PagingStateException.class}) + public void should_fail_when_given_invalid_byte_array() { + // Given an expected page state of size 1 and hash of size 1, we should expect 6 bytes, but only + // receive 5. + byte[] complete = {0x00, 0x01, 0x00, 0x01, 0x00}; + PagingState.fromBytes(complete); + } + + @Test( + groups = "unit", + expectedExceptions = {UnsupportedOperationException.class}) + public void should_fail_when_setting_paging_state_on_batch_statement() { + // Should not be able to set paging state on a batch statement. + PagingState emptyStatement = PagingState.fromString("00000000"); + + BatchStatement batch = new BatchStatement(); + batch.setPagingState(emptyStatement); + } + + /** + * Validates that the "unsafe" paging state can be reused with the same Statement. + * + * @test_category paging + * @expected_result {@link ResultSet} from the query with the provided raw paging state starts + * from the subsequent row from the first query. 
+ */ + @Test(groups = "short") + public void should_complete_when_using_unsafe_paging_state() { + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + ResultSet result = session().execute(st.setFetchSize(20)); + int pageSize = result.getAvailableWithoutFetching(); + byte[] savedPagingState = result.getExecutionInfo().getPagingStateUnsafe(); + + st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + result = session().execute(st.setFetchSize(20).setPagingStateUnsafe(savedPagingState)); + + // We have the result starting from the next page we stopped + assertThat(result.one().getInt("v")).isEqualTo(pageSize); + } + + static class TestWrapper extends StatementWrapper { + TestWrapper(Statement wrapped) { + super(wrapped); } + } } - diff --git a/driver-core/src/test/java/com/datastax/driver/core/ParseUtilsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ParseUtilsTest.java index 6393f6ae3e9..b80f9adba55 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ParseUtilsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ParseUtilsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,94 +17,95 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; -public class ParseUtilsTest { +import org.testng.annotations.Test; - @Test(groups = "unit") - public void testQuote() { - assertThat(ParseUtils.quote(null)).isEqualTo("''"); - assertThat(ParseUtils.quote("")).isEqualTo("''"); - assertThat(ParseUtils.quote(" ")).isEqualTo("' '"); - assertThat(ParseUtils.quote("foo")).isEqualTo("'foo'"); - assertThat(ParseUtils.quote(" 'foo' ")).isEqualTo("' ''foo'' '"); - } +public class ParseUtilsTest { - @Test(groups = "unit") - public void testUnquote() { - assertThat(ParseUtils.unquote(null)).isNull(); - assertThat(ParseUtils.unquote("")).isEqualTo(""); - assertThat(ParseUtils.unquote(" ")).isEqualTo(" "); - assertThat(ParseUtils.unquote("'")).isEqualTo("'"); // malformed string left untouched - assertThat(ParseUtils.unquote("foo")).isEqualTo("foo"); - assertThat(ParseUtils.unquote("''")).isEqualTo(""); - assertThat(ParseUtils.unquote("' '")).isEqualTo(" "); - assertThat(ParseUtils.unquote("'foo")).isEqualTo("'foo"); // malformed string left untouched - assertThat(ParseUtils.unquote("'foo'")).isEqualTo("foo"); - assertThat(ParseUtils.unquote(" 'foo' ")).isEqualTo(" 'foo' "); // considered unquoted - assertThat(ParseUtils.unquote("'''foo'''")).isEqualTo("'foo'"); - assertThat(ParseUtils.unquote("'''")).isEqualTo("'"); - assertThat(ParseUtils.unquote("''foo'")).isEqualTo("'foo"); - assertThat(ParseUtils.unquote("'foo''")).isEqualTo("foo'"); - } + @Test(groups = "unit") + public void testQuote() { + assertThat(ParseUtils.quote(null)).isEqualTo("''"); + assertThat(ParseUtils.quote("")).isEqualTo("''"); + assertThat(ParseUtils.quote(" ")).isEqualTo("' '"); + assertThat(ParseUtils.quote("foo")).isEqualTo("'foo'"); + assertThat(ParseUtils.quote(" 'foo' ")).isEqualTo("' ''foo'' '"); + } - @Test(groups = "unit") - public void testIsQuoted() { - assertThat(ParseUtils.isQuoted(null)).isFalse(); - assertThat(ParseUtils.isQuoted("")).isFalse(); - assertThat(ParseUtils.isQuoted(" ")).isFalse(); - assertThat(ParseUtils.isQuoted("'")).isFalse(); // malformed string considered unquoted - assertThat(ParseUtils.isQuoted("foo")).isFalse(); - assertThat(ParseUtils.isQuoted("''")).isTrue(); - assertThat(ParseUtils.isQuoted("' '")).isTrue(); - assertThat(ParseUtils.isQuoted("'foo")).isFalse(); // malformed string considered unquoted - assertThat(ParseUtils.isQuoted("'foo'")).isTrue(); - assertThat(ParseUtils.isQuoted(" 'foo' ")).isFalse(); // considered unquoted - assertThat(ParseUtils.isQuoted("'''foo'''")).isTrue(); - } + @Test(groups = "unit") + public void testUnquote() { + assertThat(ParseUtils.unquote(null)).isNull(); + assertThat(ParseUtils.unquote("")).isEqualTo(""); + assertThat(ParseUtils.unquote(" ")).isEqualTo(" "); + assertThat(ParseUtils.unquote("'")).isEqualTo("'"); // malformed string left untouched + assertThat(ParseUtils.unquote("foo")).isEqualTo("foo"); + assertThat(ParseUtils.unquote("''")).isEqualTo(""); + assertThat(ParseUtils.unquote("' '")).isEqualTo(" "); + assertThat(ParseUtils.unquote("'foo")).isEqualTo("'foo"); // malformed string left untouched + assertThat(ParseUtils.unquote("'foo'")).isEqualTo("foo"); + 
assertThat(ParseUtils.unquote(" 'foo' ")).isEqualTo(" 'foo' "); // considered unquoted + assertThat(ParseUtils.unquote("'''foo'''")).isEqualTo("'foo'"); + assertThat(ParseUtils.unquote("'''")).isEqualTo("'"); + assertThat(ParseUtils.unquote("''foo'")).isEqualTo("'foo"); + assertThat(ParseUtils.unquote("'foo''")).isEqualTo("foo'"); + } - @Test(groups = "unit") - public void testDoubleQuote() { - assertThat(ParseUtils.doubleQuote(null)).isEqualTo("\"\""); - assertThat(ParseUtils.doubleQuote("")).isEqualTo("\"\""); - assertThat(ParseUtils.doubleQuote(" ")).isEqualTo("\" \""); - assertThat(ParseUtils.doubleQuote("foo")).isEqualTo("\"foo\""); - assertThat(ParseUtils.doubleQuote(" \"foo\" ")).isEqualTo("\" \"\"foo\"\" \""); - } + @Test(groups = "unit") + public void testIsQuoted() { + assertThat(ParseUtils.isQuoted(null)).isFalse(); + assertThat(ParseUtils.isQuoted("")).isFalse(); + assertThat(ParseUtils.isQuoted(" ")).isFalse(); + assertThat(ParseUtils.isQuoted("'")).isFalse(); // malformed string considered unquoted + assertThat(ParseUtils.isQuoted("foo")).isFalse(); + assertThat(ParseUtils.isQuoted("''")).isTrue(); + assertThat(ParseUtils.isQuoted("' '")).isTrue(); + assertThat(ParseUtils.isQuoted("'foo")).isFalse(); // malformed string considered unquoted + assertThat(ParseUtils.isQuoted("'foo'")).isTrue(); + assertThat(ParseUtils.isQuoted(" 'foo' ")).isFalse(); // considered unquoted + assertThat(ParseUtils.isQuoted("'''foo'''")).isTrue(); + } - @Test(groups = "unit") - public void testDoubleUnquote() { - assertThat(ParseUtils.unDoubleQuote(null)).isNull(); - assertThat(ParseUtils.unDoubleQuote("")).isEqualTo(""); - assertThat(ParseUtils.unDoubleQuote(" ")).isEqualTo(" "); - assertThat(ParseUtils.unDoubleQuote("\"")).isEqualTo("\""); // malformed string left untouched - assertThat(ParseUtils.unDoubleQuote("foo")).isEqualTo("foo"); - assertThat(ParseUtils.unDoubleQuote("\"\"")).isEqualTo(""); - assertThat(ParseUtils.unDoubleQuote("\" \"")).isEqualTo(" "); - assertThat(ParseUtils.unDoubleQuote("\"foo")).isEqualTo("\"foo"); // malformed string left untouched - assertThat(ParseUtils.unDoubleQuote("\"foo\"")).isEqualTo("foo"); - assertThat(ParseUtils.unDoubleQuote(" \"foo\" ")).isEqualTo(" \"foo\" "); // considered unquoted - assertThat(ParseUtils.unDoubleQuote("\"\"\"foo\"\"\"")).isEqualTo("\"foo\""); - assertThat(ParseUtils.unDoubleQuote("\"\"\"")).isEqualTo("\""); - assertThat(ParseUtils.unDoubleQuote("\"\"foo\"")).isEqualTo("\"foo"); - assertThat(ParseUtils.unDoubleQuote("\"foo\"\"")).isEqualTo("foo\""); - } + @Test(groups = "unit") + public void testDoubleQuote() { + assertThat(ParseUtils.doubleQuote(null)).isEqualTo("\"\""); + assertThat(ParseUtils.doubleQuote("")).isEqualTo("\"\""); + assertThat(ParseUtils.doubleQuote(" ")).isEqualTo("\" \""); + assertThat(ParseUtils.doubleQuote("foo")).isEqualTo("\"foo\""); + assertThat(ParseUtils.doubleQuote(" \"foo\" ")).isEqualTo("\" \"\"foo\"\" \""); + } - @Test(groups = "unit") - public void testIsDoubleQuoted() { - assertThat(ParseUtils.isDoubleQuoted(null)).isFalse(); - assertThat(ParseUtils.isDoubleQuoted("")).isFalse(); - assertThat(ParseUtils.isDoubleQuoted(" ")).isFalse(); - assertThat(ParseUtils.isDoubleQuoted("\"")).isFalse(); // malformed string considered unquoted - assertThat(ParseUtils.isDoubleQuoted("foo")).isFalse(); - assertThat(ParseUtils.isDoubleQuoted("\"\"")).isTrue(); - assertThat(ParseUtils.isDoubleQuoted("\" \"")).isTrue(); - assertThat(ParseUtils.isDoubleQuoted("\"foo")).isFalse(); // malformed string considered unquoted - 
assertThat(ParseUtils.isDoubleQuoted("\"foo\"")).isTrue(); - assertThat(ParseUtils.isDoubleQuoted(" \"foo\" ")).isFalse(); // considered unquoted - assertThat(ParseUtils.isDoubleQuoted("\"\"\"foo\"\"\"")).isTrue(); - } + @Test(groups = "unit") + public void testDoubleUnquote() { + assertThat(ParseUtils.unDoubleQuote(null)).isNull(); + assertThat(ParseUtils.unDoubleQuote("")).isEqualTo(""); + assertThat(ParseUtils.unDoubleQuote(" ")).isEqualTo(" "); + assertThat(ParseUtils.unDoubleQuote("\"")).isEqualTo("\""); // malformed string left untouched + assertThat(ParseUtils.unDoubleQuote("foo")).isEqualTo("foo"); + assertThat(ParseUtils.unDoubleQuote("\"\"")).isEqualTo(""); + assertThat(ParseUtils.unDoubleQuote("\" \"")).isEqualTo(" "); + assertThat(ParseUtils.unDoubleQuote("\"foo")) + .isEqualTo("\"foo"); // malformed string left untouched + assertThat(ParseUtils.unDoubleQuote("\"foo\"")).isEqualTo("foo"); + assertThat(ParseUtils.unDoubleQuote(" \"foo\" ")).isEqualTo(" \"foo\" "); // considered unquoted + assertThat(ParseUtils.unDoubleQuote("\"\"\"foo\"\"\"")).isEqualTo("\"foo\""); + assertThat(ParseUtils.unDoubleQuote("\"\"\"")).isEqualTo("\""); + assertThat(ParseUtils.unDoubleQuote("\"\"foo\"")).isEqualTo("\"foo"); + assertThat(ParseUtils.unDoubleQuote("\"foo\"\"")).isEqualTo("foo\""); + } + @Test(groups = "unit") + public void testIsDoubleQuoted() { + assertThat(ParseUtils.isDoubleQuoted(null)).isFalse(); + assertThat(ParseUtils.isDoubleQuoted("")).isFalse(); + assertThat(ParseUtils.isDoubleQuoted(" ")).isFalse(); + assertThat(ParseUtils.isDoubleQuoted("\"")).isFalse(); // malformed string considered unquoted + assertThat(ParseUtils.isDoubleQuoted("foo")).isFalse(); + assertThat(ParseUtils.isDoubleQuoted("\"\"")).isTrue(); + assertThat(ParseUtils.isDoubleQuoted("\" \"")).isTrue(); + assertThat(ParseUtils.isDoubleQuoted("\"foo")) + .isFalse(); // malformed string considered unquoted + assertThat(ParseUtils.isDoubleQuoted("\"foo\"")).isTrue(); + assertThat(ParseUtils.isDoubleQuoted(" \"foo\" ")).isFalse(); // considered unquoted + assertThat(ParseUtils.isDoubleQuoted("\"\"\"foo\"\"\"")).isTrue(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PerHostPercentileTrackerTest.java b/driver-core/src/test/java/com/datastax/driver/core/PerHostPercentileTrackerTest.java index e4febf6906a..f99f570d45a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PerHostPercentileTrackerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PerHostPercentileTrackerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,66 +17,74 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + import com.datastax.driver.core.exceptions.ReadTimeoutException; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.Test; - import java.util.List; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; +import org.testng.annotations.Test; public class PerHostPercentileTrackerTest - extends PercentileTrackerTest { + extends PercentileTrackerTest { - @Test(groups = "unit") - public void should_track_measurements_by_host() { - // given - a per host percentile tracker. - Cluster cluster0 = mock(Cluster.class); - PerHostPercentileTracker tracker = builder() - .withInterval(1, TimeUnit.SECONDS) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); + @Test(groups = "unit") + public void should_track_measurements_by_host() { + // given - a per host percentile tracker. + Cluster cluster0 = mock(Cluster.class); + PerHostPercentileTracker tracker = + builder().withInterval(1, TimeUnit.SECONDS).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); - List hosts = Lists.newArrayList(mock(Host.class), mock(Host.class), mock(Host.class)); - List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); - List exceptions = Lists.newArrayList(new Exception(), null, new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), null, null); + List hosts = Lists.newArrayList(mock(Host.class), mock(Host.class), mock(Host.class)); + List statements = Lists.newArrayList(mock(Statement.class), mock(Statement.class)); + List exceptions = + Lists.newArrayList( + new Exception(), + null, + new ReadTimeoutException(ConsistencyLevel.ANY, 1, 1, true), + null, + null); - // when - recording latencies over a linear progression with varying hosts, statements and exceptions. - for (int i = 0; i < 100; i++) { - tracker.update(hosts.get(0), - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), - TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); + // when - recording latencies over a linear progression with varying hosts, statements and + // exceptions. 
+ for (int i = 0; i < 100; i++) { + tracker.update( + hosts.get(0), + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert(i + 1, TimeUnit.MILLISECONDS)); - tracker.update(hosts.get(1), - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), - TimeUnit.NANOSECONDS.convert((i + 1) * 2, TimeUnit.MILLISECONDS)); + tracker.update( + hosts.get(1), + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert((i + 1) * 2, TimeUnit.MILLISECONDS)); - tracker.update(hosts.get(2), - statements.get(i % statements.size()), - exceptions.get(i % exceptions.size()), - TimeUnit.NANOSECONDS.convert((i + 1) * 3, TimeUnit.MILLISECONDS)); - } - Uninterruptibles.sleepUninterruptibly(2000, TimeUnit.MILLISECONDS); - - // then - the resulting tracker's percentiles should represent that linear progression for each host individually. - // host0: (x percentile == x) - // host1: (x percentile == 2x) - // host2: (x percentile == 3x) - for (int i = 1; i <= 99; i++) { - assertThat(tracker.getLatencyAtPercentile(hosts.get(0), null, null, i)).isEqualTo(i); - assertThat(tracker.getLatencyAtPercentile(hosts.get(1), null, null, i)).isEqualTo(i * 2); - assertThat(tracker.getLatencyAtPercentile(hosts.get(2), null, null, i)).isEqualTo(i * 3); - } + tracker.update( + hosts.get(2), + statements.get(i % statements.size()), + exceptions.get(i % exceptions.size()), + TimeUnit.NANOSECONDS.convert((i + 1) * 3, TimeUnit.MILLISECONDS)); } + Uninterruptibles.sleepUninterruptibly(2000, TimeUnit.MILLISECONDS); - - @Override - public PerHostPercentileTracker.Builder builder() { - return PerHostPercentileTracker.builder(defaultMaxLatency); + // then - the resulting tracker's percentiles should represent that linear progression for each + // host individually. + // host0: (x percentile == x) + // host1: (x percentile == 2x) + // host2: (x percentile == 3x) + for (int i = 1; i <= 99; i++) { + assertThat(tracker.getLatencyAtPercentile(hosts.get(0), null, null, i)).isEqualTo(i); + assertThat(tracker.getLatencyAtPercentile(hosts.get(1), null, null, i)).isEqualTo(i * 2); + assertThat(tracker.getLatencyAtPercentile(hosts.get(2), null, null, i)).isEqualTo(i * 3); } + } + + @Override + public PerHostPercentileTracker.Builder builder() { + return PerHostPercentileTracker.builder(defaultMaxLatency); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PercentileTrackerTest.java b/driver-core/src/test/java/com/datastax/driver/core/PercentileTrackerTest.java index aa69faeeb7d..c6a1a4a6f27 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PercentileTrackerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PercentileTrackerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,152 +17,189 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.UnpreparedException; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Uninterruptibles; +import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.Test; -import java.util.List; -import java.util.concurrent.TimeUnit; +public abstract class PercentileTrackerTest< + B extends PercentileTracker.Builder, T extends PercentileTracker> { -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; + Host defaultHost = mock(Host.class); -public abstract class PercentileTrackerTest, T extends PercentileTracker> { - - Host defaultHost = mock(Host.class); - - @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - Exception defaultException = mock(Exception.class); - - Statement defaultStatement = mock(Statement.class); - - long defaultMaxLatency = 1000; - - public abstract B builder(); - - @Test(groups = "unit") - public void should_ignore_certain_exceptions() throws Exception { - // given - a percentile tracker. - Cluster cluster0 = mock(Cluster.class); - T tracker = builder() - .withInterval(50, TimeUnit.MILLISECONDS) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); - - // when - recording measurements with the following exceptions. - List exceptionsToIgnore = Lists.newArrayList( - new UnavailableException(ConsistencyLevel.ANY, 0, 0), - new OverloadedException(null, "Overloaded"), - new BootstrappingException(null, "Bootstrapping"), - new UnpreparedException(null, "Unprepared"), - new InvalidQueryException("Validation", new Exception())); - - long startTime = System.currentTimeMillis(); - for (Exception exception : exceptionsToIgnore) { - tracker.update(defaultHost, defaultStatement, exception, TimeUnit.NANOSECONDS.convert(999, TimeUnit.MILLISECONDS)); - } - for (int i = 0; i < 100; i++) { - tracker.update(defaultHost, defaultStatement, defaultException, TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); - } - long waitTime = 50 - (System.currentTimeMillis() - startTime); - Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); - - // then - the resulting tracker's percentiles should all be 1, indicating those exceptions were ignored. 
- for (int i = 1; i <= 99; i++) { - long latencyAtPct = tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); - assertThat(latencyAtPct).isEqualTo(1); - } - } + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") + Exception defaultException = mock(Exception.class); - @Test(groups = "unit") - public void should_not_record_anything_if_not_enough_measurements() throws Exception { - // given - a percentile tracker with 100 min recorded values. - Cluster cluster0 = mock(Cluster.class); - T tracker = builder() - .withInterval(50, TimeUnit.MILLISECONDS) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); - - // when - recording less measurements then required. - long startTime = System.currentTimeMillis(); - for (int i = 0; i < 99; i++) { - tracker.update(defaultHost, defaultStatement, defaultException, TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); - } - long waitTime = 50 - (System.currentTimeMillis() - startTime); - Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); - - // then - the resulting tracker's percentiles should all be -1, indicating there were not enough values to consider. - for (int i = 1; i <= 99; i++) { - long latencyAtPct = tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); - assertThat(latencyAtPct).isEqualTo(-1); - } - } + Statement defaultStatement = mock(Statement.class); + + long defaultMaxLatency = 1000; + + public abstract B builder(); - @Test(groups = "short") - public void should_return_negative_value_when_interval_hasnt_elapsed() throws Exception { - // given - a percentile tracker with a long interval. - Cluster cluster0 = mock(Cluster.class); - T tracker = builder() - .withInterval(50, TimeUnit.MINUTES) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); - - // when - recording enough measurements. - for (int i = 0; i < 99; i++) { - tracker.update(defaultHost, defaultStatement, defaultException, TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); - } - - // then - the resulting tracker's percentiles should all be -1, since not enough time was given to elapse interval. - for (int i = 1; i <= 99; i++) { - long latencyAtPct = tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); - assertThat(latencyAtPct).isEqualTo(-1); - } + @Test(groups = "unit") + public void should_ignore_certain_exceptions() throws Exception { + // given - a percentile tracker. + Cluster cluster0 = mock(Cluster.class); + T tracker = + builder().withInterval(50, TimeUnit.MILLISECONDS).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); + + // when - recording measurements with the following exceptions. 
+ List exceptionsToIgnore = + Lists.newArrayList( + new UnavailableException(ConsistencyLevel.ANY, 0, 0), + new OverloadedException(null, "Overloaded"), + new BootstrappingException(null, "Bootstrapping"), + new UnpreparedException(null, "Unprepared"), + new InvalidQueryException("Validation", new Exception())); + + long startTime = System.currentTimeMillis(); + for (Exception exception : exceptionsToIgnore) { + tracker.update( + defaultHost, + defaultStatement, + exception, + TimeUnit.NANOSECONDS.convert(999, TimeUnit.MILLISECONDS)); + } + for (int i = 0; i < 100; i++) { + tracker.update( + defaultHost, + defaultStatement, + defaultException, + TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); + } + long waitTime = 50 - (System.currentTimeMillis() - startTime); + Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); + + // then - the resulting tracker's percentiles should all be 1, indicating those exceptions were + // ignored. + for (int i = 1; i <= 99; i++) { + long latencyAtPct = + tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); + assertThat(latencyAtPct).isEqualTo(1); + } + } + + @Test(groups = "unit") + public void should_not_record_anything_if_not_enough_measurements() throws Exception { + // given - a percentile tracker with 100 min recorded values. + Cluster cluster0 = mock(Cluster.class); + T tracker = + builder().withInterval(50, TimeUnit.MILLISECONDS).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); + + // when - recording less measurements then required. + long startTime = System.currentTimeMillis(); + for (int i = 0; i < 99; i++) { + tracker.update( + defaultHost, + defaultStatement, + defaultException, + TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); + } + long waitTime = 50 - (System.currentTimeMillis() - startTime); + Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); + + // then - the resulting tracker's percentiles should all be -1, indicating there were not enough + // values to consider. + for (int i = 1; i <= 99; i++) { + long latencyAtPct = + tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); + assertThat(latencyAtPct).isEqualTo(-1); + } + } + + @Test(groups = "short") + public void should_return_negative_value_when_interval_hasnt_elapsed() throws Exception { + // given - a percentile tracker with a long interval. + Cluster cluster0 = mock(Cluster.class); + T tracker = builder().withInterval(50, TimeUnit.MINUTES).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); + + // when - recording enough measurements. + for (int i = 0; i < 99; i++) { + tracker.update( + defaultHost, + defaultStatement, + defaultException, + TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); } - @Test(groups = "unit") - public void should_not_record_value_and_log_when_measurement_higher_than_max_trackable_value() throws Exception { - // given - a percentile tracker with a long interval. 
- Cluster cluster0 = mock(Cluster.class); - T tracker = builder() - .withInterval(50, TimeUnit.MILLISECONDS) - .withMinRecordedValues(100).build(); - tracker.onRegister(cluster0); - - Logger percentileLogger = Logger.getLogger(PercentileTracker.class); - Level originalLevel = percentileLogger.getLevel(); - percentileLogger.setLevel(Level.WARN); - MemoryAppender appender = new MemoryAppender(); - percentileLogger.addAppender(appender); - - try { - long startTime = System.currentTimeMillis(); - for (int i = 0; i < 100; i++) { - tracker.update(defaultHost, defaultStatement, defaultException, TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); - } - - // HdrHistogram adjusts its max based on bucket size, with these values it allows a max value of 2047 (2^11-1). - long largeLatency = 2048; - // when - recording a value larger than max trackable value. - tracker.update(defaultHost, defaultStatement, defaultException, TimeUnit.NANOSECONDS.convert(largeLatency, TimeUnit.MILLISECONDS)); - - assertThat(appender.get()) - .contains("Got request with latency of " + largeLatency - + " ms, which exceeds the configured maximum trackable value " + defaultMaxLatency); - - long waitTime = 50 - (System.currentTimeMillis() - startTime); - Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); - - // then - the resulting tracker's percentiles should all be 1, indicating the large value was ignored. - for (int i = 1; i <= 99; i++) { - long latencyAtPct = tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); - assertThat(latencyAtPct).isEqualTo(1); - } - } finally { - percentileLogger.setLevel(originalLevel); - percentileLogger.removeAppender(appender); - } + // then - the resulting tracker's percentiles should all be -1, since not enough time was given + // to elapse interval. + for (int i = 1; i <= 99; i++) { + long latencyAtPct = + tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); + assertThat(latencyAtPct).isEqualTo(-1); + } + } + + @Test(groups = "unit") + public void should_not_record_value_and_log_when_measurement_higher_than_max_trackable_value() + throws Exception { + // given - a percentile tracker with a long interval. + Cluster cluster0 = mock(Cluster.class); + T tracker = + builder().withInterval(50, TimeUnit.MILLISECONDS).withMinRecordedValues(100).build(); + tracker.onRegister(cluster0); + + Logger percentileLogger = Logger.getLogger(PercentileTracker.class); + Level originalLevel = percentileLogger.getLevel(); + percentileLogger.setLevel(Level.WARN); + MemoryAppender appender = new MemoryAppender(); + percentileLogger.addAppender(appender); + + try { + long startTime = System.currentTimeMillis(); + for (int i = 0; i < 100; i++) { + tracker.update( + defaultHost, + defaultStatement, + defaultException, + TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS)); + } + + // HdrHistogram adjusts its max based on bucket size, with these values it allows a max value + // of 2047 (2^11-1). + long largeLatency = 2048; + // when - recording a value larger than max trackable value. 
+ tracker.update( + defaultHost, + defaultStatement, + defaultException, + TimeUnit.NANOSECONDS.convert(largeLatency, TimeUnit.MILLISECONDS)); + + assertThat(appender.get()) + .contains( + "Got request with latency of " + + largeLatency + + " ms, which exceeds the configured maximum trackable value " + + defaultMaxLatency); + + long waitTime = 50 - (System.currentTimeMillis() - startTime); + Uninterruptibles.sleepUninterruptibly(waitTime + 100, TimeUnit.MILLISECONDS); + + // then - the resulting tracker's percentiles should all be 1, indicating the large value was + // ignored. + for (int i = 1; i <= 99; i++) { + long latencyAtPct = + tracker.getLatencyAtPercentile(defaultHost, defaultStatement, defaultException, i); + assertThat(latencyAtPct).isEqualTo(1); + } + } finally { + percentileLogger.setLevel(originalLevel); + percentileLogger.removeAppender(appender); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsIntegrationTest.java index d8092eeab43..86220860516 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,73 +17,75 @@ */ package com.datastax.driver.core; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; @CCMConfig(createSession = false) public class PoolingOptionsIntegrationTest extends CCMTestsSupport { - private ThreadPoolExecutor executor; - - @Override - public Cluster.Builder createClusterBuilder() { - executor = spy(new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue())); - PoolingOptions poolingOptions = new PoolingOptions(); - poolingOptions.setInitializationExecutor(executor); - return Cluster.builder().withPoolingOptions(poolingOptions); - } - - @AfterMethod(groups = "short") - public void shutdownExecutor() { - if (executor != null) - executor.shutdown(); - } - - /** - *

- * Validates that if a custom executor is provided via {@link PoolingOptions#setInitializationExecutor} that it
- * is used to create and tear down connections.
- *
    - * - * @test_category connection:connection_pool - * @expected_result executor is used and successfully able to connect and tear down connections. - * @jira_ticket JAVA-692 - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "short") - public void should_be_able_to_use_custom_initialization_executor() { - cluster().init(); - // Ensure executor used. - verify(executor, atLeastOnce()).execute(any(Runnable.class)); - - // Reset invocation count. - reset(); - - Session session = cluster().connect(); - - // Ensure executor used again to establish core connections. - verify(executor, atLeastOnce()).execute(any(Runnable.class)); - - // Expect core connections + control connection. - assertThat(cluster().getMetrics().getOpenConnections().getValue()).isEqualTo( - TestUtils.numberOfLocalCoreConnections(cluster()) + 1); - - reset(); - - session.close(); - - // Executor should have been used to close connections associated with the session. - verify(executor, atLeastOnce()).execute(any(Runnable.class)); - - // Only the control connection should remain. - assertThat(cluster().getMetrics().getOpenConnections().getValue()).isEqualTo(1); - } + private ThreadPoolExecutor executor; + + @Override + public Cluster.Builder createClusterBuilder() { + executor = + spy( + new ThreadPoolExecutor( + 1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue())); + PoolingOptions poolingOptions = new PoolingOptions(); + poolingOptions.setInitializationExecutor(executor); + return Cluster.builder().withPoolingOptions(poolingOptions); + } + + @AfterMethod(groups = "short") + public void shutdownExecutor() { + if (executor != null) executor.shutdown(); + } + + /** + * Validates that if a custom executor is provided via {@link + * PoolingOptions#setInitializationExecutor} that it is used to create and tear down connections. + * + * @test_category connection:connection_pool + * @expected_result executor is used and successfully able to connect and tear down connections. + * @jira_ticket JAVA-692 + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "short") + public void should_be_able_to_use_custom_initialization_executor() { + cluster().init(); + // Ensure executor used. + verify(executor, atLeastOnce()).execute(any(Runnable.class)); + + // Reset invocation count. + reset(); + + Session session = cluster().connect(); + + // Ensure executor used again to establish core connections. + verify(executor, atLeastOnce()).execute(any(Runnable.class)); + + // Expect core connections + control connection. + assertThat(cluster().getMetrics().getOpenConnections().getValue()) + .isEqualTo(TestUtils.numberOfLocalCoreConnections(cluster()) + 1); + + reset(); + + session.close(); + + // Executor should have been used to close connections associated with the session. + verify(executor, atLeastOnce()).execute(any(Runnable.class)); + + // Only the control connection should remain. + assertThat(cluster().getMetrics().getOpenConnections().getValue()).isEqualTo(1); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsTest.java index eda425ff6f1..565667baef8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PoolingOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,127 +17,142 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.HostDistance.LOCAL; import static com.datastax.driver.core.HostDistance.REMOTE; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; +import org.testng.annotations.Test; + public class PoolingOptionsTest { - @Test(groups = "unit") - public void should_initialize_to_v2_defaults_if_v2_or_below() { - PoolingOptions options = new PoolingOptions(); - options.setProtocolVersion(ProtocolVersion.V1); - - assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(2); - assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(8); - assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(1); - assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(2); - assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(100); - assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(100); - assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(128); - assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(128); + @Test(groups = "unit") + public void should_initialize_to_v2_defaults_if_v2_or_below() { + PoolingOptions options = new PoolingOptions(); + options.setProtocolVersion(ProtocolVersion.V1); + + assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(2); + assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(8); + assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(1); + assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(2); + assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(100); + assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(100); + assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(128); + assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(128); + } + + @Test(groups = "unit") + public void should_initialize_to_v3_defaults_if_v3_or_above() { + PoolingOptions options = new PoolingOptions(); + options.setProtocolVersion(ProtocolVersion.V3); + + assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(1); + assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(1); + assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(1); + assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(1); + assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(800); + assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(200); + assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(1024); + assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(256); + } + + @Test(groups = "unit") + public void should_enforce_invariants_once_protocol_version_known() { + // OK for v2 (default max = 
8) + PoolingOptions options = new PoolingOptions().setCoreConnectionsPerHost(LOCAL, 3); + options.setCoreConnectionsPerHost(LOCAL, 3); + options.setProtocolVersion(ProtocolVersion.V2); + assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(3); + assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(8); + + // KO for v3 (default max = 1) + options = new PoolingOptions().setCoreConnectionsPerHost(LOCAL, 3); + try { + options.setProtocolVersion(ProtocolVersion.V3); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - @Test(groups = "unit") - public void should_initialize_to_v3_defaults_if_v3_or_above() { - PoolingOptions options = new PoolingOptions(); - options.setProtocolVersion(ProtocolVersion.V3); - - assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(1); - assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(1); - assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(1); - assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(1); - assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(800); - assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(200); - assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(1024); - assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(256); + // OK for v3 (up to 32K stream ids) + options = new PoolingOptions().setMaxRequestsPerConnection(LOCAL, 5000); + options.setProtocolVersion(ProtocolVersion.V3); + assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(5000); + + // KO for v2 (up to 128) + options = new PoolingOptions().setMaxRequestsPerConnection(LOCAL, 5000); + try { + options.setProtocolVersion(ProtocolVersion.V2); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_enforce_invariants_once_protocol_version_known() { - // OK for v2 (default max = 8) - PoolingOptions options = new PoolingOptions().setCoreConnectionsPerHost(LOCAL, 3); - options.setCoreConnectionsPerHost(LOCAL, 3); - options.setProtocolVersion(ProtocolVersion.V2); - assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(3); - assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(8); - - // KO for v3 (default max = 1) - options = new PoolingOptions().setCoreConnectionsPerHost(LOCAL, 3); - try { - options.setProtocolVersion(ProtocolVersion.V3); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) {/*expected*/} - - // OK for v3 (up to 32K stream ids) - options = new PoolingOptions().setMaxRequestsPerConnection(LOCAL, 5000); - options.setProtocolVersion(ProtocolVersion.V3); - assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(5000); - - // KO for v2 (up to 128) - options = new PoolingOptions().setMaxRequestsPerConnection(LOCAL, 5000); - try { - options.setProtocolVersion(ProtocolVersion.V2); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) {/*expected*/} + } + + @Test(groups = "unit") + public void should_set_core_and_max_connections_simultaneously() { + PoolingOptions options = new PoolingOptions(); + options.setProtocolVersion(ProtocolVersion.V2); + + options.setConnectionsPerHost(LOCAL, 10, 15); + + assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(10); + assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(15); + } + + @Test(groups = "unit") + public void 
should_leave_connection_options_unset_until_protocol_version_known() { + PoolingOptions options = new PoolingOptions(); + + assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(PoolingOptions.UNSET); + assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(PoolingOptions.UNSET); + } + + @Test(groups = "unit") + public void should_reject_negative_connection_options_even_when_protocol_version_unknown() { + PoolingOptions options = new PoolingOptions(); + + try { + options.setCoreConnectionsPerHost(LOCAL, -1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_set_core_and_max_connections_simultaneously() { - PoolingOptions options = new PoolingOptions(); - options.setProtocolVersion(ProtocolVersion.V2); - - options.setConnectionsPerHost(LOCAL, 10, 15); - - assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(10); - assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(15); + try { + options.setMaxConnectionsPerHost(LOCAL, -1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_leave_connection_options_unset_until_protocol_version_known() { - PoolingOptions options = new PoolingOptions(); - - assertThat(options.getCoreConnectionsPerHost(LOCAL)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getCoreConnectionsPerHost(REMOTE)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getMaxConnectionsPerHost(LOCAL)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getMaxConnectionsPerHost(REMOTE)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getNewConnectionThreshold(LOCAL)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getNewConnectionThreshold(REMOTE)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getMaxRequestsPerConnection(LOCAL)).isEqualTo(PoolingOptions.UNSET); - assertThat(options.getMaxRequestsPerConnection(REMOTE)).isEqualTo(PoolingOptions.UNSET); + try { + options.setConnectionsPerHost(LOCAL, -1, 1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } - - @Test(groups = "unit") - public void should_reject_negative_connection_options_even_when_protocol_version_unknown() { - PoolingOptions options = new PoolingOptions(); - - try { - options.setCoreConnectionsPerHost(LOCAL, -1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - options.setMaxConnectionsPerHost(LOCAL, -1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - options.setConnectionsPerHost(LOCAL, -1, 1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - options.setConnectionsPerHost(LOCAL, -2, -1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { 
/*expected*/ } - try { - options.setNewConnectionThreshold(LOCAL, -1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } - try { - options.setMaxRequestsPerConnection(LOCAL, -1); - fail("expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { /*expected*/ } + try { + options.setConnectionsPerHost(LOCAL, -2, -1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ } + try { + options.setNewConnectionThreshold(LOCAL, -1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ + } + try { + options.setMaxRequestsPerConnection(LOCAL, -1); + fail("expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /*expected*/ + } + } } - diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedIdTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedIdTest.java index cfe007975d1..640c3ad124a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedIdTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedIdTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,49 +17,51 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; +import org.testng.annotations.Test; + public class PreparedIdTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE foo(k1 int, k2 int, k3 int, v int, PRIMARY KEY ((k1, k2, k3)))" - ); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE foo(k1 int, k2 int, k3 int, v int, PRIMARY KEY ((k1, k2, k3)))"); + } - /** - * Validates that the correct routing key indexes are present for a fully-bound prepared statement. - * - * @test_category prepared_statements:metadata - */ - @Test(groups = "short") - public void should_have_routing_key_indexes_when_all_bound() { - PreparedStatement pst = session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (?, ?, ?, ?)"); - assertThat(pst.getPreparedId().routingKeyIndexes).containsExactly(1, 2, 0); - } + /** + * Validates that the correct routing key indexes are present for a fully-bound prepared + * statement. 
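+   * For example, with the partition key (k1, k2, k3) and bind markers supplied in (k3, k1, k2) order, the expected routing key indexes are [1, 2, 0].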
+ * + * @test_category prepared_statements:metadata + */ + @Test(groups = "short") + public void should_have_routing_key_indexes_when_all_bound() { + PreparedStatement pst = + session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (?, ?, ?, ?)"); + assertThat(pst.getPreparedId().routingKeyIndexes).containsExactly(1, 2, 0); + } - /** - * Validates that no routing key indexes are present for a partially-bound prepared statement. - * - * @test_category prepared_statements:metadata - */ - @Test(groups = "short") - public void should_not_have_routing_key_indexes_when_some_not_bound() { - PreparedStatement pst = session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (1, ?, ?, ?)"); - assertThat(pst.getPreparedId().routingKeyIndexes).isNull(); - } + /** + * Validates that no routing key indexes are present for a partially-bound prepared statement. + * + * @test_category prepared_statements:metadata + */ + @Test(groups = "short") + public void should_not_have_routing_key_indexes_when_some_not_bound() { + PreparedStatement pst = + session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (1, ?, ?, ?)"); + assertThat(pst.getPreparedId().routingKeyIndexes).isNull(); + } - /** - * Validates that no routing key indexes are present for a none-bound prepared statement. - * - * @test_category prepared_statements:metadata - */ - @Test(groups = "short") - public void should_not_have_routing_key_indexes_when_none_bound() { - PreparedStatement pst = session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (1, 1, 1, 1)"); - assertThat(pst.getPreparedId().routingKeyIndexes).isNull(); - } + /** + * Validates that no routing key indexes are present for a none-bound prepared statement. + * + * @test_category prepared_statements:metadata + */ + @Test(groups = "short") + public void should_not_have_routing_key_indexes_when_none_bound() { + PreparedStatement pst = + session().prepare("INSERT INTO foo (k3, k1, k2, v) VALUES (1, 1, 1, 1)"); + assertThat(pst.getPreparedId().routingKeyIndexes).isNull(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java new file mode 100644 index 00000000000..853e638d99b --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java @@ -0,0 +1,242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; +import static junit.framework.TestCase.fail; + +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@CassandraVersion("4.0") +public class PreparedStatementInvalidationTest extends CCMTestsSupport { + + @BeforeMethod(groups = "short", alwaysRun = true) + public void setup() throws Exception { + execute("CREATE TABLE prepared_statement_invalidation_test (a int PRIMARY KEY, b int, c int);"); + execute("INSERT INTO prepared_statement_invalidation_test (a, b, c) VALUES (1, 1, 1);"); + execute("INSERT INTO prepared_statement_invalidation_test (a, b, c) VALUES (2, 2, 2);"); + execute("INSERT INTO prepared_statement_invalidation_test (a, b, c) VALUES (3, 3, 3);"); + execute("INSERT INTO prepared_statement_invalidation_test (a, b, c) VALUES (4, 4, 4);"); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void teardown() throws Exception { + execute("DROP TABLE prepared_statement_invalidation_test"); + } + + @Test(groups = "short") + public void should_update_statement_id_when_metadata_changed_across_executions() { + // given + PreparedStatement ps = + session().prepare("SELECT * FROM prepared_statement_invalidation_test WHERE a = ?"); + MD5Digest idBefore = ps.getPreparedId().resultSetMetadata.id; + // when + session().execute("ALTER TABLE prepared_statement_invalidation_test ADD d int"); + BoundStatement bs = ps.bind(1); + ResultSet rows = session().execute(bs); + // then + MD5Digest idAfter = ps.getPreparedId().resultSetMetadata.id; + assertThat(idBefore).isNotEqualTo(idAfter); + assertThat(ps.getPreparedId().resultSetMetadata.variables) + .hasSize(4) + .containsVariable("d", DataType.cint()); + assertThat(bs.preparedStatement().getPreparedId().resultSetMetadata.variables) + .hasSize(4) + .containsVariable("d", DataType.cint()); + assertThat(rows.getColumnDefinitions()).hasSize(4).containsVariable("d", DataType.cint()); + } + + @Test(groups = "short") + public void should_update_statement_id_when_metadata_changed_across_pages() throws Exception { + // given + PreparedStatement ps = session().prepare("SELECT * FROM prepared_statement_invalidation_test"); + ResultSet rows = session().execute(ps.bind().setFetchSize(2)); + assertThat(rows.isFullyFetched()).isFalse(); + MD5Digest idBefore = ps.getPreparedId().resultSetMetadata.id; + ColumnDefinitions definitionsBefore = rows.getColumnDefinitions(); + assertThat(definitionsBefore).hasSize(3).doesNotContainVariable("d"); + // consume the first page + int remaining = rows.getAvailableWithoutFetching(); + while (remaining-- > 0) { + try { + rows.one().getInt("d"); + fail("expected an error"); + } catch (IllegalArgumentException e) { + /*expected*/ + } + } + + // when + session().execute("ALTER TABLE prepared_statement_invalidation_test ADD d int"); + + // then + // this should trigger a background fetch of the second page, and therefore update the + // definitions + for (Row row : rows) { + assertThat(row.isNull("d")).isTrue(); + } + MD5Digest idAfter = ps.getPreparedId().resultSetMetadata.id; + ColumnDefinitions definitionsAfter = rows.getColumnDefinitions(); + assertThat(idBefore).isNotEqualTo(idAfter); + assertThat(definitionsAfter).hasSize(4).containsVariable("d", DataType.cint()); + } + + @Test(groups = "short") + public void 
should_update_statement_id_when_metadata_changed_across_sessions() { + Session session1 = cluster().connect(); + useKeyspace(session1, keyspace); + Session session2 = cluster().connect(); + useKeyspace(session2, keyspace); + + PreparedStatement ps1 = + session1.prepare("SELECT * FROM prepared_statement_invalidation_test WHERE a = ?"); + PreparedStatement ps2 = + session2.prepare("SELECT * FROM prepared_statement_invalidation_test WHERE a = ?"); + + MD5Digest id1a = ps1.getPreparedId().resultSetMetadata.id; + MD5Digest id2a = ps2.getPreparedId().resultSetMetadata.id; + + ResultSet rows1 = session1.execute(ps1.bind(1)); + ResultSet rows2 = session2.execute(ps2.bind(1)); + + assertThat(rows1.getColumnDefinitions()) + .hasSize(3) + .containsVariable("a", DataType.cint()) + .containsVariable("b", DataType.cint()) + .containsVariable("c", DataType.cint()); + assertThat(rows2.getColumnDefinitions()) + .hasSize(3) + .containsVariable("a", DataType.cint()) + .containsVariable("b", DataType.cint()) + .containsVariable("c", DataType.cint()); + + session1.execute("ALTER TABLE prepared_statement_invalidation_test ADD d int"); + + rows1 = session1.execute(ps1.bind(1)); + rows2 = session2.execute(ps2.bind(1)); + + MD5Digest id1b = ps1.getPreparedId().resultSetMetadata.id; + MD5Digest id2b = ps2.getPreparedId().resultSetMetadata.id; + + assertThat(id1a).isNotEqualTo(id1b); + assertThat(id2a).isNotEqualTo(id2b); + + assertThat(ps1.getPreparedId().resultSetMetadata.variables) + .hasSize(4) + .containsVariable("d", DataType.cint()); + assertThat(ps2.getPreparedId().resultSetMetadata.variables) + .hasSize(4) + .containsVariable("d", DataType.cint()); + assertThat(rows1.getColumnDefinitions()).hasSize(4).containsVariable("d", DataType.cint()); + assertThat(rows2.getColumnDefinitions()).hasSize(4).containsVariable("d", DataType.cint()); + } + + @Test(groups = "short", expectedExceptions = NoHostAvailableException.class) + public void should_not_reprepare_invalid_statements() { + // given + session().execute("ALTER TABLE prepared_statement_invalidation_test ADD d int"); + PreparedStatement ps = + session() + .prepare("SELECT a, b, c, d FROM prepared_statement_invalidation_test WHERE a = ?"); + session().execute("ALTER TABLE prepared_statement_invalidation_test DROP d"); + // when + session().execute(ps.bind()); + } + + @Test(groups = "short") + public void should_never_update_statement_id_for_conditional_updates_in_modern_protocol() { + should_never_update_statement_id_for_conditional_updates(session()); + } + + private void should_never_update_statement_id_for_conditional_updates(Session session) { + // Given + PreparedStatement ps = + session.prepare( + "INSERT INTO prepared_statement_invalidation_test (a, b, c) VALUES (?, ?, ?) IF NOT EXISTS"); + + // Never store metadata in the prepared statement for conditional updates, since the result set + // can change + // depending on the outcome. 
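+    // As a result, the metadata id captured below should stay unchanged across every execution in this test.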
+ assertThat(ps.getPreparedId().resultSetMetadata.variables).isNull(); + MD5Digest idBefore = ps.getPreparedId().resultSetMetadata.id; + + // When + ResultSet rs = session.execute(ps.bind(5, 5, 5)); + + // Then + // Successful conditional update => only contains the [applied] column + assertThat(rs.wasApplied()).isTrue(); + assertThat(rs.getColumnDefinitions()) + .hasSize(1) + .containsVariable("[applied]", DataType.cboolean()); + // However the prepared statement shouldn't have changed + assertThat(ps.getPreparedId().resultSetMetadata.variables).isNull(); + assertThat(ps.getPreparedId().resultSetMetadata.id).isEqualTo(idBefore); + + // When + rs = session.execute(ps.bind(5, 5, 5)); + + // Then + // Failed conditional update => regular metadata + assertThat(rs.wasApplied()).isFalse(); + assertThat(rs.getColumnDefinitions()).hasSize(4); + Row row = rs.one(); + assertThat(row.getBool("[applied]")).isFalse(); + assertThat(row.getInt("a")).isEqualTo(5); + assertThat(row.getInt("b")).isEqualTo(5); + assertThat(row.getInt("c")).isEqualTo(5); + // The prepared statement still shouldn't have changed + assertThat(ps.getPreparedId().resultSetMetadata.variables).isNull(); + assertThat(ps.getPreparedId().resultSetMetadata.id).isEqualTo(idBefore); + + // When + session.execute("ALTER TABLE prepared_statement_invalidation_test ADD d int"); + rs = session.execute(ps.bind(5, 5, 5)); + + // Then + // Failed conditional update => regular metadata that should also contain the new column + assertThat(rs.wasApplied()).isFalse(); + assertThat(rs.getColumnDefinitions()).hasSize(5); + row = rs.one(); + assertThat(row.getBool("[applied]")).isFalse(); + assertThat(row.getInt("a")).isEqualTo(5); + assertThat(row.getInt("b")).isEqualTo(5); + assertThat(row.getInt("c")).isEqualTo(5); + assertThat(row.isNull("d")).isTrue(); + assertThat(ps.getPreparedId().resultSetMetadata.variables).isNull(); + assertThat(ps.getPreparedId().resultSetMetadata.id).isEqualTo(idBefore); + } + + @Test(groups = "short") + public void should_never_update_statement_for_conditional_updates_in_legacy_protocols() { + // Given + Cluster cluster = + register( + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withProtocolVersion(ccm().getProtocolVersion(ProtocolVersion.V4)) + .build()); + Session session = cluster.connect(keyspace); + should_never_update_statement_id_for_conditional_updates(session); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index be68e2fac7b..67a669d6ee0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +17,11 @@ */ package com.datastax.driver.core; -import java.net.InetAddress; -import java.util.*; -import java.util.concurrent.TimeUnit; - -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.Test; - +import static com.datastax.driver.core.ProtocolVersion.V4; +import static com.datastax.driver.core.TestUtils.getFixedValue; +import static com.datastax.driver.core.TestUtils.getFixedValue2; +import static com.datastax.driver.core.TestUtils.getValue; +import static com.datastax.driver.core.TestUtils.setValue; import static org.assertj.core.api.Assertions.assertThat; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; @@ -35,715 +33,826 @@ import com.datastax.driver.core.policies.FallthroughRetryPolicy; import com.datastax.driver.core.utils.Bytes; import com.datastax.driver.core.utils.CassandraVersion; - -import static com.datastax.driver.core.ProtocolVersion.V4; -import static com.datastax.driver.core.TestUtils.getFixedValue; -import static com.datastax.driver.core.TestUtils.getFixedValue2; -import static com.datastax.driver.core.TestUtils.getValue; -import static com.datastax.driver.core.TestUtils.setValue; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.Uninterruptibles; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; /** * Prepared statement tests. - *
<p/>
    - * Note: this class also happens to test all the get methods from Row. + * + *
<p>
    Note: this class also happens to test all the get methods from Row. */ @CCMConfig(clusterProvider = "createClusterBuilderNoDebouncing") public class PreparedStatementTest extends CCMTestsSupport { - private static final String ALL_NATIVE_TABLE = "all_native"; - private static final String ALL_LIST_TABLE = "all_list"; - private static final String ALL_SET_TABLE = "all_set"; - private static final String ALL_MAP_TABLE = "all_map"; - private static final String SIMPLE_TABLE = "test"; - private static final String SIMPLE_TABLE2 = "test2"; - - private ProtocolVersion protocolVersion; - private Collection primitiveTypes; - - private boolean exclude(DataType t) { - // duration is not supported in collections - return t.getName() == DataType.Name.COUNTER || t.getName() == DataType.Name.DURATION; + private static final String ALL_NATIVE_TABLE = "all_native"; + private static final String ALL_LIST_TABLE = "all_list"; + private static final String ALL_SET_TABLE = "all_set"; + private static final String ALL_MAP_TABLE = "all_map"; + private static final String SIMPLE_TABLE = "test"; + private static final String SIMPLE_TABLE2 = "test2"; + + private ProtocolVersion protocolVersion; + private Collection primitiveTypes; + + private boolean exclude(DataType t) { + // duration is not supported in collections + return t.getName() == DataType.Name.COUNTER || t.getName() == DataType.Name.DURATION; + } + + @Override + public void onTestContextInitialized() { + protocolVersion = ccm().getProtocolVersion(); + primitiveTypes = TestUtils.allPrimitiveTypes(protocolVersion); + execute(createTestFixtures()); + } + + @AfterMethod(groups = "short") + public void tearDown() throws Exception { + execute( + String.format("TRUNCATE %s", ALL_NATIVE_TABLE), + String.format("TRUNCATE %s", ALL_LIST_TABLE), + String.format("TRUNCATE %s", ALL_SET_TABLE), + String.format("TRUNCATE %s", ALL_MAP_TABLE), + String.format("TRUNCATE %s", SIMPLE_TABLE), + String.format("TRUNCATE %s", SIMPLE_TABLE2)); + } + + private List createTestFixtures() { + List defs = new ArrayList(4); + + StringBuilder sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_NATIVE_TABLE).append(" (k text PRIMARY KEY"); + for (DataType type : primitiveTypes) { + if (exclude(type)) continue; + sb.append(", c_").append(type).append(' ').append(type); } - - @Override - public void onTestContextInitialized() { - protocolVersion = ccm().getProtocolVersion(); - primitiveTypes = TestUtils.allPrimitiveTypes(protocolVersion); - execute(createTestFixtures()); + sb.append(')'); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_LIST_TABLE).append(" (k text PRIMARY KEY"); + for (DataType type : primitiveTypes) { + if (exclude(type)) continue; + sb.append(", c_list_").append(type).append(" list<").append(type).append('>'); } - - @AfterMethod(groups = "short") - public void tearDown() throws Exception { - execute( - String.format("TRUNCATE %s", ALL_NATIVE_TABLE), - String.format("TRUNCATE %s", ALL_LIST_TABLE), - String.format("TRUNCATE %s", ALL_SET_TABLE), - String.format("TRUNCATE %s", ALL_MAP_TABLE), - String.format("TRUNCATE %s", SIMPLE_TABLE), - String.format("TRUNCATE %s", SIMPLE_TABLE2) - ); + sb.append(')'); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_SET_TABLE).append(" (k text PRIMARY KEY"); + for (DataType type : primitiveTypes) { + // This must be handled separately + if (exclude(type)) continue; + sb.append(", c_set_").append(type).append(" 
set<").append(type).append('>'); } - - private List createTestFixtures() { - List defs = new ArrayList(4); - - StringBuilder sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(ALL_NATIVE_TABLE).append(" (k text PRIMARY KEY"); - for (DataType type : primitiveTypes) { - if (exclude(type)) - continue; - sb.append(", c_").append(type).append(' ').append(type); - } - sb.append(')'); - defs.add(sb.toString()); - - sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(ALL_LIST_TABLE).append(" (k text PRIMARY KEY"); - for (DataType type : primitiveTypes) { - if (exclude(type)) - continue; - sb.append(", c_list_").append(type).append(" list<").append(type).append('>'); - } - sb.append(')'); - defs.add(sb.toString()); - - sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(ALL_SET_TABLE).append(" (k text PRIMARY KEY"); - for (DataType type : primitiveTypes) { - // This must be handled separately - if (exclude(type)) - continue; - sb.append(", c_set_").append(type).append(" set<").append(type).append('>'); - } - sb.append(')'); - defs.add(sb.toString()); - - sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(ALL_MAP_TABLE).append(" (k text PRIMARY KEY"); - for (DataType keyType : primitiveTypes) { - // This must be handled separately - if (exclude(keyType)) - continue; - - for (DataType valueType : primitiveTypes) { - // This must be handled separately - if (exclude(valueType)) - continue; - sb.append(", c_map_").append(keyType).append('_').append(valueType).append(" map<").append(keyType).append(',').append(valueType).append('>'); - } - } - sb.append(')'); - defs.add(sb.toString()); - - defs.add(String.format("CREATE TABLE %s (k text PRIMARY KEY, i int)", SIMPLE_TABLE)); - defs.add(String.format("CREATE TABLE %s (k text PRIMARY KEY, v text)", SIMPLE_TABLE2)); - return defs; + sb.append(')'); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_MAP_TABLE).append(" (k text PRIMARY KEY"); + for (DataType keyType : primitiveTypes) { + // This must be handled separately + if (exclude(keyType)) continue; + + for (DataType valueType : primitiveTypes) { + // This must be handled separately + if (exclude(valueType)) continue; + sb.append(", c_map_") + .append(keyType) + .append('_') + .append(valueType) + .append(" map<") + .append(keyType) + .append(',') + .append(valueType) + .append('>'); + } } - - @Test(groups = "short") - public void preparedNativeTest() { - // Test preparing/bounding for all native types - for (DataType type : primitiveTypes) { - // This must be handled separately - if (exclude(type)) - continue; - - String name = "c_" + type; - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", ALL_NATIVE_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, getFixedValue(type)); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), getFixedValue(type), "For type " + type); - } + sb.append(')'); + defs.add(sb.toString()); + + defs.add(String.format("CREATE TABLE %s (k text PRIMARY KEY, i int)", SIMPLE_TABLE)); + defs.add(String.format("CREATE TABLE %s (k text PRIMARY KEY, v text)", SIMPLE_TABLE2)); + return defs; + } + + @Test(groups = "short") + public void preparedNativeTest() { + // Test preparing/bounding for all native types + for (DataType type : 
primitiveTypes) { + // This must be handled separately + if (exclude(type)) continue; + + String name = "c_" + type; + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", + ALL_NATIVE_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, getFixedValue(type)); + session().execute(bs); + + Row row = + session() + .execute( + String.format( + "SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + getFixedValue(type), + "For type " + type); } - - /** - * Almost the same as preparedNativeTest, but it uses getFixedValue2() instead. - */ - @Test(groups = "short") - public void preparedNativeTest2() { - // Test preparing/bounding for all native types - for (DataType type : primitiveTypes) { - // This must be handled separately - if (exclude(type)) - continue; - - String name = "c_" + type; - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", ALL_NATIVE_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, getFixedValue2(type)); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), getFixedValue2(type), "For type " + type); - } - } - - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareListTest() { - // Test preparing/bounding for all possible list types - for (DataType rawType : primitiveTypes) { - // This must be handled separately - if (exclude(rawType)) - continue; - - String name = "c_list_" + rawType; - DataType type = DataType.list(rawType); - List value = (List) getFixedValue(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } - } - - /** - * Almost the same as prepareListTest, but it uses getFixedValue2() instead. - */ - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareListTest2() { - // Test preparing/bounding for all possible list types - for (DataType rawType : primitiveTypes) { - // This must be handled separately - if (exclude(rawType)) - continue; - - String name = "c_list_" + rawType; - DataType type = DataType.list(rawType); - List value = (List) getFixedValue2(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } + } + + /** Almost the same as preparedNativeTest, but it uses getFixedValue2() instead. 
*/ + @Test(groups = "short") + public void preparedNativeTest2() { + // Test preparing/bounding for all native types + for (DataType type : primitiveTypes) { + // This must be handled separately + if (exclude(type)) continue; + + String name = "c_" + type; + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", + ALL_NATIVE_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, getFixedValue2(type)); + session().execute(bs); + + Row row = + session() + .execute( + String.format( + "SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + getFixedValue2(type), + "For type " + type); } - - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareSetTest() { - // Test preparing/bounding for all possible set types - for (DataType rawType : primitiveTypes) { - // This must be handled separately - if (exclude(rawType)) - continue; - - String name = "c_set_" + rawType; - DataType type = DataType.set(rawType); - Set value = (Set) getFixedValue(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } + } + + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareListTest() { + // Test preparing/bounding for all possible list types + for (DataType rawType : primitiveTypes) { + // This must be handled separately + if (exclude(rawType)) continue; + + String name = "c_list_" + rawType; + DataType type = DataType.list(rawType); + List value = (List) getFixedValue(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, value); + session().execute(bs); + + Row row = + session() + .execute( + String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); } - - /** - * Almost the same as prepareSetTest, but it uses getFixedValue2() instead. 
- */ - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareSetTest2() { - // Test preparing/bounding for all possible set types - for (DataType rawType : primitiveTypes) { - // This must be handled separately - if (exclude(rawType)) - continue; - - String name = "c_set_" + rawType; - DataType type = DataType.set(rawType); - Set value = (Set) getFixedValue2(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } + } + + /** Almost the same as prepareListTest, but it uses getFixedValue2() instead. */ + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareListTest2() { + // Test preparing/bounding for all possible list types + for (DataType rawType : primitiveTypes) { + // This must be handled separately + if (exclude(rawType)) continue; + + String name = "c_list_" + rawType; + DataType type = DataType.list(rawType); + List value = (List) getFixedValue2(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, value); + session().execute(bs); + + Row row = + session() + .execute( + String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); } - - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareMapTest() { - // Test preparing/bounding for all possible map types - for (DataType rawKeyType : primitiveTypes) { - // This must be handled separately - if (exclude(rawKeyType)) - continue; - - for (DataType rawValueType : primitiveTypes) { - // This must be handled separately - if (exclude(rawValueType)) - continue; - - String name = "c_map_" + rawKeyType + '_' + rawValueType; - DataType type = DataType.map(rawKeyType, rawValueType); - Map value = (Map) getFixedValue(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } - } + } + + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareSetTest() { + // Test preparing/bounding for all possible set types + for (DataType rawType : primitiveTypes) { + // This must be handled separately + if (exclude(rawType)) continue; + + String name = "c_set_" + rawType; + DataType type = DataType.set(rawType); + Set value = (Set) getFixedValue(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, value); + session().execute(bs); + + Row row = + session() + .execute( + 
String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); } - - /** - * Almost the same as prepareMapTest, but it uses getFixedValue2() instead. - */ - @Test(groups = "short") - @SuppressWarnings("unchecked") - public void prepareMapTest2() { - // Test preparing/bounding for all possible map types - for (DataType rawKeyType : primitiveTypes) { - // This must be handled separately - if (exclude(rawKeyType)) - continue; - - for (DataType rawValueType : primitiveTypes) { - // This must be handled separately - if (exclude(rawValueType)) - continue; - - String name = "c_map_" + rawKeyType + '_' + rawValueType; - DataType type = DataType.map(rawKeyType, rawValueType); - Map value = (Map) getFixedValue2(type); - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); - BoundStatement bs = ps.bind(); - setValue(bs, name, type, value); - session().execute(bs); - - Row row = session().execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).one(); - assertEquals(getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), value, "For type " + type); - } - } + } + + /** Almost the same as prepareSetTest, but it uses getFixedValue2() instead. */ + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareSetTest2() { + // Test preparing/bounding for all possible set types + for (DataType rawType : primitiveTypes) { + // This must be handled separately + if (exclude(rawType)) continue; + + String name = "c_set_" + rawType; + DataType type = DataType.set(rawType); + Set value = (Set) getFixedValue2(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, value); + session().execute(bs); + + Row row = + session() + .execute( + String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); } - - @Test(groups = "short") - public void prepareWithNullValuesTest() throws Exception { - - PreparedStatement ps = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)"); - - session().execute(ps.bind("prepWithNull1", null)); - + } + + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareMapTest() { + // Test preparing/bounding for all possible map types + for (DataType rawKeyType : primitiveTypes) { + // This must be handled separately + if (exclude(rawKeyType)) continue; + + for (DataType rawValueType : primitiveTypes) { + // This must be handled separately + if (exclude(rawValueType)) continue; + + String name = "c_map_" + rawKeyType + '_' + rawValueType; + DataType type = DataType.map(rawKeyType, rawValueType); + Map value = (Map) getFixedValue(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); BoundStatement bs = ps.bind(); - bs.setString("k", "prepWithNull2"); - bs.setString("v", null); + setValue(bs, name, type, value); session().execute(bs); - ResultSet rs = session().execute("SELECT * FROM " + SIMPLE_TABLE2 + " WHERE k IN ('prepWithNull1', 'prepWithNull2')"); - Row r1 = 
rs.one(); - Row r2 = rs.one(); - assertTrue(rs.isExhausted()); - - assertEquals(r1.getString("k"), "prepWithNull1"); - assertEquals(r1.getString("v"), null); - - assertEquals(r2.getString("k"), "prepWithNull2"); - assertEquals(r2.getString("v"), null); + Row row = + session() + .execute( + String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); + } } + } + + /** Almost the same as prepareMapTest, but it uses getFixedValue2() instead. */ + @Test(groups = "short") + @SuppressWarnings("unchecked") + public void prepareMapTest2() { + // Test preparing/bounding for all possible map types + for (DataType rawKeyType : primitiveTypes) { + // This must be handled separately + if (exclude(rawKeyType)) continue; + + for (DataType rawValueType : primitiveTypes) { + // This must be handled separately + if (exclude(rawValueType)) continue; + + String name = "c_map_" + rawKeyType + '_' + rawValueType; + DataType type = DataType.map(rawKeyType, rawValueType); + Map value = (Map) getFixedValue2(type); + PreparedStatement ps = + session() + .prepare( + String.format( + "INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); + BoundStatement bs = ps.bind(); + setValue(bs, name, type, value); + session().execute(bs); - @Test(groups = "short") - public void prepareStatementInheritPropertiesTest() { - - RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?"); - toPrepare.setConsistencyLevel(ConsistencyLevel.QUORUM); - toPrepare.setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL); - toPrepare.setRetryPolicy(FallthroughRetryPolicy.INSTANCE); - if (protocolVersion.compareTo(V4) >= 0) - toPrepare.setOutgoingPayload(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); - toPrepare.setIdempotent(true); - toPrepare.enableTracing(); - - PreparedStatement prepared = session().prepare(toPrepare); - assertThat(prepared.getConsistencyLevel()).isEqualTo(ConsistencyLevel.QUORUM); - assertThat(prepared.getSerialConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); - assertThat(prepared.getRetryPolicy()).isEqualTo(FallthroughRetryPolicy.INSTANCE); - if (protocolVersion.compareTo(V4) >= 0) - assertThat(prepared.getOutgoingPayload()).isEqualTo(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); - assertThat(prepared.isIdempotent()).isTrue(); - assertThat(prepared.isTracing()).isTrue(); - - BoundStatement bs = prepared.bind("someValue"); - assertThat(bs.getConsistencyLevel()).isEqualTo(ConsistencyLevel.QUORUM); - assertThat(bs.getSerialConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); - assertThat(bs.getRetryPolicy()).isEqualTo(FallthroughRetryPolicy.INSTANCE); - if (protocolVersion.compareTo(V4) >= 0) - assertThat(bs.getOutgoingPayload()).isEqualTo(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); - assertThat(bs.isIdempotent()).isTrue(); - assertThat(bs.isTracing()).isTrue(); + Row row = + session() + .execute( + String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)) + .one(); + assertEquals( + getValue(row, name, type, cluster().getConfiguration().getCodecRegistry()), + value, + "For type " + type); + } } - - /** - * Prints the table definitions that will be used in testing - * (for exporting purposes) - */ - @Test(groups = {"docs"}) - public void printTableDefinitions() { - for (String definition : createTestFixtures()) { - 
System.out.println(definition); - } + } + + @Test(groups = "short") + public void prepareWithNullValuesTest() throws Exception { + + PreparedStatement ps = + session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)"); + + session().execute(ps.bind("prepWithNull1", null)); + + BoundStatement bs = ps.bind(); + bs.setString("k", "prepWithNull2"); + bs.setString("v", null); + session().execute(bs); + + ResultSet rs = + session() + .execute( + "SELECT * FROM " + + SIMPLE_TABLE2 + + " WHERE k IN ('prepWithNull1', 'prepWithNull2')"); + Row r1 = rs.one(); + Row r2 = rs.one(); + assertTrue(rs.isExhausted()); + + assertEquals(r1.getString("k"), "prepWithNull1"); + assertEquals(r1.getString("v"), null); + + assertEquals(r2.getString("k"), "prepWithNull2"); + assertEquals(r2.getString("v"), null); + } + + @Test(groups = "short") + public void prepareStatementInheritPropertiesTest() { + + RegularStatement toPrepare = new SimpleStatement("SELECT * FROM test WHERE k=?"); + toPrepare.setConsistencyLevel(ConsistencyLevel.QUORUM); + toPrepare.setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL); + toPrepare.setRetryPolicy(FallthroughRetryPolicy.INSTANCE); + if (protocolVersion.compareTo(V4) >= 0) + toPrepare.setOutgoingPayload(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); + toPrepare.setIdempotent(true); + toPrepare.enableTracing(); + + PreparedStatement prepared = session().prepare(toPrepare); + assertThat(prepared.getConsistencyLevel()).isEqualTo(ConsistencyLevel.QUORUM); + assertThat(prepared.getSerialConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); + assertThat(prepared.getRetryPolicy()).isEqualTo(FallthroughRetryPolicy.INSTANCE); + if (protocolVersion.compareTo(V4) >= 0) + assertThat(prepared.getOutgoingPayload()) + .isEqualTo(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); + assertThat(prepared.isIdempotent()).isTrue(); + assertThat(prepared.isTracing()).isTrue(); + + BoundStatement bs = prepared.bind("someValue"); + assertThat(bs.getConsistencyLevel()).isEqualTo(ConsistencyLevel.QUORUM); + assertThat(bs.getSerialConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); + assertThat(bs.getRetryPolicy()).isEqualTo(FallthroughRetryPolicy.INSTANCE); + if (protocolVersion.compareTo(V4) >= 0) + assertThat(bs.getOutgoingPayload()) + .isEqualTo(ImmutableMap.of("foo", Bytes.fromHexString("0xcafebabe"))); + assertThat(bs.isIdempotent()).isTrue(); + assertThat(bs.isTracing()).isTrue(); + } + + /** Prints the table definitions that will be used in testing (for exporting purposes) */ + @Test(groups = {"docs"}) + public void printTableDefinitions() { + for (String definition : createTestFixtures()) { + System.out.println(definition); } + } - @Test(groups = "short") - public void batchTest() throws Exception { + @Test(groups = "short") + public void batchTest() throws Exception { - try { - PreparedStatement ps1 = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)"); - PreparedStatement ps2 = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, 'bar')"); + try { + PreparedStatement ps1 = + session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)"); + PreparedStatement ps2 = + session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, 'bar')"); - BatchStatement bs = new BatchStatement(); - bs.add(ps1.bind("one", "foo")); - bs.add(ps2.bind("two")); - bs.add(new SimpleStatement("INSERT INTO " + SIMPLE_TABLE2 + " (k, v) VALUES ('three', 'foobar')")); + BatchStatement bs = new BatchStatement(); 
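+      // Add bound statements from two different prepared statements plus a plain SimpleStatement to the same batch.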
+ bs.add(ps1.bind("one", "foo")); + bs.add(ps2.bind("two")); + bs.add( + new SimpleStatement( + "INSERT INTO " + SIMPLE_TABLE2 + " (k, v) VALUES ('three', 'foobar')")); - session().execute(bs); + session().execute(bs); - List all = session().execute("SELECT * FROM " + SIMPLE_TABLE2).all(); + List all = session().execute("SELECT * FROM " + SIMPLE_TABLE2).all(); - assertEquals("three", all.get(0).getString("k")); - assertEquals("foobar", all.get(0).getString("v")); + assertEquals("three", all.get(0).getString("k")); + assertEquals("foobar", all.get(0).getString("v")); - assertEquals("one", all.get(1).getString("k")); - assertEquals("foo", all.get(1).getString("v")); + assertEquals("one", all.get(1).getString("k")); + assertEquals("foo", all.get(1).getString("v")); - assertEquals("two", all.get(2).getString("k")); - assertEquals("bar", all.get(2).getString("v")); - } catch (UnsupportedFeatureException e) { - // This is expected when testing the protocol v1 - if (cluster().getConfiguration().getProtocolOptions().getProtocolVersion() != ProtocolVersion.V1) - throw e; - } + assertEquals("two", all.get(2).getString("k")); + assertEquals("bar", all.get(2).getString("v")); + } catch (UnsupportedFeatureException e) { + // This is expected when testing the protocol v1 + if (cluster().getConfiguration().getProtocolOptions().getProtocolVersion() + != ProtocolVersion.V1) throw e; } - - @Test(groups = "short") - public void should_set_routing_key_on_case_insensitive_keyspace_and_table() { - session().execute(String.format("CREATE TABLE %s.foo (i int PRIMARY KEY)", keyspace)); - - PreparedStatement ps = session().prepare(String.format("INSERT INTO %s.foo (i) VALUES (?)", keyspace)); - BoundStatement bs = ps.bind(1); - assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNotNull(); - } - - @Test(groups = "short") - public void should_set_routing_key_on_case_sensitive_keyspace_and_table() { - session().execute("CREATE KEYSPACE \"Test\" WITH replication = { " + } + + @Test(groups = "short") + public void should_set_routing_key_on_case_insensitive_keyspace_and_table() { + session().execute(String.format("CREATE TABLE %s.foo (i int PRIMARY KEY)", keyspace)); + + PreparedStatement ps = + session().prepare(String.format("INSERT INTO %s.foo (i) VALUES (?)", keyspace)); + BoundStatement bs = ps.bind(1); + assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNotNull(); + } + + @Test(groups = "short") + public void should_set_routing_key_on_case_sensitive_keyspace_and_table() { + session() + .execute( + "CREATE KEYSPACE \"Test\" WITH replication = { " + " 'class': 'SimpleStrategy'," + " 'replication_factor': '1'" + "}"); - session().execute("CREATE TABLE \"Test\".\"Foo\" (i int PRIMARY KEY)"); - - PreparedStatement ps = session().prepare("INSERT INTO \"Test\".\"Foo\" (i) VALUES (?)"); - BoundStatement bs = ps.bind(1); - assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNotNull(); - } - - @Test(groups = "short", expectedExceptions = InvalidQueryException.class) - public void should_fail_when_prepared_on_another_cluster() throws Exception { - Cluster otherCluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build(); - try { - PreparedStatement pst = otherCluster.connect().prepare("select * from system.peers where inet = ?"); - BoundStatement bs = pst.bind().setInet(0, InetAddress.getByName("localhost")); - - // We expect that the 
error gets detected without a roundtrip to the server, so use executeAsync - session().executeAsync(bs); - } finally { - otherCluster.close(); - } + session().execute("CREATE TABLE \"Test\".\"Foo\" (i int PRIMARY KEY)"); + + PreparedStatement ps = session().prepare("INSERT INTO \"Test\".\"Foo\" (i) VALUES (?)"); + BoundStatement bs = ps.bind(1); + assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNotNull(); + } + + @Test(groups = "short", expectedExceptions = InvalidQueryException.class) + public void should_fail_when_prepared_on_another_cluster() throws Exception { + Cluster otherCluster = createClusterBuilder().build(); + try { + PreparedStatement pst = + otherCluster.connect().prepare("select * from system.peers where inet = ?"); + BoundStatement bs = pst.bind().setInet(0, InetAddress.getByName("localhost")); + + // We expect that the error gets detected without a roundtrip to the server, so use + // executeAsync + session().executeAsync(bs); + } finally { + otherCluster.close(); } - - /** - * Tests that, under protocol versions lesser than V4, - * it is NOT possible to execute a prepared statement with unbound values. - * Note that we have to force protocol version to less than V4 because - * higher protocol versions would allow such unbound values to be sent. - * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "short") - public void should_not_allow_unbound_value_on_bound_statement_when_protocol_lesser_than_v4() { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + } + + /** + * Tests that, under protocol versions lesser than V4, it is NOT possible to execute a prepared + * statement with unbound values. Note that we have to force protocol version to less than V4 + * because higher protocol versions would allow such unbound values to be sent. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "short") + public void should_not_allow_unbound_value_on_bound_statement_when_protocol_lesser_than_v4() { + Cluster cluster = + register( + createClusterBuilder() .withProtocolVersion(ccm().getProtocolVersion(ProtocolVersion.V3)) .build()); - Session session = cluster.connect(); - try { - PreparedStatement ps = session.prepare("INSERT INTO " + keyspace + "." + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement bs = ps.bind("foo"); - assertFalse(bs.isSet("i")); - session.execute(bs); - fail("Should not have executed statement with UNSET values in protocol V3"); - } catch (IllegalStateException e) { - assertThat(e.getMessage()).contains("Unset value at index 1"); - } + Session session = cluster.connect(); + try { + PreparedStatement ps = + session.prepare("INSERT INTO " + keyspace + "." + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement bs = ps.bind("foo"); + assertFalse(bs.isSet("i")); + session.execute(bs); + fail("Should not have executed statement with UNSET values in protocol V3"); + } catch (IllegalStateException e) { + assertThat(e.getMessage()).contains("Unset value at index 1"); + } finally { + session.close(); + cluster.close(); } - - /** - * Tests that, under protocol versions lesser that V4, - * it is NOT possible to execute a prepared statement with unbound values. - * Note that we have to force protocol version to less than V4 because - * higher protocol versions would allow such unbound values to be sent. 
- * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_not_allow_unbound_value_on_batch_statement_when_protocol_lesser_than_v4() { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + } + + /** + * Tests that, under protocol versions lesser that V4, it is NOT possible to execute a prepared + * statement with unbound values. Note that we have to force protocol version to less than V4 + * because higher protocol versions would allow such unbound values to be sent. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_not_allow_unbound_value_on_batch_statement_when_protocol_lesser_than_v4() { + Cluster cluster = + register( + createClusterBuilder() .withProtocolVersion(ccm().getProtocolVersion(ProtocolVersion.V3)) .build()); - Session session = cluster.connect(); - try { - PreparedStatement ps = session.prepare("INSERT INTO " + keyspace + "." + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BatchStatement batch = new BatchStatement(); - batch.add(ps.bind("foo")); - // i is UNSET - session.execute(batch); - fail("Should not have executed statement with UNSET values in protocol V3"); - } catch (IllegalStateException e) { - assertThat(e.getMessage()).contains("Unset value at index 1"); - } - } - - /** - * Tests that a tombstone is NOT created when a column in a prepared statement - * is not bound (UNSET flag). - * This only works from protocol V4 onwards. - * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_not_create_tombstone_when_unbound_value_on_bound_statement_and_protocol_v4() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement st1 = prepared.bind(); - st1.setString(0, "foo"); - st1.setInt(1, 1234); - session().execute(st1); - BoundStatement st2 = prepared.bind(); - st2.setString(0, "foo"); - // i is UNSET - session().execute(st2); - Statement st3 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); - st3.enableTracing(); - ResultSet rows = session().execute(st3); - assertThat(rows.one().getInt("i")).isEqualTo(1234); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "0 tombstone"); + Session session = cluster.connect(); + try { + PreparedStatement ps = + session.prepare("INSERT INTO " + keyspace + "." + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BatchStatement batch = new BatchStatement(); + batch.add(ps.bind("foo")); + // i is UNSET + session.execute(batch); + fail("Should not have executed statement with UNSET values in protocol V3"); + } catch (IllegalStateException e) { + assertThat(e.getMessage()).contains("Unset value at index 1"); + } finally { + session.close(); + cluster.close(); } - - /** - * Tests that a value that was previously set on a bound statement can be unset by index. - * This only works from protocol V4 onwards. 
- * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-930 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_unset_value_by_index() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement bound = prepared.bind(); - bound.setString(0, "foo"); - bound.setInt(1, 1234); - - bound.unset(1); - assertThat(bound.isSet(1)).isFalse(); - session().execute(bound); - - ResultSet rows = session().execute( + } + + /** + * Tests that a tombstone is NOT created when a column in a prepared statement is not bound (UNSET + * flag). This only works from protocol V4 onwards. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_not_create_tombstone_when_unbound_value_on_bound_statement_and_protocol_v4() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement st1 = prepared.bind(); + st1.setString(0, "foo"); + st1.setInt(1, 1234); + session().execute(st1); + BoundStatement st2 = prepared.bind(); + st2.setString(0, "foo"); + // i is UNSET + session().execute(st2); + Statement st3 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); + st3.enableTracing(); + ResultSet rows = session().execute(st3); + assertThat(rows.one().getInt("i")).isEqualTo(1234); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "0 tombstone"); + } + + /** + * Tests that a value that was previously set on a bound statement can be unset by index. This + * only works from protocol V4 onwards. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-930 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_unset_value_by_index() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement bound = prepared.bind(); + bound.setString(0, "foo"); + bound.setInt(1, 1234); + + bound.unset(1); + assertThat(bound.isSet(1)).isFalse(); + session().execute(bound); + + ResultSet rows = + session() + .execute( new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'") - .enableTracing()); - - assertThat(rows.one().isNull("i")); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "0 tombstone"); - } - - /** - * Tests that a value that was previously set on a bound statement can be unset by name. - * This only works from protocol V4 onwards. 
- * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-930 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_unset_value_by_name() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (:k, :i)"); - BoundStatement bound = prepared.bind(); - bound.setString("k", "foo"); - bound.setInt("i", 1234); - - bound.unset("i"); - assertThat(bound.isSet("i")).isFalse(); - session().execute(bound); - - ResultSet rows = session().execute( + .enableTracing()); + + assertThat(rows.one().isNull("i")); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "0 tombstone"); + } + + /** + * Tests that a value that was previously set on a bound statement can be unset by name. This only + * works from protocol V4 onwards. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-930 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_unset_value_by_name() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (:k, :i)"); + BoundStatement bound = prepared.bind(); + bound.setString("k", "foo"); + bound.setInt("i", 1234); + + bound.unset("i"); + assertThat(bound.isSet("i")).isFalse(); + session().execute(bound); + + ResultSet rows = + session() + .execute( new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'") - .enableTracing()); - - assertThat(rows.one().isNull("i")); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "0 tombstone"); + .enableTracing()); + + assertThat(rows.one().isNull("i")); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "0 tombstone"); + } + + /** + * Tests that a tombstone is NOT created when a column in a prepared statement is not bound (UNSET + * flag). This only works from protocol V4 onwards. 
+ * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_not_create_tombstone_when_unbound_value_on_batch_statement_and_protocol_v4() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement st1 = prepared.bind(); + st1.setString(0, "foo"); + st1.setInt(1, 1234); + session().execute(new BatchStatement().add(st1)); + BoundStatement st2 = prepared.bind(); + st2.setString(0, "foo"); + // i is UNSET + session().execute(new BatchStatement().add(st2)); + Statement st3 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); + st3.enableTracing(); + ResultSet rows = session().execute(st3); + assertThat(rows.one().getInt("i")).isEqualTo(1234); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "0 tombstone"); + } + + /** + * Tests that a tombstone is created when binding a null value to a column in a prepared + * statement. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "long") + public void should_create_tombstone_when_null_value_on_bound_statement() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement st1 = prepared.bind(); + st1.setString(0, "foo"); + st1.setToNull(1); + session().execute(st1); + Statement st2 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); + st2.enableTracing(); + ResultSet rows = session().execute(st2); + assertThat(rows.one().isNull(0)).isTrue(); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "1 tombstone"); + } + + /** + * Tests that a tombstone is created when binding a null value to a column in a batch statement. + * + * @test_category prepared_statements:binding + * @jira_ticket JAVA-777 + * @since 2.2.0 + */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_create_tombstone_when_null_value_on_batch_statement() { + PreparedStatement prepared = + session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); + BoundStatement st1 = prepared.bind(); + st1.setString(0, "foo"); + st1.setToNull(1); + session().execute(new BatchStatement().add(st1)); + Statement st2 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); + st2.enableTracing(); + ResultSet rows = session().execute(st2); + assertThat(rows.one().isNull(0)).isTrue(); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); + assertEventsContain(queryTrace, "1 tombstone"); + } + + private void assertEventsContain(QueryTrace queryTrace, String toFind) { + for (QueryTrace.Event event : queryTrace.getEvents()) { + if (event.getDescription().contains(toFind)) return; } + fail("Did not find '" + toFind + "' in trace"); + } - /** - * Tests that a tombstone is NOT created when a column in a prepared statement - * is not bound (UNSET flag). - * This only works from protocol V4 onwards. 
- * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.2.0") - public void should_not_create_tombstone_when_unbound_value_on_batch_statement_and_protocol_v4() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement st1 = prepared.bind(); - st1.setString(0, "foo"); - st1.setInt(1, 1234); - session().execute(new BatchStatement().add(st1)); - BoundStatement st2 = prepared.bind(); - st2.setString(0, "foo"); - // i is UNSET - session().execute(new BatchStatement().add(st2)); - Statement st3 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); - st3.enableTracing(); - ResultSet rows = session().execute(st3); - assertThat(rows.one().getInt("i")).isEqualTo(1234); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "0 tombstone"); - } + @Test(groups = "short") + public void should_propagate_idempotence_in_statements() { + session() + .execute(String.format("CREATE TABLE %s.idempotencetest (i int PRIMARY KEY)", keyspace)); - /** - * Tests that a tombstone is created when binding a null value to a column in a prepared statement. - * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "long") - public void should_create_tombstone_when_null_value_on_bound_statement() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement st1 = prepared.bind(); - st1.setString(0, "foo"); - st1.setToNull(1); - session().execute(st1); - Statement st2 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); - st2.enableTracing(); - ResultSet rows = session().execute(st2); - assertThat(rows.one().isNull(0)).isTrue(); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "1 tombstone"); - } + SimpleStatement statement; + PreparedStatement prepared; + BoundStatement bound; - /** - * Tests that a tombstone is created when binding a null value to a column in a batch statement. 
- * - * @test_category prepared_statements:binding - * @jira_ticket JAVA-777 - * @since 2.2.0 - */ - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_create_tombstone_when_null_value_on_batch_statement() { - PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE + " (k, i) VALUES (?, ?)"); - BoundStatement st1 = prepared.bind(); - st1.setString(0, "foo"); - st1.setToNull(1); - session().execute(new BatchStatement().add(st1)); - Statement st2 = new SimpleStatement("SELECT i from " + SIMPLE_TABLE + " where k = 'foo'"); - st2.enableTracing(); - ResultSet rows = session().execute(st2); - assertThat(rows.one().isNull(0)).isTrue(); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - QueryTrace queryTrace = rows.getExecutionInfo().getQueryTrace(); - assertEventsContain(queryTrace, "1 tombstone"); - } - - private void assertEventsContain(QueryTrace queryTrace, String toFind) { - for (QueryTrace.Event event : queryTrace.getEvents()) { - if (event.getDescription().contains(toFind)) - return; - } - fail("Did not find '" + toFind + "' in trace"); - } + statement = + new SimpleStatement( + String.format("SELECT * FROM %s.idempotencetest WHERE i = ?", keyspace)); - @Test(groups = "short") - public void should_propagate_idempotence_in_statements() { - session().execute(String.format("CREATE TABLE %s.idempotencetest (i int PRIMARY KEY)", keyspace)); + prepared = session().prepare(statement); + bound = prepared.bind(1); - SimpleStatement statement; - PreparedStatement prepared; - BoundStatement bound; + assertThat(prepared.isIdempotent()).isNull(); + assertThat(bound.isIdempotent()).isNull(); - statement = new SimpleStatement(String.format("SELECT * FROM %s.idempotencetest WHERE i = ?", keyspace)); + statement.setIdempotent(true); + prepared = session().prepare(statement); + bound = prepared.bind(1); - prepared = session().prepare(statement); - bound = prepared.bind(1); + assertThat(prepared.isIdempotent()).isTrue(); + assertThat(bound.isIdempotent()).isTrue(); - assertThat(prepared.isIdempotent()).isNull(); - assertThat(bound.isIdempotent()).isNull(); + statement.setIdempotent(false); + prepared = session().prepare(statement); + bound = prepared.bind(1); - statement.setIdempotent(true); - prepared = session().prepare(statement); - bound = prepared.bind(1); + assertThat(prepared.isIdempotent()).isFalse(); + assertThat(bound.isIdempotent()).isFalse(); - assertThat(prepared.isIdempotent()).isTrue(); - assertThat(bound.isIdempotent()).isTrue(); + prepared.setIdempotent(true); + bound = prepared.bind(1); - statement.setIdempotent(false); - prepared = session().prepare(statement); - bound = prepared.bind(1); - - assertThat(prepared.isIdempotent()).isFalse(); - assertThat(bound.isIdempotent()).isFalse(); - - prepared.setIdempotent(true); - bound = prepared.bind(1); - - assertThat(bound.isIdempotent()).isTrue(); - } + assertThat(bound.isIdempotent()).isTrue(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PrimitiveTypeSamples.java b/driver-core/src/test/java/com/datastax/driver/core/PrimitiveTypeSamples.java index efb3351f586..8055ac12cd4 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PrimitiveTypeSamples.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PrimitiveTypeSamples.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,75 +17,84 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.Bytes; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.*; - -import static org.assertj.core.api.Assertions.assertThat; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.UUID; /** * This class provides sample values for each primitive data type. - *

    - * These values have no particular meaning, the goal is just to have an instance that can be used in automated tests. + * + *

    These values have no particular meaning, the goal is just to have an instance that can be used + * in automated tests. */ public class PrimitiveTypeSamples { - static Map samples(ProtocolVersion protocolVersion) { - try { - final Collection primitiveTypes = TestUtils.allPrimitiveTypes(protocolVersion); - ImmutableMap data = ImmutableMap.builder() - .put(DataType.ascii(), "ascii") - .put(DataType.bigint(), Long.MAX_VALUE) - .put(DataType.blob(), Bytes.fromHexString("0xCAFE")) - .put(DataType.cboolean(), Boolean.TRUE) - .put(DataType.decimal(), new BigDecimal("12.3E+7")) - .put(DataType.cdouble(), Double.MAX_VALUE) - .put(DataType.cfloat(), Float.MAX_VALUE) - .put(DataType.inet(), InetAddress.getByName("123.123.123.123")) - .put(DataType.tinyint(), Byte.MAX_VALUE) - .put(DataType.smallint(), Short.MAX_VALUE) - .put(DataType.cint(), Integer.MAX_VALUE) - .put(DataType.duration(), Duration.from("PT30H20M")) - .put(DataType.text(), "text") - .put(DataType.timestamp(), new Date(872835240000L)) - .put(DataType.date(), LocalDate.fromDaysSinceEpoch(16071)) - .put(DataType.time(), 54012123450000L) - .put(DataType.timeuuid(), UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66")) - .put(DataType.uuid(), UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00")) - .put(DataType.varint(), new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000")) - .build(); + static Map samples(ProtocolVersion protocolVersion) { + try { + final Collection primitiveTypes = TestUtils.allPrimitiveTypes(protocolVersion); + ImmutableMap data = + ImmutableMap.builder() + .put(DataType.ascii(), "ascii") + .put(DataType.bigint(), Long.MAX_VALUE) + .put(DataType.blob(), Bytes.fromHexString("0xCAFE")) + .put(DataType.cboolean(), Boolean.TRUE) + .put(DataType.decimal(), new BigDecimal("12.3E+7")) + .put(DataType.cdouble(), Double.MAX_VALUE) + .put(DataType.cfloat(), Float.MAX_VALUE) + .put(DataType.inet(), InetAddress.getByName("123.123.123.123")) + .put(DataType.tinyint(), Byte.MAX_VALUE) + .put(DataType.smallint(), Short.MAX_VALUE) + .put(DataType.cint(), Integer.MAX_VALUE) + .put(DataType.duration(), Duration.from("PT30H20M")) + .put(DataType.text(), "text") + .put(DataType.timestamp(), new Date(872835240000L)) + .put(DataType.date(), LocalDate.fromDaysSinceEpoch(16071)) + .put(DataType.time(), 54012123450000L) + .put(DataType.timeuuid(), UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66")) + .put(DataType.uuid(), UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00")) + .put(DataType.varint(), new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000")) + .build(); - // Only include data types that support the desired protocol version. - Map result = Maps.filterKeys(data, new Predicate() { + // Only include data types that support the desired protocol version. + Map result = + Maps.filterKeys( + data, + new Predicate() { @Override public boolean apply(DataType input) { - return primitiveTypes.contains(input); + return primitiveTypes.contains(input); } - }); + }); - // Check that we cover all types (except counter and duration) - // Duration is excluded because it can't be used in collections and udts. It is tested separately - // in DurationIntegrationTest. - List tmp = Lists.newArrayList(primitiveTypes); - tmp.removeAll(result.keySet()); + // Check that we cover all types (except counter and duration) + // Duration is excluded because it can't be used in collections and udts. It is tested + // separately + // in DurationIntegrationTest. 
+ List tmp = Lists.newArrayList(primitiveTypes); + tmp.removeAll(result.keySet()); - List expectedFilteredTypes = Lists.newArrayList(DataType.counter()); + List expectedFilteredTypes = Lists.newArrayList(DataType.counter()); - assertThat(tmp) - .as("new datatype not covered in test") - .containsOnlyElementsOf(expectedFilteredTypes); + assertThat(tmp) + .as("new datatype not covered in test") + .containsOnlyElementsOf(expectedFilteredTypes); - return result; - } catch (UnknownHostException e) { - throw new AssertionError(e); - } + return result; + } catch (UnknownHostException e) { + throw new AssertionError(e); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java index a5da6d0b787..974e390d47e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,122 +17,131 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.ProtocolVersion.V4; -import static com.datastax.driver.core.ProtocolVersion.V5; +import static com.datastax.driver.core.ProtocolVersion.V6; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; -/** - * Tests for the new USE_BETA flag introduced in protocol v5 - * and Cassandra 3.10. - */ +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.SkipException; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +/** Tests for the new USE_BETA flag introduced in protocol v5 and Cassandra 3.10. */ @CassandraVersion("3.10") +@CCMConfig(createCluster = false) public class ProtocolBetaVersionTest extends CCMTestsSupport { - /** - * Verifies that the cluster builder fails when version is explicitly set and user attempts to set beta flag. 
- * - * @jira_ticket JAVA-1248 - */ - @Test(groups = "short") - public void should_not_initialize_when_version_explicitly_required_and_beta_flag_is_set() throws Exception { - try { - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withProtocolVersion(V4) - .allowBetaProtocolVersion() - .build(); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage()).isEqualTo("Can't use beta flag with initial protocol version of V4"); - } + @BeforeClass + public void checkNotCassandra4OrHigher() { + if (ccm().getCassandraVersion().getMajor() > 3) { + throw new SkipException( + "ProtocolBetaVersionTest should only be executed against C* versions >= 3.10 and < 4.0"); } + } - /** - * Verifies that the cluster builder fails when beta flag is set and user attempts to pass a version explicitly. - * - * @jira_ticket JAVA-1248 - */ - @Test(groups = "short") - public void should_not_initialize_when_beta_flag_is_set_and_version_explicitly_required() throws Exception { - try { - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .allowBetaProtocolVersion() - .withProtocolVersion(V4) - .build(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage()).isEqualTo("Can not set the version explicitly if `allowBetaProtocolVersion` was used."); - } + /** + * Verifies that the cluster builder fails when version is explicitly set and user attempts to set + * beta flag. + * + * @jira_ticket JAVA-1248 + */ + @Test(groups = "short") + public void should_not_initialize_when_version_explicitly_required_and_beta_flag_is_set() { + try { + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withProtocolVersion(V4) + .allowBetaProtocolVersion() + .build(); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()) + .isEqualTo("Can't use beta flag with initial protocol version of V4"); } + } - /** - * Verifies that the driver CANNOT connect to 3.10 with the following combination of options: - * Version V5 - * Flag UNSET - * - * @jira_ticket JAVA-1248 - */ - @Test(groups = "short") - public void should_not_connect_when_beta_version_explicitly_required_and_flag_not_set() throws Exception { - try { - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withProtocolVersion(V5) - .build(); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage()).startsWith("Can not use V5 protocol version. Newest supported protocol version is: V4"); - } + /** + * Verifies that the cluster builder fails when beta flag is set and user attempts to pass a + * version explicitly. 
+ * + * @jira_ticket JAVA-1248 + */ + @Test(groups = "short") + public void should_not_initialize_when_beta_flag_is_set_and_version_explicitly_required() { + try { + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .allowBetaProtocolVersion() + .withProtocolVersion(V4) + .build(); + fail("Expected IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e.getMessage()) + .isEqualTo("Can not set the version explicitly if `allowBetaProtocolVersion` was used."); } + } - /** - * Verifies that the driver can connect to 3.10 with the following combination of options: - * Version UNSET - * Flag SET - * Expected version: V5 - * - * @jira_ticket JAVA-1248 - */ - @Test(groups = "short") - public void should_connect_with_beta_when_no_version_explicitly_required_and_flag_set() throws Exception { - // Note: when the driver's ProtocolVersion.NEWEST_SUPPORTED will be incremented to V6 or higher - // a renegotiation will start taking place here and will downgrade the version from V6 to V5, - // but the test should remain valid since it's executed against 3.10 exclusively - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .allowBetaProtocolVersion() - .build(); - cluster.connect(); - assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V5); + /** + * Verifies that the driver CANNOT connect to 3.10 with the following combination of options: + * Version V6 Flag UNSET + * + * @jira_ticket JAVA-1248 + */ + @Test(groups = "short") + public void should_not_connect_when_beta_version_explicitly_required_and_flag_not_set() { + try { + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withProtocolVersion(V6) + .build(); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()) + .startsWith("Can not use V6 protocol version. 
Newest supported protocol version is: V5"); } + } - /** - * Verifies that the driver can connect to 3.10 with the following combination of options: - * Version UNSET - * Flag UNSET - * Expected version: V4 - * - * @jira_ticket JAVA-1248 - */ - @Test(groups = "short") - public void should_connect_after_renegotiation_when_no_version_explicitly_required_and_flag_not_set() throws Exception { - // Note: when the driver's ProtocolVersion.NEWEST_SUPPORTED will be incremented to V6 or higher - // the renegotiation will start downgrading the version from V6 to V4 instead of V5 to V4, - // but the test should remain valid since it's executed against 3.10 exclusively - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build(); - cluster.connect(); - assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V4); - } + /** + * Verifies that the driver can connect to 3.10 with the following combination of options: Version + * UNSET Flag SET Expected version: V5 + * + * @jira_ticket JAVA-1248 + */ + @Test(groups = "short", enabled = false) + public void should_connect_with_beta_when_no_version_explicitly_required_and_flag_set() { + Cluster cluster = + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .allowBetaProtocolVersion() + .build(); + cluster.connect(); + assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V6); + } + + /** + * Verifies that the driver can connect to 3.10 with the following combination of options: Version + * UNSET Flag UNSET Expected version: V4 + * + *

    This test has been disabled as of driver 3.11 because v5 is not beta anymore in the driver. + * As a consequence, protocol negotiation without specifying an initial version is not possible + * anymore against C* >= 3.10 and < 4.0. + * + * @jira_ticket JAVA-1248 + */ + @Test(groups = "short", enabled = false) + public void + should_connect_after_renegotiation_when_no_version_explicitly_required_and_flag_not_set() { + Cluster cluster = + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .build(); + cluster.connect(); + assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V4); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ProtocolOptionsTest.java index 2ef4c36e198..26ea95cc9f5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,24 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; +import org.testng.annotations.Test; + public class ProtocolOptionsTest extends CCMTestsSupport { - /** - * @jira_ticket JAVA-1209 - */ - @Test(groups = "unit") - public void getProtocolVersion_should_return_null_if_not_connected() { - Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); - assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isNull(); - } + /** @jira_ticket JAVA-1209 */ + @Test(groups = "unit") + public void getProtocolVersion_should_return_null_if_not_connected() { + Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); + assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isNull(); + } - /** - * @jira_ticket JAVA-1209 - */ - @Test(groups = "short") - public void getProtocolVersion_should_return_version() throws InterruptedException { - ProtocolVersion version = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - assertThat(version).isNotNull(); - } + /** @jira_ticket JAVA-1209 */ + @Test(groups = "short") + public void getProtocolVersion_should_return_version() throws InterruptedException { + ProtocolVersion version = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + assertThat(version).isNotNull(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolV1Test.java 
b/driver-core/src/test/java/com/datastax/driver/core/ProtocolV1Test.java index 07c2c2f2c1a..bb06cdb2b45 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolV1Test.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolV1Test.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,73 +17,75 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ProtocolVersion.V1; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import org.testng.SkipException; import org.testng.annotations.Test; -import static com.datastax.driver.core.ProtocolVersion.V1; -import static org.assertj.core.api.Assertions.assertThat; - -/** - * Tests targeting protocol v1 specifically. - */ -@CCMConfig(version = "1.2.19", dse = false) +/** Tests targeting protocol v1 specifically. */ public class ProtocolV1Test extends CCMTestsSupport { - @Override - public Cluster.Builder createClusterBuilder() { - return super.createClusterBuilder() - .withProtocolVersion(V1); - } - - @Override - public void beforeTestClass(Object testInstance) throws Exception { - if (CCMBridge.isWindows()) - throw new SkipException("C* 1.2 is not supported on Windows."); - super.beforeTestClass(testInstance); - } + @Override + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder().withProtocolVersion(V1); + } - /** - * Validates that a simple query with no variables is correctly executed. - * @jira_ticket JAVA-1132 - */ - @Test(groups = "short") - public void should_execute_query_with_no_variables() throws Exception { - session().execute("select * from system.local"); + @Override + public void beforeTestClass(Object testInstance) throws Exception { + if (CCMBridge.getGlobalCassandraVersion().compareTo(VersionNumber.parse("3.0")) >= 0) { + throw new SkipException("C* 3.0+ does not support Protocol V1"); } + super.beforeTestClass(testInstance); + } - /** - * Validates that a simple query with variables is not allowed with protocol V1. - * (Values in protocol V1 are only allowed in prepared statements). - * @jira_ticket JAVA-1132 - */ - @Test(groups = "short") - public void should_not_execute_query_with_variables() throws Exception { - try { - session().execute(new SimpleStatement("select * from system.local where key=?", "local")); - } catch (UnsupportedFeatureException e) { - assertThat(e).hasMessageContaining("Unsupported feature with the native protocol V1 (which is currently in use): Binary values are not supported"); - } - } + /** + * Validates that a simple query with no variables is correctly executed. 
+ * + * @jira_ticket JAVA-1132 + */ + @Test(groups = "short") + public void should_execute_query_with_no_variables() throws Exception { + session().execute("select * from system.local"); + } - /** - * Validates that a prepared statement with no variables is correctly prepared and executed. - * @jira_ticket JAVA-1132 - */ - @Test(groups = "short") - public void should_execute_prepared_statement_with_no_variables() throws Exception { - PreparedStatement ps = session().prepare("select * from system.local"); - session().execute(ps.bind()); + /** + * Validates that a simple query with variables is not allowed with protocol V1. (Values in + * protocol V1 are only allowed in prepared statements). + * + * @jira_ticket JAVA-1132 + */ + @Test(groups = "short") + public void should_not_execute_query_with_variables() throws Exception { + try { + session().execute(new SimpleStatement("select * from system.local where key=?", "local")); + } catch (UnsupportedFeatureException e) { + assertThat(e) + .hasMessageContaining( + "Unsupported feature with the native protocol V1 (which is currently in use): Binary values are not supported"); } + } - /** - * Validates that a prepared statement with variables is correctly prepared and executed. - * @jira_ticket JAVA-1132 - */ - @Test(groups = "short") - public void should_execute_prepared_statement_with_variables() throws Exception { - PreparedStatement ps = session().prepare("select * from system.local where key=?"); - session().execute(ps.bind("local")); - } + /** + * Validates that a prepared statement with no variables is correctly prepared and executed. + * + * @jira_ticket JAVA-1132 + */ + @Test(groups = "short") + public void should_execute_prepared_statement_with_no_variables() throws Exception { + PreparedStatement ps = session().prepare("select * from system.local"); + session().execute(ps.bind()); + } + /** + * Validates that a prepared statement with variables is correctly prepared and executed. + * + * @jira_ticket JAVA-1132 + */ + @Test(groups = "short") + public void should_execute_prepared_statement_with_variables() throws Exception { + PreparedStatement ps = session().prepare("select * from system.local where key=?"); + session().execute(ps.bind("local")); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java b/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java index 38fe4d46c72..f0c7fd07a08 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,137 +17,139 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ProtocolVersion.V1; +import static com.datastax.driver.core.ProtocolVersion.V5; +import static com.datastax.driver.core.ProtocolVersion.V6; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; import com.datastax.driver.core.utils.CassandraVersion; import org.testng.SkipException; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static com.datastax.driver.core.ProtocolVersion.*; -import static org.assertj.core.api.Assertions.assertThat; - +@CCMConfig(createCluster = false) public class ProtocolVersionRenegotiationTest extends CCMTestsSupport { - private ProtocolVersion protocolVersion; - - @BeforeMethod(groups = "short") - public void setUp() { - protocolVersion = ccm().getProtocolVersion(); - } - - /** - * @jira_ticket JAVA-1367 - */ - @Test(groups = "short") - public void should_succeed_when_version_provided_and_matches() throws Exception { - Cluster cluster = connectWithVersion(protocolVersion); - assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); - } - - /** - * @jira_ticket JAVA-1367 - */ - @Test(groups = "short") - @CassandraVersion("3.8") - public void should_fail_when_version_provided_and_too_low_3_8_plus() throws Exception { - UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V1); - assertThat(e.getUnsupportedVersion()).isEqualTo(V1); - // post-CASSANDRA-11464: server replies with client's version - assertThat(e.getServerVersion()).isEqualTo(V1); - } - - /** - * @jira_ticket JAVA-1367 - */ - @Test(groups = "short") - public void should_fail_when_version_provided_and_too_high() throws Exception { - if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) { - throw new SkipException("Server supports protocol V4"); - } - UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V4); - assertThat(e.getUnsupportedVersion()).isEqualTo(V4); - // pre-CASSANDRA-11464: server replies with its own version - assertThat(e.getServerVersion()).isEqualTo(protocolVersion); + private ProtocolVersion protocolVersion; + + @BeforeMethod(groups = "short") + public void setUp() { + protocolVersion = ccm().getProtocolVersion(); + } + + /** @jira_ticket JAVA-1367 */ + @Test(groups = "short") + public void should_succeed_when_version_provided_and_matches() { + Cluster cluster = connectWithVersion(protocolVersion); + assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); + } + + /** @jira_ticket JAVA-1367 */ + @Test(groups = "short") + @CassandraVersion("3.8") + public void should_fail_when_version_provided_and_too_low_3_8_plus() { + UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V1); + assertThat(e.getUnsupportedVersion()).isEqualTo(V1); + // post-CASSANDRA-11464: server replies with client's version + assertThat(e.getServerVersion()).isEqualTo(V1); + } + + /** @jira_ticket JAVA-1367 */ + @Test(groups = "short") + public void should_fail_when_version_provided_and_too_high() { + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0) { + throw new SkipException("Server supports 
protocol V5"); } - - /** - * @jira_ticket JAVA-1367 - */ - @Test(groups = "short") - public void should_fail_when_beta_allowed_and_too_high() throws Exception { - if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0) { - throw new SkipException("Server supports protocol protocol V5 beta"); - } - UnsupportedProtocolVersionException e = connectWithUnsupportedBetaVersion(); - assertThat(e.getUnsupportedVersion()).isEqualTo(V5); + UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V5); + assertThat(e.getUnsupportedVersion()).isEqualTo(V5); + // see CASSANDRA-11464: for C* < 3.0.9 and 3.8, server replies with its own version; + // otherwise it replies with the client's version. + assertThat(e.getServerVersion()).isIn(V5, protocolVersion); + } + + /** @jira_ticket JAVA-1367 */ + @Test(groups = "short") + public void should_fail_when_beta_allowed_and_too_high() { + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("4.0.0")) >= 0) { + throw new SkipException("Server supports protocol protocol V6 beta"); } - - /** - * @jira_ticket JAVA-1367 - */ - @Test(groups = "short") - @CCMConfig(version = "2.1.16", createCluster = false) - public void should_negotiate_when_no_version_provided() throws Exception { - if (protocolVersion.compareTo(ProtocolVersion.NEWEST_SUPPORTED) >= 0) { - throw new SkipException("Server supports newest protocol version driver supports"); - } - Cluster cluster = connectWithoutVersion(); - assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); + UnsupportedProtocolVersionException e = connectWithUnsupportedBetaVersion(); + assertThat(e.getUnsupportedVersion()).isEqualTo(V6); + } + + /** @jira_ticket JAVA-1367 */ + @Test(groups = "short") + @CCMConfig(version = "2.1.16", createCluster = false) + public void should_negotiate_when_no_version_provided() { + if (protocolVersion.compareTo(ProtocolVersion.NEWEST_SUPPORTED) >= 0) { + throw new SkipException("Server supports newest protocol version driver supports"); } - - private UnsupportedProtocolVersionException connectWithUnsupportedVersion(ProtocolVersion version) { - Cluster cluster = register(Cluster.builder() + Cluster cluster = connectWithoutVersion(); + assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); + } + + private UnsupportedProtocolVersionException connectWithUnsupportedVersion( + ProtocolVersion version) { + Cluster cluster = + register( + Cluster.builder() .addContactPoints(getContactPoints()) .withPort(ccm().getBinaryPort()) .withProtocolVersion(version) .build()); - return initWithUnsupportedVersion(cluster); - } + return initWithUnsupportedVersion(cluster); + } - private UnsupportedProtocolVersionException connectWithUnsupportedBetaVersion() { - Cluster cluster = register(Cluster.builder() + private UnsupportedProtocolVersionException connectWithUnsupportedBetaVersion() { + Cluster cluster = + register( + Cluster.builder() .addContactPoints(getContactPoints()) .withPort(ccm().getBinaryPort()) .allowBetaProtocolVersion() .build()); - return initWithUnsupportedVersion(cluster); + return initWithUnsupportedVersion(cluster); + } + + private UnsupportedProtocolVersionException initWithUnsupportedVersion(Cluster cluster) { + Throwable t = null; + try { + cluster.init(); + } catch (Throwable t2) { + t = t2; } - - private UnsupportedProtocolVersionException initWithUnsupportedVersion(Cluster cluster) { - Throwable t = null; - try { - cluster.init(); - } catch (Throwable t2) { - t = t2; - } - if (t instanceof 
UnsupportedProtocolVersionException) { - return (UnsupportedProtocolVersionException) t; - } else { - throw new AssertionError("Expected UnsupportedProtocolVersionException, got " + t); - } + if (t instanceof UnsupportedProtocolVersionException) { + return (UnsupportedProtocolVersionException) t; + } else { + throw new AssertionError("Expected UnsupportedProtocolVersionException, got " + t); } + } - private Cluster connectWithVersion(ProtocolVersion version) { - Cluster cluster = register(Cluster.builder() + private Cluster connectWithVersion(ProtocolVersion version) { + Cluster cluster = + register( + Cluster.builder() .addContactPoints(getContactPoints()) .withPort(ccm().getBinaryPort()) .withProtocolVersion(version) .build()); - cluster.init(); - return cluster; - } - - private Cluster connectWithoutVersion() { - Cluster cluster = register(Cluster.builder() + cluster.init(); + return cluster; + } + + private Cluster connectWithoutVersion() { + Cluster cluster = + register( + Cluster.builder() .addContactPoints(getContactPoints()) .withPort(ccm().getBinaryPort()) .build()); - cluster.init(); - return cluster; - } - - private ProtocolVersion actualProtocolVersion(Cluster cluster) { - return cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - } + cluster.init(); + return cluster; + } + private ProtocolVersion actualProtocolVersion(Cluster cluster) { + return cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerErrorsTest.java b/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerErrorsTest.java index af549880a7e..d23661f2bd8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerErrorsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerErrorsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,48 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.QueryLogger.builder; +import static org.apache.log4j.Level.DEBUG; +import static org.apache.log4j.Level.INFO; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.scassandra.http.client.PrimingRequest.queryBuilder; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.already_exists; +import static org.scassandra.http.client.Result.bad_credentials; +import static org.scassandra.http.client.Result.config_error; +import static org.scassandra.http.client.Result.invalid; +import static org.scassandra.http.client.Result.is_bootstrapping; +import static org.scassandra.http.client.Result.overloaded; +import static org.scassandra.http.client.Result.protocol_error; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.server_error; +import static org.scassandra.http.client.Result.syntax_error; +import static org.scassandra.http.client.Result.truncate_error; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.unprepared; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.OverloadedException; +import com.datastax.driver.core.exceptions.ProtocolError; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.TruncateException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.UnpreparedException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import com.datastax.driver.core.policies.FallthroughRetryPolicy; import com.google.common.util.concurrent.Uninterruptibles; +import java.util.concurrent.TimeUnit; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.scassandra.http.client.Result; @@ -26,227 +67,197 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.QueryLogger.builder; -import static org.apache.log4j.Level.DEBUG; -import static org.apache.log4j.Level.INFO; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.scassandra.http.client.PrimingRequest.queryBuilder; 
-import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; - /** - * Tests for {@link QueryLogger} using Scassandra. - * Contains only tests for exceptions (client timeout, read timeout, unavailable...); - * other tests can be found in {@link QueryLoggerTest}. + * Tests for {@link QueryLogger} using Scassandra. Contains only tests for exceptions (client + * timeout, read timeout, unavailable...); other tests can be found in {@link QueryLoggerTest}. */ public class QueryLoggerErrorsTest extends ScassandraTestBase.PerClassCluster { - private Logger slow = Logger.getLogger(QueryLogger.SLOW_LOGGER.getName()); - private Logger error = Logger.getLogger(QueryLogger.ERROR_LOGGER.getName()); + private Logger slow = Logger.getLogger(QueryLogger.SLOW_LOGGER.getName()); + private Logger error = Logger.getLogger(QueryLogger.ERROR_LOGGER.getName()); - private MemoryAppender slowAppender; - private MemoryAppender errorAppender; + private MemoryAppender slowAppender; + private MemoryAppender errorAppender; - private Cluster cluster = null; - private Session session = null; - private QueryLogger queryLogger = null; - private Level originalError; - private Level originalSlow; + private Cluster cluster = null; + private Session session = null; + private QueryLogger queryLogger = null; + private Level originalError; + private Level originalSlow; - @BeforeMethod(groups = {"short", "unit"}) - public void setUp() { - originalSlow = slow.getLevel(); - originalError = error.getLevel(); - slow.setLevel(INFO); - error.setLevel(INFO); - slow.addAppender(slowAppender = new MemoryAppender()); - error.addAppender(errorAppender = new MemoryAppender()); + @BeforeMethod(groups = {"short", "unit"}) + public void setUp() { + originalSlow = slow.getLevel(); + originalError = error.getLevel(); + slow.setLevel(INFO); + error.setLevel(INFO); + slow.addAppender(slowAppender = new MemoryAppender()); + error.addAppender(errorAppender = new MemoryAppender()); - queryLogger = null; + queryLogger = null; - cluster = createClusterBuilder().withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); - session = cluster.connect(); - } + cluster = createClusterBuilder().withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); + session = cluster.connect(); + } - @AfterMethod(groups = {"short", "unit"}, alwaysRun = true) - public void tearDown() { - slow.setLevel(originalSlow); - error.setLevel(originalError); - slow.removeAppender(slowAppender); - error.removeAppender(errorAppender); + @AfterMethod( + groups = {"short", "unit"}, + alwaysRun = true) + public void tearDown() { + slow.setLevel(originalSlow); + error.setLevel(originalError); + slow.removeAppender(slowAppender); + error.removeAppender(errorAppender); - queryLogger = null; + queryLogger = null; - if (cluster != null) { - cluster.close(); - } + if (cluster != null) { + cluster.close(); } + } - @Test(groups = "short") - public void should_log_queries_beyond_constant_threshold() throws Exception { - // given - slow.setLevel(DEBUG); - queryLogger = builder() - .withConstantThreshold(10) - .build(); - cluster.register(queryLogger); - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withFixedDelay(100L)) - .build() - ); - // when - session.execute(query); - // then - String line = slowAppender.waitAndGet(5000); - assertThat(line) - .contains("Query too slow") - .contains(ip) - .contains(query); - } + @Test(groups = "short") + public void 
should_log_queries_beyond_constant_threshold() throws Exception { + // given + slow.setLevel(DEBUG); + queryLogger = builder().withConstantThreshold(10).build(); + cluster.register(queryLogger); + String query = "SELECT foo FROM bar"; + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withFixedDelay(100L)).build()); + // when + session.execute(query); + // then + String line = slowAppender.waitAndGet(5000); + assertThat(line).contains("Query too slow").contains(ip).contains(query); + } - @Test(groups = "short") - public void should_log_queries_beyond_dynamic_threshold() throws Exception { - // given - slow.setLevel(DEBUG); - queryLogger = builder() - .withDynamicThreshold(ClusterWidePercentileTracker.builder(1000) - .withMinRecordedValues(100) - .withInterval(1, TimeUnit.SECONDS).build(), 99) - .build(); - cluster.register(queryLogger); - - // prime a fast query to respond right away. - String fastQuery = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(fastQuery) - .build() - ); - - // prime a slow query to respond after 100ms. - String slowQuery = "SELECT bar from foo"; - primingClient.prime( - queryBuilder() - .withQuery(slowQuery) - .withThen(then().withFixedDelay(100L)) - .build() - ); - - // submit 100 fast queries to prime to the histogram. - long startTime = System.currentTimeMillis(); - for (int i = 0; i < 100; i++) { - session.execute(fastQuery); - } - - // Wait up to 1 second to allow initial histogram to be cached. - long waitTime = 1000 - (System.currentTimeMillis() - startTime); - Uninterruptibles.sleepUninterruptibly(waitTime, TimeUnit.MILLISECONDS); - - // when - session.execute(slowQuery); - // then - String line = slowAppender.waitAndGet(5000); - assertThat(line) - .contains("Query too slow") - .contains(ip) - .contains(slowQuery) - .doesNotContain(fastQuery); - } + @Test(groups = "short") + public void should_log_queries_beyond_dynamic_threshold() throws Exception { + // given + slow.setLevel(DEBUG); + queryLogger = + builder() + .withDynamicThreshold( + ClusterWidePercentileTracker.builder(1000) + .withMinRecordedValues(100) + .withInterval(1, TimeUnit.SECONDS) + .build(), + 99) + .build(); + cluster.register(queryLogger); - @Test(groups = "short") - public void should_log_timed_out_queries() throws Exception { - // given - error.setLevel(DEBUG); - queryLogger = builder().build(); - cluster.register(queryLogger); - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withFixedDelay(100L)) - .build() - ); - // when - try { - session.execute(query); - fail("Should have thrown OperationTimedOutException"); - } catch (OperationTimedOutException e) { - // ok - } - // then - String line = errorAppender.waitAndGet(5000); - assertThat(line) - .contains("Query error") - .contains(ip) - .contains(Integer.toString(scassandra.getBinaryPort())) - .contains(query) - .contains("Timed out waiting for server response"); - } + // prime a fast query to respond right away. 
+ String fastQuery = "SELECT foo FROM bar"; + primingClient.prime(queryBuilder().withQuery(fastQuery).build()); - @DataProvider(name = "errors") - public static Object[][] createErrors() { - return new Object[][]{ - {unavailable, UnavailableException.class}, - {write_request_timeout, WriteTimeoutException.class}, - {read_request_timeout, ReadTimeoutException.class}, - {server_error, ServerError.class}, - {protocol_error, ProtocolError.class}, - {bad_credentials, AuthenticationException.class}, - {overloaded, OverloadedException.class}, - {is_bootstrapping, BootstrappingException.class}, - {truncate_error, TruncateException.class}, - {syntax_error, SyntaxError.class}, - {invalid, InvalidQueryException.class}, - {config_error, InvalidConfigurationInQueryException.class}, - {already_exists, AlreadyExistsException.class}, - {unprepared, UnpreparedException.class} - }; + // prime a slow query to respond after 100ms. + String slowQuery = "SELECT bar from foo"; + primingClient.prime( + queryBuilder().withQuery(slowQuery).withThen(then().withFixedDelay(100L)).build()); + + // submit 100 fast queries to prime to the histogram. + long startTime = System.currentTimeMillis(); + for (int i = 0; i < 100; i++) { + session.execute(fastQuery); } - @Test(groups = "short", dataProvider = "errors") - public void should_log_exception_from_the_given_result(Result result, Class expectedException) throws Exception { - // given - error.setLevel(DEBUG); - queryLogger = builder().build(); - cluster.register(queryLogger); - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withResult(result)) - .build() - ); - // when - try { - session.execute(query); - fail("Should have thrown Exception"); - } catch (Exception e) { - if (expectedException == UnpreparedException.class) { - // Special case UnpreparedException, it raises DriverInternalError instead. - assertThat(e).isInstanceOf(DriverInternalError.class); - } else if (expectedException == BootstrappingException.class) { - // Special case BootstrappingException, it's wrapped in NHAE since it's always retried. - assertThat(e).isInstanceOf(NoHostAvailableException.class); - assertThat(((NoHostAvailableException) e).getErrors().get(hostAddress)).isInstanceOf(expectedException); - } else { - assertThat(e).isInstanceOf(expectedException); - } - } - // then - String line = errorAppender.waitAndGet(5000); - assertThat(line) - .contains("Query error") - .contains(ip) - .contains(Integer.toString(scassandra.getBinaryPort())) - .contains(query) - .contains(expectedException.getName()); + // Wait up to 1 second to allow initial histogram to be cached. 
+ long waitTime = 1000 - (System.currentTimeMillis() - startTime); + Uninterruptibles.sleepUninterruptibly(waitTime, TimeUnit.MILLISECONDS); + + // when + session.execute(slowQuery); + // then + String line = slowAppender.waitAndGet(5000); + assertThat(line) + .contains("Query too slow") + .contains(ip) + .contains(slowQuery) + .doesNotContain(fastQuery); + } + + @Test(groups = "short") + public void should_log_timed_out_queries() throws Exception { + // given + error.setLevel(DEBUG); + queryLogger = builder().build(); + cluster.register(queryLogger); + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + String query = "SELECT foo FROM bar"; + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withFixedDelay(100L)).build()); + // when + try { + session.execute(query); + fail("Should have thrown OperationTimedOutException"); + } catch (OperationTimedOutException e) { + // ok } + // then + String line = errorAppender.waitAndGet(5000); + assertThat(line) + .contains("Query error") + .contains(ip) + .contains(Integer.toString(scassandra.getBinaryPort())) + .contains(query) + .contains("Timed out waiting for server response"); + } + + @DataProvider(name = "errors") + public static Object[][] createErrors() { + return new Object[][] { + {unavailable, UnavailableException.class}, + {write_request_timeout, WriteTimeoutException.class}, + {read_request_timeout, ReadTimeoutException.class}, + {server_error, ServerError.class}, + {protocol_error, ProtocolError.class}, + {bad_credentials, AuthenticationException.class}, + {overloaded, OverloadedException.class}, + {is_bootstrapping, BootstrappingException.class}, + {truncate_error, TruncateException.class}, + {syntax_error, SyntaxError.class}, + {invalid, InvalidQueryException.class}, + {config_error, InvalidConfigurationInQueryException.class}, + {already_exists, AlreadyExistsException.class}, + {unprepared, UnpreparedException.class} + }; + } + @Test(groups = "short", dataProvider = "errors") + public void should_log_exception_from_the_given_result( + Result result, Class expectedException) throws Exception { + // given + error.setLevel(DEBUG); + queryLogger = builder().build(); + cluster.register(queryLogger); + String query = "SELECT foo FROM bar"; + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withResult(result)).build()); + // when + try { + session.execute(query); + fail("Should have thrown Exception"); + } catch (Exception e) { + if (expectedException == UnpreparedException.class) { + // Special case UnpreparedException, it raises DriverInternalError instead. + assertThat(e).isInstanceOf(DriverInternalError.class); + } else if (expectedException == BootstrappingException.class) { + // Special case BootstrappingException, it's wrapped in NHAE since it's always retried. 
+ assertThat(e).isInstanceOf(NoHostAvailableException.class); + assertThat(((NoHostAvailableException) e).getErrors().get(hostEndPoint)) + .isInstanceOf(expectedException); + } else { + assertThat(e).isInstanceOf(expectedException); + } + } + // then + String line = errorAppender.waitAndGet(5000); + assertThat(line) + .contains("Query error") + .contains(ip) + .contains(Integer.toString(scassandra.getBinaryPort())) + .contains(query) + .contains(expectedException.getName()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerTest.java b/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerTest.java index c611af55a37..cec0b7c69e6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryLoggerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +17,28 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.BatchStatement.Type.COUNTER; +import static com.datastax.driver.core.BatchStatement.Type.UNLOGGED; +import static com.datastax.driver.core.QueryLogger.DEFAULT_SLOW_QUERY_THRESHOLD_MS; +import static com.datastax.driver.core.QueryLogger.ERROR_LOGGER; +import static com.datastax.driver.core.QueryLogger.FURTHER_PARAMS_OMITTED; +import static com.datastax.driver.core.QueryLogger.NORMAL_LOGGER; +import static com.datastax.driver.core.QueryLogger.SLOW_LOGGER; +import static com.datastax.driver.core.QueryLogger.TRUNCATED_OUTPUT; +import static com.datastax.driver.core.TestUtils.getFixedValue; +import static com.datastax.driver.core.TestUtils.ipOfNode; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static org.apache.log4j.Level.DEBUG; +import static org.apache.log4j.Level.INFO; +import static org.apache.log4j.Level.TRACE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + import com.datastax.driver.core.StatementWrapperTest.CustomStatement; import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.querybuilder.BuiltStatement; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Function; import com.google.common.base.Joiner; @@ -24,1053 +46,1106 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.common.primitives.Bytes; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import 
java.util.List; +import java.util.Map; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.BatchStatement.Type.COUNTER; -import static com.datastax.driver.core.BatchStatement.Type.UNLOGGED; -import static com.datastax.driver.core.QueryLogger.*; -import static com.datastax.driver.core.TestUtils.getFixedValue; -import static com.datastax.driver.core.TestUtils.ipOfNode; -import static org.apache.log4j.Level.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - /** - * Main tests for {@link QueryLogger} using {@link com.datastax.driver.core.CCMBridge}. - * More tests, specifically targeting slow and unsuccessful queries, can be found in - * {@link QueryLoggerErrorsTest}. + * Main tests for {@link QueryLogger} using {@link com.datastax.driver.core.CCMBridge}. More tests, + * specifically targeting slow and unsuccessful queries, can be found in {@link + * QueryLoggerErrorsTest}. */ public class QueryLoggerTest extends CCMTestsSupport { - private List dataTypes; - private List values; - private String assignments; - - private Logger normal = Logger.getLogger(NORMAL_LOGGER.getName()); - private Logger slow = Logger.getLogger(SLOW_LOGGER.getName()); - private Logger error = Logger.getLogger(ERROR_LOGGER.getName()); - - private MemoryAppender normalAppender; - private MemoryAppender slowAppender; - private MemoryAppender errorAppender; - - private QueryLogger queryLogger; - private Level originalNormal; - private Level originalSlow; - private Level originalError; - - @Override - public void onTestContextInitialized() { - dataTypes = new ArrayList( - Sets.filter(TestUtils.allPrimitiveTypes(ccm().getProtocolVersion()), new Predicate() { - @Override - public boolean apply(DataType type) { - return type != DataType.counter(); - } + private List dataTypes; + private List values; + private String assignments; + + private final Logger normal = Logger.getLogger(NORMAL_LOGGER.getName()); + private final Logger slow = Logger.getLogger(SLOW_LOGGER.getName()); + private final Logger error = Logger.getLogger(ERROR_LOGGER.getName()); + + private MemoryAppender normalAppender; + private MemoryAppender slowAppender; + private MemoryAppender errorAppender; + + private QueryLogger queryLogger; + private Level originalNormal; + private Level originalSlow; + private Level originalError; + + @Override + public void onTestContextInitialized() { + dataTypes = + new ArrayList( + Sets.filter( + TestUtils.allPrimitiveTypes(ccm().getProtocolVersion()), + new Predicate() { + @Override + public boolean apply(DataType type) { + return type != DataType.counter(); + } })); - values = Lists.transform(dataTypes, new Function() { - @Override - public Object apply(DataType type) { - return getFixedValue(type); - } - } - ); - - String definitions = Joiner.on(", ").join( - Lists.transform(dataTypes, new Function() { - @Override - public String apply(DataType type) { - return "c_" + type + " " + type; - } - } - ) - ); - - assignments = Joiner.on(", ").join( - Lists.transform(dataTypes, new Function() { - @Override - public String apply(DataType type) { - return "c_" + type + " = ?"; - } - } - ) - ); - - execute("CREATE TABLE test (pk int PRIMARY KEY, " + definitions + ")"); - } - - @BeforeMethod(groups = {"short", "unit"}) - public void startCapturingLogs() { - 
originalNormal = normal.getLevel(); - originalSlow = slow.getLevel(); - originalError = error.getLevel(); - normal.setLevel(INFO); - slow.setLevel(INFO); - error.setLevel(INFO); - normal.addAppender(normalAppender = new MemoryAppender()); - slow.addAppender(slowAppender = new MemoryAppender()); - error.addAppender(errorAppender = new MemoryAppender()); - } - - @AfterMethod(groups = {"short", "unit"}, alwaysRun = true) - public void stopCapturingLogs() { - normal.setLevel(originalNormal); - slow.setLevel(originalSlow); - error.setLevel(originalError); - normal.removeAppender(normalAppender); - slow.removeAppender(slowAppender); - error.removeAppender(errorAppender); - } - - @AfterMethod(groups = {"short", "unit"}, alwaysRun = true) - public void unregisterQueryLogger() { - if (cluster() != null && queryLogger != null) { - cluster().unregister(queryLogger); - } - } - - // Tests for different types of statements (Regular, Bound, Batch) - - @Test(groups = "short") - public void should_log_regular_statements() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .build(); - cluster().register(queryLogger); - String query = "SELECT c_text FROM test WHERE pk = 42"; - session().execute(query); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .doesNotContain("parameters"); - } - - @Test(groups = "short") - public void should_log_bound_statements() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .build(); - cluster().register(queryLogger); - String query = "SELECT * FROM test where pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .doesNotContain("actual parameters"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_batch_statements() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query1 = "INSERT INTO test (pk) VALUES (?)"; - String query2 = "UPDATE test SET c_int = ? WHERE pk = 42"; - PreparedStatement ps1 = session().prepare(query1); - PreparedStatement ps2 = session().prepare(query2); - BatchStatement batch = new BatchStatement(); - batch.add(ps1.bind(42)); - batch.add(ps2.bind(1234)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("BEGIN BATCH") - .contains("APPLY BATCH") - .contains(query1) - .contains(query2) - .doesNotContain("c_int:"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_unlogged_batch_statements() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query1 = "INSERT INTO test (pk) VALUES (?)"; - String query2 = "UPDATE test SET c_int = ? 
WHERE pk = 42"; - PreparedStatement ps1 = session().prepare(query1); - PreparedStatement ps2 = session().prepare(query2); - BatchStatement batch = new BatchStatement(UNLOGGED); - batch.add(ps1.bind(42)); - batch.add(ps2.bind(1234)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("BEGIN UNLOGGED BATCH") - .contains("APPLY BATCH") - .contains(query1) - .contains(query2) - .doesNotContain("c_int:"); + values = + Lists.transform( + dataTypes, + new Function() { + @Override + public Object apply(DataType type) { + return getFixedValue(type); + } + }); + + String definitions = + Joiner.on(", ") + .join( + Lists.transform( + dataTypes, + new Function() { + @Override + public String apply(DataType type) { + return "c_" + type + " " + type; + } + })); + + assignments = + Joiner.on(", ") + .join( + Lists.transform( + dataTypes, + new Function() { + @Override + public String apply(DataType type) { + return "c_" + type + " = ?"; + } + })); + + execute("CREATE TABLE test (pk int PRIMARY KEY, " + definitions + ")"); + } + + @BeforeMethod(groups = {"short", "unit"}) + public void startCapturingLogs() { + originalNormal = normal.getLevel(); + originalSlow = slow.getLevel(); + originalError = error.getLevel(); + normal.setLevel(INFO); + slow.setLevel(INFO); + error.setLevel(INFO); + normal.addAppender(normalAppender = new MemoryAppender()); + slow.addAppender(slowAppender = new MemoryAppender()); + error.addAppender(errorAppender = new MemoryAppender()); + } + + @AfterMethod( + groups = {"short", "unit"}, + alwaysRun = true) + public void stopCapturingLogs() { + normal.setLevel(originalNormal); + slow.setLevel(originalSlow); + error.setLevel(originalError); + normal.removeAppender(normalAppender); + slow.removeAppender(slowAppender); + error.removeAppender(errorAppender); + } + + @AfterMethod( + groups = {"short", "unit"}, + alwaysRun = true) + public void unregisterQueryLogger() { + if (cluster() != null && queryLogger != null) { + cluster().unregister(queryLogger); } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_counter_batch_statements() throws Exception { - // Create a special table for testing with counters. - session().execute( - "CREATE TABLE IF NOT EXISTS counter_test (pk int PRIMARY KEY, c_count COUNTER, c_count2 COUNTER)"); - - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE counter_test SET c_count = c_count + ? WHERE pk = 42"; - String query2 = "UPDATE counter_test SET c_count2 = c_count2 + ? 
WHERE pk = 42"; - PreparedStatement ps1 = session().prepare(query1); - PreparedStatement ps2 = session().prepare(query2); - BatchStatement batch = new BatchStatement(COUNTER); - batch.add(ps1.bind(1234L)); - batch.add(ps2.bind(5678L)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("BEGIN COUNTER BATCH") - .contains("APPLY BATCH") - .contains(query1) - .contains(query2) - .doesNotContain("c_count:"); - } - - @Test(groups = "unit") - public void should_log_unknown_statements() throws Exception { - // given - normal.setLevel(DEBUG); - Statement unknownStatement = new Statement() { - @Override - public ByteBuffer getRoutingKey(ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - return null; - } - - @Override - public String getKeyspace() { - return null; - } - - @Override - public String toString() { - return "weird statement"; - } + } + + // Tests for different types of statements (Regular, Bound, Batch) + + @Test(groups = "short") + public void should_log_regular_statements() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = QueryLogger.builder().withConstantThreshold(Long.MAX_VALUE).build(); + cluster().register(queryLogger); + String query = "SELECT c_text FROM test WHERE pk = 42"; + session().execute(query); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .doesNotContain("parameters"); + } + + @Test(groups = "short") + public void should_log_bound_statements() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = QueryLogger.builder().withConstantThreshold(Long.MAX_VALUE).build(); + cluster().register(queryLogger); + String query = "SELECT * FROM test where pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .doesNotContain("actual parameters"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_batch_statements() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query1 = "INSERT INTO test (pk) VALUES (?)"; + String query2 = "UPDATE test SET c_int = ? 
WHERE pk = 42"; + PreparedStatement ps1 = session().prepare(query1); + PreparedStatement ps2 = session().prepare(query2); + BatchStatement batch = new BatchStatement(); + batch.add(ps1.bind(42)); + batch.add(ps2.bind(1234)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("BEGIN BATCH") + .contains("APPLY BATCH") + .contains(query1) + .contains(query2) + .doesNotContain("c_int:"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_unlogged_batch_statements() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query1 = "INSERT INTO test (pk) VALUES (?)"; + String query2 = "UPDATE test SET c_int = ? WHERE pk = 42"; + PreparedStatement ps1 = session().prepare(query1); + PreparedStatement ps2 = session().prepare(query2); + BatchStatement batch = new BatchStatement(UNLOGGED); + batch.add(ps1.bind(42)); + batch.add(ps2.bind(1234)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("BEGIN UNLOGGED BATCH") + .contains("APPLY BATCH") + .contains(query1) + .contains(query2) + .doesNotContain("c_int:"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_counter_batch_statements() throws Exception { + // Create a special table for testing with counters. + session() + .execute( + "CREATE TABLE IF NOT EXISTS counter_test (pk int PRIMARY KEY, c_count COUNTER, c_count2 COUNTER)"); + + // given + normal.setLevel(DEBUG); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE counter_test SET c_count = c_count + ? WHERE pk = 42"; + String query2 = "UPDATE counter_test SET c_count2 = c_count2 + ? 
WHERE pk = 42"; + PreparedStatement ps1 = session().prepare(query1); + PreparedStatement ps2 = session().prepare(query2); + BatchStatement batch = new BatchStatement(COUNTER); + batch.add(ps1.bind(1234L)); + batch.add(ps2.bind(5678L)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("BEGIN COUNTER BATCH") + .contains("APPLY BATCH") + .contains(query1) + .contains(query2) + .doesNotContain("c_count:"); + } + + @Test(groups = "unit") + public void should_log_unknown_statements() throws Exception { + // given + normal.setLevel(DEBUG); + Statement unknownStatement = + new Statement() { + @Override + public ByteBuffer getRoutingKey( + ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { + return null; + } + + @Override + public String getKeyspace() { + return null; + } + + @Override + public String toString() { + return "weird statement"; + } }; - // when - queryLogger = QueryLogger.builder().build(); - queryLogger.onRegister(mock(Cluster.class)); - queryLogger.update(null, unknownStatement, null, 0); - // then - String line = normalAppender.get(); - assertThat(line).contains("weird statement"); + // when + queryLogger = QueryLogger.builder().build(); + queryLogger.onRegister(mock(Cluster.class)); + queryLogger.update(null, unknownStatement, null, 0); + // then + String line = normalAppender.get(); + assertThat(line).contains("weird statement"); + } + + // Tests for different log levels + + @Test(groups = "unit") + public void should_not_log_normal_if_level_higher_than_DEBUG() throws Exception { + // given + normal.setLevel(INFO); + slow.setLevel(INFO); + error.setLevel(INFO); + // when + queryLogger = QueryLogger.builder().build(); + queryLogger.onRegister(mock(Cluster.class)); + queryLogger.update(null, mock(BoundStatement.class), null, 0); + // then + assertThat(normalAppender.get()).isEmpty(); + assertThat(slowAppender.get()).isEmpty(); + assertThat(errorAppender.get()).isEmpty(); + } + + @Test(groups = "unit") + public void should_not_log_slow_if_level_higher_than_DEBUG() throws Exception { + // given + normal.setLevel(INFO); + slow.setLevel(INFO); + error.setLevel(INFO); + // when + queryLogger = QueryLogger.builder().build(); + queryLogger.onRegister(mock(Cluster.class)); + queryLogger.update(null, mock(BoundStatement.class), null, DEFAULT_SLOW_QUERY_THRESHOLD_MS + 1); + // then + assertThat(normalAppender.get()).isEmpty(); + assertThat(slowAppender.get()).isEmpty(); + assertThat(errorAppender.get()).isEmpty(); + } + + @Test(groups = "unit") + public void should_not_log_error_if_level_higher_than_DEBUG() throws Exception { + // given + normal.setLevel(INFO); + slow.setLevel(INFO); + error.setLevel(INFO); + // when + queryLogger = QueryLogger.builder().build(); + queryLogger.onRegister(mock(Cluster.class)); + queryLogger.update(null, mock(BoundStatement.class), new DriverException("booh"), 0); + // then + assertThat(normalAppender.get()).isEmpty(); + assertThat(slowAppender.get()).isEmpty(); + assertThat(errorAppender.get()).isEmpty(); + } + + // Tests for different query types (normal, slow, exception) + + @Test(groups = "short") + public void should_log_normal_queries() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query = "SELECT * FROM test 
where pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .doesNotContain("pk:42"); + } + + // Tests for slow and error queries are in QueryLoggerErrorsTest + + // Tests with query parameters (log level TRACE) + + /** @jira_ticket JAVA-2857 */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_simple_statements_without_parameters() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + + // when + String query = "UPDATE test SET c_int = 42 WHERE pk = 42"; + SimpleStatement stmt = new SimpleStatement(query); + session().execute(stmt); + + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("Query completed normally").contains(query); + } + + /** @jira_ticket JAVA-2857 */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_bound_statements_without_parameters() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + + // when + String query = "UPDATE test SET c_int = 42 WHERE pk = 42"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + session().execute(bs); + + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("Query completed normally").contains(query); + } + + /** @jira_ticket JAVA-2857 */ + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_built_statements_without_parameters() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + + // when + // both c_int and pk will be inlined + BuiltStatement update = update("test").with(set("c_int", 42)).where(eq("pk", 42)); + session().execute(update); + + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("Query completed normally").contains(update.getQueryString()); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_non_null_named_parameter_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = :param1 WHERE pk = :param2"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setString("param1", "foo"); + bs.setInt("param2", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("param2:42") + .contains("param1:'foo'"); + } + + @Test(groups = "short") + public void should_log_non_null_positional_parameter_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = ? 
WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setString("c_text", "foo"); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("pk:42") + .contains("c_text:'foo'"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_non_null_parameters_built_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + + // when + BuiltStatement update = update("test").with(set("c_text", "foo")).where(eq("pk", 42)); + session().execute(update); + + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("['foo']"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_non_null_positional_parameter_simple_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = ? WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, "foo", 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("42") + .contains("'foo'"); + } + + @Test(groups = "short") + public void should_log_null_parameter_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setString("c_text", null); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("pk:42") + .contains("c_text:NULL"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_null_parameter_simple_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = ? WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, null, 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("42") + .contains("NULL"); + } + + @Test(groups = "short") + @CassandraVersion("3.0") + public void should_log_unset_parameter() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = ? 
WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("pk:42") + .contains("c_text:"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_bound_statement_parameters_inside_batch_statement() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; + String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; + BatchStatement batch = new BatchStatement(); + batch.add(session().prepare(query1).bind("foo", 42)); + batch.add(session().prepare(query2).bind(12345, 43)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query1) + .contains(query2) + .contains("pk:42") + .contains("pk:43") + .contains("c_text:'foo'") + .contains("c_int:12345"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_simple_statement_parameters_inside_batch_statement() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; + String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement(query1, "foo", 42)); + batch.add(new SimpleStatement(query2, 12345, 43)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query1) + .contains(query2) + .contains("42") + .contains("43") + .contains("'foo'") + .contains("12345"); + } + + // Test different CQL types + + @Test(groups = "short") + public void should_log_all_parameter_types_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(Integer.MAX_VALUE).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET " + assignments + " WHERE pk = 42"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(values.toArray()); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("Query completed normally").contains(ipOfNode(1)).contains(query); + CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); + for (DataType type : dataTypes) { + TypeCodec codec = codecRegistry.codecFor(type); + assertThat(line).contains(codec.format(getFixedValue(type))); } - - // Tests for different log levels - - @Test(groups = "unit") - public void should_not_log_normal_if_level_higher_than_DEBUG() throws Exception { - // given - normal.setLevel(INFO); - slow.setLevel(INFO); - error.setLevel(INFO); - // when - queryLogger = QueryLogger.builder().build(); - queryLogger.onRegister(mock(Cluster.class)); - queryLogger.update(null, mock(BoundStatement.class), null, 0); - // then - assertThat(normalAppender.get()).isEmpty(); - assertThat(slowAppender.get()).isEmpty(); - assertThat(errorAppender.get()).isEmpty(); - } - - 
@Test(groups = "unit") - public void should_not_log_slow_if_level_higher_than_DEBUG() throws Exception { - // given - normal.setLevel(INFO); - slow.setLevel(INFO); - error.setLevel(INFO); - // when - queryLogger = QueryLogger.builder().build(); - queryLogger.onRegister(mock(Cluster.class)); - queryLogger.update(null, mock(BoundStatement.class), null, DEFAULT_SLOW_QUERY_THRESHOLD_MS + 1); - // then - assertThat(normalAppender.get()).isEmpty(); - assertThat(slowAppender.get()).isEmpty(); - assertThat(errorAppender.get()).isEmpty(); - } - - @Test(groups = "unit") - public void should_not_log_error_if_level_higher_than_DEBUG() throws Exception { - // given - normal.setLevel(INFO); - slow.setLevel(INFO); - error.setLevel(INFO); - // when - queryLogger = QueryLogger.builder().build(); - queryLogger.onRegister(mock(Cluster.class)); - queryLogger.update(null, mock(BoundStatement.class), new DriverException("booh"), 0); - // then - assertThat(normalAppender.get()).isEmpty(); - assertThat(slowAppender.get()).isEmpty(); - assertThat(errorAppender.get()).isEmpty(); - } - - // Tests for different query types (normal, slow, exception) - - @Test(groups = "short") - public void should_log_normal_queries() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query = "SELECT * FROM test where pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .doesNotContain("pk:42"); - } - - // Tests for slow and error queries are in QueryLoggerErrorsTest - - // Tests with query parameters (log level TRACE) - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_non_null_named_parameter_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = :param1 WHERE pk = :param2"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setString("param1", "foo"); - bs.setInt("param2", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("param2:42") - .contains("param1:'foo'"); - } - - @Test(groups = "short") - public void should_log_non_null_positional_parameter_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = ? 
WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setString("c_text", "foo"); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("pk:42") - .contains("c_text:'foo'"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_non_null_positional_parameter_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, "foo", 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("42") - .contains("'foo'"); - } - - @Test(groups = "short") - public void should_log_null_parameter_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = ? WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setString("c_text", null); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("pk:42") - .contains("c_text:NULL"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_all_parameter_types_simple_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(Integer.MAX_VALUE).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET " + assignments + " WHERE pk = 42"; + SimpleStatement ss = new SimpleStatement(query, values.toArray()); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line).contains("Query completed normally").contains(ipOfNode(1)).contains(query); + CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); + for (DataType type : dataTypes) { + TypeCodec codec; + if (type.equals(DataType.time())) { + codec = codecRegistry.codecFor(DataType.bigint()); + } else { + codec = codecRegistry.codecFor(type); + } + assertThat(line).contains(codec.format(getFixedValue(type))); } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_null_parameter_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = ? 
WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, null, 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("42") - .contains("NULL"); - } - - @Test(groups = "short") - @CassandraVersion("3.0") - public void should_log_unset_parameter() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = ? WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("pk:42") - .contains("c_text:"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_bound_statement_parameters_inside_batch_statement() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; - String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; - BatchStatement batch = new BatchStatement(); - batch.add(session().prepare(query1).bind("foo", 42)); - batch.add(session().prepare(query2).bind(12345, 43)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query1) - .contains(query2) - .contains("pk:42") - .contains("pk:43") - .contains("c_text:'foo'") - .contains("c_int:12345"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_simple_statement_parameters_inside_batch_statement() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder().build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; - String query2 = "UPDATE test SET c_int = ? 
WHERE pk = ?"; - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement(query1, "foo", 42)); - batch.add(new SimpleStatement(query2, 12345, 43)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query1) - .contains(query2) - .contains("42") - .contains("43") - .contains("'foo'") - .contains("12345"); - } - - // Test different CQL types - - @Test(groups = "short") - public void should_log_all_parameter_types_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET " + assignments + " WHERE pk = 42"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(values.toArray()); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query); - CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); - for (DataType type : dataTypes) { - TypeCodec codec = codecRegistry.codecFor(type); - assertThat(line).contains(codec.format(getFixedValue(type))); - } - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_all_parameter_types_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET " + assignments + " WHERE pk = 42"; - SimpleStatement ss = new SimpleStatement(query, values.toArray()); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query); - CodecRegistry codecRegistry = cluster().getConfiguration().getCodecRegistry(); - for (DataType type : dataTypes) { - TypeCodec codec; - if (type.equals(DataType.time())) { - codec = codecRegistry.codecFor(DataType.bigint()); - } else { - codec = codecRegistry.codecFor(type); - } - assertThat(line).contains(codec.format(getFixedValue(type))); - } - } - - // Tests for truncation of query strings and parameter values - - @Test(groups = "short") - public void should_truncate_query_when_max_length_exceeded() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withMaxQueryStringLength(5) - .build(); - cluster().register(queryLogger); - // when - String query = "SELECT * FROM test WHERE pk = 42"; - session().execute(query); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("SELEC" + TRUNCATED_OUTPUT) - .doesNotContain(query); - } - - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_show_total_statements_for_batches_even_if_query_truncated() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withMaxQueryStringLength(5) - .build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; - String query2 = "UPDATE test SET c_int = ? 
WHERE pk = ?"; - BatchStatement batch = new BatchStatement(); - batch.add(session().prepare(query1).bind("foo", 42)); - batch.add(session().prepare(query2).bind(12345, 43)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("BEGIN" + TRUNCATED_OUTPUT) - .doesNotContain(query1) - .doesNotContain(query2) - .contains(" [2 statements"); - } - - @Test(groups = "short") - public void should_not_truncate_query_when_max_length_unlimited() throws Exception { - // given - normal.setLevel(DEBUG); - queryLogger = QueryLogger.builder() - .withMaxQueryStringLength(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "SELECT * FROM test WHERE pk = 42"; - session().execute(query); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .doesNotContain(TRUNCATED_OUTPUT); - } - - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_truncate_parameter_when_max_length_exceeded_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(5) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setInt("c_int", 123456); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_int:12345" + TRUNCATED_OUTPUT) - .doesNotContain("123456"); - } - - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_truncate_parameter_when_max_length_exceeded_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(5) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, 123456, 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("12345" + TRUNCATED_OUTPUT) - .doesNotContain("123456"); - } - - @Test(groups = "short") - public void should_truncate_blob_parameter_when_max_length_exceeded_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(6) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_blob = ? 
WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setBytes("c_blob", ByteBuffer.wrap(Bytes.toArray(Lists.newArrayList(1, 2, 3)))); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_blob:0x0102" + TRUNCATED_OUTPUT) - .doesNotContain("0x010203"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_truncate_blob_parameter_when_max_length_exceeded_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(6) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_blob = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, ByteBuffer.wrap(Bytes.toArray(Lists.newArrayList(1, 2, 3))), 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("0x0102" + TRUNCATED_OUTPUT) - .doesNotContain("0x010203"); - } - - @Test(groups = "short") - public void should_not_truncate_parameter_when_max_length_unlimited_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setInt("c_int", 123456); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_int:123456") - .doesNotContain(TRUNCATED_OUTPUT); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_not_truncate_parameter_when_max_length_unlimited_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxParameterValueLength(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, 123456, 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("123456") - .doesNotContain(TRUNCATED_OUTPUT); - } - - @Test(groups = "short") - public void should_not_log_exceeding_number_of_parameters_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? 
WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setInt("c_int", 123456); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_int:123456") - .doesNotContain("pk:42") - .contains(FURTHER_PARAMS_OMITTED); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_not_log_exceeding_number_of_parameters_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, 123456, 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("123456") - .doesNotContain("123456, 42") - .contains(FURTHER_PARAMS_OMITTED); - } - - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_not_log_exceeding_number_of_parameters_simple_statements_with_named_values() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = :c_int WHERE pk = :pk"; - Map namedValues = new LinkedHashMap(); - namedValues.put("c_int", 123456); - namedValues.put("pk", 42); - SimpleStatement ss = new SimpleStatement(query, namedValues); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_int:123456") - .doesNotContain("pk:42") - .contains(FURTHER_PARAMS_OMITTED); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_not_log_exceeding_number_of_parameters_in_batch_statement_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(1) - .build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; - String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; - BatchStatement batch = new BatchStatement(); - batch.add(session().prepare(query1).bind("foo", 42)); - batch.add(session().prepare(query2).bind(12345, 43)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query1) - .contains(query2) - .contains("c_text:'foo'") - .doesNotContain("pk:42") - .doesNotContain("c_int:12345") - .doesNotContain("pk:43") - .contains(FURTHER_PARAMS_OMITTED); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_not_log_exceeding_number_of_parameters_in_batch_statement_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(1) - .build(); - cluster().register(queryLogger); - // when - String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; - String query2 = "UPDATE test SET c_int = ? 
WHERE pk = ?"; - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement(query1, "foo", 42)); - batch.add(new SimpleStatement(query2, 12345, 43)); - session().execute(batch); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query1) - .contains(query2) - .contains("'foo'") - .doesNotContain("42, 12345, 43") - .contains(FURTHER_PARAMS_OMITTED); - } - - @Test(groups = "short") - public void should_log_all_parameters_when_max_unlimited_bound_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setInt("c_int", 123456); - bs.setInt("pk", 42); - session().execute(bs); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("c_int:123456") - .contains("pk:42"); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_log_all_parameters_when_max_unlimited_simple_statements() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = ? WHERE pk = ?"; - SimpleStatement ss = new SimpleStatement(query, 123456, 42); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("123456") - .contains("42"); - } - - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_log_all_parameters_when_max_unlimited_simple_statements_with_named_values() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withMaxLoggedParameters(-1) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_int = :c_int WHERE pk = :pk"; - Map namedValues = new HashMap(); - namedValues.put("c_int", 123456); - namedValues.put("pk", 42); - SimpleStatement ss = new SimpleStatement(query, namedValues); - session().execute(ss); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains("123456") - .contains("42"); - } - - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_log_wrapped_bound_statement() throws Exception { - // given - normal.setLevel(TRACE); - queryLogger = QueryLogger.builder() - .withConstantThreshold(Long.MAX_VALUE) - .withMaxQueryStringLength(Integer.MAX_VALUE) - .build(); - cluster().register(queryLogger); - // when - String query = "UPDATE test SET c_text = :param1 WHERE pk = :param2"; - PreparedStatement ps = session().prepare(query); - BoundStatement bs = ps.bind(); - bs.setString("param1", "foo"); - bs.setInt("param2", 42); - session().execute(new CustomStatement(bs)); - // then - String line = normalAppender.waitAndGet(10000); - assertThat(line) - .contains("Query completed normally") - .contains(ipOfNode(1)) - .contains(query) - .contains("param2:42") - .contains("param1:'foo'"); - } - + } + + // Tests for truncation of query strings and parameter values + + @Test(groups = "short") + 
public void should_truncate_query_when_max_length_exceeded() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = QueryLogger.builder().withMaxQueryStringLength(5).build(); + cluster().register(queryLogger); + // when + String query = "SELECT * FROM test WHERE pk = 42"; + session().execute(query); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("SELEC" + TRUNCATED_OUTPUT) + .doesNotContain(query); + } + + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_show_total_statements_for_batches_even_if_query_truncated() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = QueryLogger.builder().withMaxQueryStringLength(5).build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; + String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; + BatchStatement batch = new BatchStatement(); + batch.add(session().prepare(query1).bind("foo", 42)); + batch.add(session().prepare(query2).bind(12345, 43)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("BEGIN" + TRUNCATED_OUTPUT) + .doesNotContain(query1) + .doesNotContain(query2) + .contains(" [2 statements"); + } + + @Test(groups = "short") + public void should_not_truncate_query_when_max_length_unlimited() throws Exception { + // given + normal.setLevel(DEBUG); + queryLogger = QueryLogger.builder().withMaxQueryStringLength(-1).build(); + cluster().register(queryLogger); + // when + String query = "SELECT * FROM test WHERE pk = 42"; + session().execute(query); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .doesNotContain(TRUNCATED_OUTPUT); + } + + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_truncate_parameter_when_max_length_exceeded_bound_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(5).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setInt("c_int", 123456); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_int:12345" + TRUNCATED_OUTPUT) + .doesNotContain("123456"); + } + + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_truncate_parameter_when_max_length_exceeded_simple_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(5).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? 
WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, 123456, 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("12345" + TRUNCATED_OUTPUT) + .doesNotContain("123456"); + } + + @Test(groups = "short") + public void should_truncate_blob_parameter_when_max_length_exceeded_bound_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(6).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_blob = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setBytes("c_blob", ByteBuffer.wrap(Bytes.toArray(Lists.newArrayList(1, 2, 3)))); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_blob:0x0102" + TRUNCATED_OUTPUT) + .doesNotContain("0x010203"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_truncate_blob_parameter_when_max_length_exceeded_simple_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(6).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_blob = ? WHERE pk = ?"; + SimpleStatement ss = + new SimpleStatement(query, ByteBuffer.wrap(Bytes.toArray(Lists.newArrayList(1, 2, 3))), 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("0x0102" + TRUNCATED_OUTPUT) + .doesNotContain("0x010203"); + } + + @Test(groups = "short") + public void should_not_truncate_parameter_when_max_length_unlimited_bound_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(-1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setInt("c_int", 123456); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_int:123456") + .doesNotContain(TRUNCATED_OUTPUT); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_not_truncate_parameter_when_max_length_unlimited_simple_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxParameterValueLength(-1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? 
WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, 123456, 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("123456") + .doesNotContain(TRUNCATED_OUTPUT); + } + + @Test(groups = "short") + public void should_not_log_exceeding_number_of_parameters_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setInt("c_int", 123456); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_int:123456") + .doesNotContain("pk:42") + .contains(FURTHER_PARAMS_OMITTED); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_not_log_exceeding_number_of_parameters_simple_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, 123456, 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("123456") + .doesNotContain("123456, 42") + .contains(FURTHER_PARAMS_OMITTED); + } + + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_not_log_exceeding_number_of_parameters_simple_statements_with_named_values() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = :c_int WHERE pk = :pk"; + Map namedValues = new LinkedHashMap(); + namedValues.put("c_int", 123456); + namedValues.put("pk", 42); + SimpleStatement ss = new SimpleStatement(query, namedValues); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_int:123456") + .doesNotContain("pk:42") + .contains(FURTHER_PARAMS_OMITTED); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_not_log_exceeding_number_of_parameters_in_batch_statement_bound_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(1).build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; + String query2 = "UPDATE test SET c_int = ? 
WHERE pk = ?"; + BatchStatement batch = new BatchStatement(); + batch.add(session().prepare(query1).bind("foo", 42)); + batch.add(session().prepare(query2).bind(12345, 43)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query1) + .contains(query2) + .contains("c_text:'foo'") + .doesNotContain("pk:42") + .doesNotContain("c_int:12345") + .doesNotContain("pk:43") + .contains(FURTHER_PARAMS_OMITTED); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_not_log_exceeding_number_of_parameters_in_batch_statement_simple_statements() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(1).build(); + cluster().register(queryLogger); + // when + String query1 = "UPDATE test SET c_text = ? WHERE pk = ?"; + String query2 = "UPDATE test SET c_int = ? WHERE pk = ?"; + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement(query1, "foo", 42)); + batch.add(new SimpleStatement(query2, 12345, 43)); + session().execute(batch); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query1) + .contains(query2) + .contains("'foo'") + .doesNotContain("42, 12345, 43") + .contains(FURTHER_PARAMS_OMITTED); + } + + @Test(groups = "short") + public void should_log_all_parameters_when_max_unlimited_bound_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(-1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? WHERE pk = ?"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setInt("c_int", 123456); + bs.setInt("pk", 42); + session().execute(bs); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("c_int:123456") + .contains("pk:42"); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_log_all_parameters_when_max_unlimited_simple_statements() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(-1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = ? 
WHERE pk = ?"; + SimpleStatement ss = new SimpleStatement(query, 123456, 42); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("123456") + .contains("42"); + } + + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_log_all_parameters_when_max_unlimited_simple_statements_with_named_values() + throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = QueryLogger.builder().withMaxLoggedParameters(-1).build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_int = :c_int WHERE pk = :pk"; + Map namedValues = new HashMap(); + namedValues.put("c_int", 123456); + namedValues.put("pk", 42); + SimpleStatement ss = new SimpleStatement(query, namedValues); + session().execute(ss); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains("123456") + .contains("42"); + } + + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_log_wrapped_bound_statement() throws Exception { + // given + normal.setLevel(TRACE); + queryLogger = + QueryLogger.builder() + .withConstantThreshold(Long.MAX_VALUE) + .withMaxQueryStringLength(Integer.MAX_VALUE) + .build(); + cluster().register(queryLogger); + // when + String query = "UPDATE test SET c_text = :param1 WHERE pk = :param2"; + PreparedStatement ps = session().prepare(query); + BoundStatement bs = ps.bind(); + bs.setString("param1", "foo"); + bs.setInt("param2", 42); + session().execute(new CustomStatement(bs)); + // then + String line = normalAppender.waitAndGet(10000); + assertThat(line) + .contains("Query completed normally") + .contains(ipOfNode(1)) + .contains(query) + .contains("param2:42") + .contains("param1:'foo'"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/QueryOptionsTest.java index fe9bc882f47..f0d4b4f34fe 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,229 +17,218 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; + import com.google.common.collect.Lists; +import java.util.List; +import java.util.concurrent.TimeUnit; import org.scassandra.http.client.PreparedStatementPreparation; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; - public class QueryOptionsTest { - ScassandraCluster scassandra; - - QueryOptions queryOptions; - - SortingLoadBalancingPolicy loadBalancingPolicy; - - Cluster cluster = null; - Session session = null; - Host host1, host2, host3; - + ScassandraCluster scassandra; - @BeforeMethod(groups = "short") - public void beforeMethod() { - scassandra = ScassandraCluster.builder().withNodes(3).build(); - scassandra.init(); + QueryOptions queryOptions; - queryOptions = new QueryOptions(); - loadBalancingPolicy = new SortingLoadBalancingPolicy(); - cluster = Cluster.builder() - .addContactPoints(scassandra.address(2).getAddress()) - .withPort(scassandra.getBinaryPort()) - .withLoadBalancingPolicy(loadBalancingPolicy) - .withQueryOptions(queryOptions) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); + SortingLoadBalancingPolicy loadBalancingPolicy; - session = cluster.connect(); + Cluster cluster = null; + Session session = null; + Host host1, host2, host3; - host1 = TestUtils.findHost(cluster, 1); - host2 = TestUtils.findHost(cluster, 2); - host3 = TestUtils.findHost(cluster, 3); + @BeforeMethod(groups = "short") + public void beforeMethod() { + scassandra = ScassandraCluster.builder().withNodes(3).build(); + scassandra.init(); - // Make sure there are no prepares - for (int host : Lists.newArrayList(1, 2, 3)) { - assertThat(scassandra.node(host).activityClient().retrievePreparedStatementPreparations()).hasSize(0); - scassandra.node(host).activityClient().clearAllRecordedActivity(); - } - } + queryOptions = new QueryOptions(); + loadBalancingPolicy = new SortingLoadBalancingPolicy(); + cluster = + Cluster.builder() + .addContactPoints(scassandra.address(2).getAddress()) + .withPort(scassandra.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .withQueryOptions(queryOptions) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); - public void validatePrepared(boolean expectAll) { - // Prepare the statement - String query = "select sansa_stark from the_known_world"; - PreparedStatement statement = session.prepare(query); - assertThat(cluster.manager.preparedQueries).containsValue(statement); - - // Ensure prepared properly based on expectation. 
- List preparationOne = scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); - List preparationTwo = scassandra.node(2).activityClient().retrievePreparedStatementPreparations(); - List preparationThree = scassandra.node(3).activityClient().retrievePreparedStatementPreparations(); - - assertThat(preparationOne).hasSize(1); - assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); - - if (expectAll) { - assertThat(preparationTwo).hasSize(1); - assertThat(preparationTwo.get(0).getPreparedStatementText()).isEqualTo(query); - assertThat(preparationThree).hasSize(1); - assertThat(preparationThree.get(0).getPreparedStatementText()).isEqualTo(query); - } else { - assertThat(preparationTwo).isEmpty(); - assertThat(preparationThree).isEmpty(); - } - } + session = cluster.connect(); - /** - *
-     * Validates that statements are only prepared on one node when
-     * {@link QueryOptions#setPrepareOnAllHosts(boolean)} is set to false.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result prepare query only on the first node. - * @jira_ticket JAVA-797 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "short") - public void should_prepare_once_when_prepare_on_all_hosts_false() { - queryOptions.setPrepareOnAllHosts(false); - validatePrepared(false); - } + host1 = TestUtils.findHost(cluster, 1); + host2 = TestUtils.findHost(cluster, 2); + host3 = TestUtils.findHost(cluster, 3); - /** - *
-     * Validates that statements are prepared on one node when
-     * {@link QueryOptions#setPrepareOnAllHosts(boolean)} is set to true.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result all nodes prepared the query - * @jira_ticket JAVA-797 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "short") - public void should_prepare_everywhere_when_prepare_on_all_hosts_true() { - queryOptions.setPrepareOnAllHosts(true); - validatePrepared(true); + // Make sure there are no prepares + for (int host : Lists.newArrayList(1, 2, 3)) { + assertThat(scassandra.node(host).activityClient().retrievePreparedStatementPreparations()) + .hasSize(0); + scassandra.node(host).activityClient().clearAllRecordedActivity(); } - - /** - *
-     * Validates that statements are prepared on one node when
-     * {@link QueryOptions#setPrepareOnAllHosts(boolean)} is not set.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result all nodes prepared the query. - * @jira_ticket JAVA-797 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_prepare_everywhere_when_not_configured() { - validatePrepared(true); + } + + public void validatePrepared(boolean expectAll) { + // Prepare the statement + String query = "select sansa_stark from the_known_world"; + PreparedStatement statement = session.prepare(query); + assertThat(cluster.manager.preparedQueries).containsValue(statement); + + // Ensure prepared properly based on expectation. + List preparationOne = + scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); + List preparationTwo = + scassandra.node(2).activityClient().retrievePreparedStatementPreparations(); + List preparationThree = + scassandra.node(3).activityClient().retrievePreparedStatementPreparations(); + + assertThat(preparationOne).hasSize(1); + assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); + + if (expectAll) { + assertThat(preparationTwo).hasSize(1); + assertThat(preparationTwo.get(0).getPreparedStatementText()).isEqualTo(query); + assertThat(preparationThree).hasSize(1); + assertThat(preparationThree.get(0).getPreparedStatementText()).isEqualTo(query); + } else { + assertThat(preparationTwo).isEmpty(); + assertThat(preparationThree).isEmpty(); } - - private void valideReprepareOnUp(boolean expectReprepare) { - String query = "select sansa_stark from the_known_world"; - int maxTries = 3; - for (int i = 1; i <= maxTries; i++) { - session.prepare(query); - - List preparationOne = scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); - - assertThat(preparationOne).hasSize(1); - assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); - - scassandra.node(1).activityClient().clearAllRecordedActivity(); - scassandra.node(1).stop(); - assertThat(cluster).host(1).goesDownWithin(10, TimeUnit.SECONDS); - - scassandra.node(1).start(); - assertThat(cluster).host(1).comesUpWithin(60, TimeUnit.SECONDS); - - preparationOne = scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); - if (expectReprepare) { - // tests fail randomly at this point, probably due to - // https://github.com/scassandra/scassandra-server/issues/116 - try { - assertThat(preparationOne).hasSize(1); - assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); - break; - } catch (AssertionError e) { - if (i == maxTries) - throw e; - // retry - scassandra.node(1).activityClient().clearAllRecordedActivity(); - } - } else { - assertThat(preparationOne).isEmpty(); - break; - } + } + + /** + * Validates that statements are only prepared on one node when {@link + * QueryOptions#setPrepareOnAllHosts(boolean)} is set to false. + * + * @test_category prepared_statements:prepared + * @expected_result prepare query only on the first node. + * @jira_ticket JAVA-797 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "short") + public void should_prepare_once_when_prepare_on_all_hosts_false() { + queryOptions.setPrepareOnAllHosts(false); + validatePrepared(false); + } + + /** + * Validates that statements are prepared on one node when {@link + * QueryOptions#setPrepareOnAllHosts(boolean)} is set to true. 
+ * + * @test_category prepared_statements:prepared + * @expected_result all nodes prepared the query + * @jira_ticket JAVA-797 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "short") + public void should_prepare_everywhere_when_prepare_on_all_hosts_true() { + queryOptions.setPrepareOnAllHosts(true); + validatePrepared(true); + } + + /** + * Validates that statements are prepared on one node when {@link + * QueryOptions#setPrepareOnAllHosts(boolean)} is not set. + * + * @test_category prepared_statements:prepared + * @expected_result all nodes prepared the query. + * @jira_ticket JAVA-797 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_prepare_everywhere_when_not_configured() { + validatePrepared(true); + } + + private void valideReprepareOnUp(boolean expectReprepare) { + String query = "select sansa_stark from the_known_world"; + int maxTries = 3; + for (int i = 1; i <= maxTries; i++) { + session.prepare(query); + + List preparationOne = + scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); + + assertThat(preparationOne).hasSize(1); + assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); + + scassandra.node(1).activityClient().clearAllRecordedActivity(); + scassandra.node(1).stop(); + assertThat(cluster).host(1).goesDownWithin(10, TimeUnit.SECONDS); + + scassandra.node(1).start(); + assertThat(cluster).host(1).comesUpWithin(60, TimeUnit.SECONDS); + + preparationOne = scassandra.node(1).activityClient().retrievePreparedStatementPreparations(); + if (expectReprepare) { + // tests fail randomly at this point, probably due to + // https://github.com/scassandra/scassandra-server/issues/116 + try { + assertThat(preparationOne).hasSize(1); + assertThat(preparationOne.get(0).getPreparedStatementText()).isEqualTo(query); + break; + } catch (AssertionError e) { + if (i == maxTries) throw e; + // retry + scassandra.node(1).activityClient().clearAllRecordedActivity(); } + } else { + assertThat(preparationOne).isEmpty(); + break; + } } - - /** - *
-     * Validates that statements are reprepared when a node comes back up when
-     * {@link QueryOptions#setReprepareOnUp(boolean)} is set to true.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result reprepare query on the restarted node. - * @jira_ticket JAVA-658 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "short") - public void should_reprepare_on_up_when_enabled() { - queryOptions.setReprepareOnUp(true); - valideReprepareOnUp(true); - } - - /** - *
-     * Validates that statements are reprepared when a node comes back up with
-     * the default configuration.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result reprepare query on the restarted node. - * @jira_ticket JAVA-658 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "short") - public void should_reprepare_on_up_by_default() { - valideReprepareOnUp(true); - } - - /** - *
-     * Validates that statements are not reprepared when a node comes back up when
-     * {@link QueryOptions#setReprepareOnUp(boolean)} is set to false.
-     *
    - * - * @test_category prepared_statements:prepared - * @expected_result do not reprepare query on the restarted node. - * @jira_ticket JAVA-658 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "short") - public void should_not_reprepare_on_up_when_disabled() { - queryOptions.setReprepareOnUp(false); - valideReprepareOnUp(false); - } - - @AfterMethod(groups = "short", alwaysRun = true) - public void afterMethod() { - if (cluster != null) - cluster.close(); - - if (scassandra != null) - scassandra.stop(); - } + } + + /** + * Validates that statements are reprepared when a node comes back up when {@link + * QueryOptions#setReprepareOnUp(boolean)} is set to true. + * + * @test_category prepared_statements:prepared + * @expected_result reprepare query on the restarted node. + * @jira_ticket JAVA-658 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "short") + public void should_reprepare_on_up_when_enabled() { + queryOptions.setReprepareOnUp(true); + valideReprepareOnUp(true); + } + + /** + * Validates that statements are reprepared when a node comes back up with the default + * configuration. + * + * @test_category prepared_statements:prepared + * @expected_result reprepare query on the restarted node. + * @jira_ticket JAVA-658 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "short") + public void should_reprepare_on_up_by_default() { + valideReprepareOnUp(true); + } + + /** + * Validates that statements are not reprepared when a node comes back up when {@link + * QueryOptions#setReprepareOnUp(boolean)} is set to false. + * + * @test_category prepared_statements:prepared + * @expected_result do not reprepare query on the restarted node. + * @jira_ticket JAVA-658 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "short") + public void should_not_reprepare_on_up_when_disabled() { + queryOptions.setReprepareOnUp(false); + valideReprepareOnUp(false); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void afterMethod() { + if (cluster != null) cluster.close(); + + if (scassandra != null) scassandra.stop(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java b/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java index 4f33b4dbe9e..e92f8d06a45 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,143 +17,153 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.Metrics.Errors; -import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; import com.datastax.driver.core.utils.CassandraVersion; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; - -/** - * Tests the behavior of client-provided timestamps with protocol v3. - */ +/** Tests the behavior of client-provided timestamps with protocol v3. */ @CassandraVersion("2.1.0") public class QueryTimestampTest extends CCMTestsSupport { - private volatile long timestampFromGenerator; - - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE foo (k int PRIMARY KEY, v int)"); - } - - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder() - .withTimestampGenerator(new TimestampGenerator() { - @Override - public long next() { - return timestampFromGenerator; - } + private volatile long timestampFromGenerator; + + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE foo (k int PRIMARY KEY, v int)"); + } + + @Override + public Cluster.Builder createClusterBuilder() { + @SuppressWarnings("deprecation") + Builder builder = + super.createClusterBuilder() + .withTimestampGenerator( + new TimestampGenerator() { + @Override + public long next() { + return timestampFromGenerator; + } }) - .withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE); - } - - @BeforeMethod(groups = "short") - public void cleanData() { - session().execute("TRUNCATE foo"); - } - - @Test(groups = "short") - public void should_use_CQL_timestamp_over_anything_else() { - timestampFromGenerator = 10; - String query = "INSERT INTO foo (k, v) VALUES (1, 1) USING TIMESTAMP 20"; - session().execute(new SimpleStatement(query).setDefaultTimestamp(30)); - - long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - assertEquals(writeTime, 20); - } - - @Test(groups = "short") - public void should_use_statement_timestamp_over_generator() { - timestampFromGenerator = 10; - String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; - session().execute(new SimpleStatement(query).setDefaultTimestamp(30)); - - long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - assertEquals(writeTime, 30); - } - - @Test(groups = "short") - public void should_use_generator_timestamp_if_none_other_specified() { - timestampFromGenerator = 10; - String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; - session().execute(query); - - long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - assertEquals(writeTime, 10); - } - - @Test(groups = "short") - public void should_use_server_side_timestamp_if_none_specified() { - timestampFromGenerator = Long.MIN_VALUE; - long clientTime = System.currentTimeMillis() * 1000; - String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; - session().execute(query); - - long writeTime = 
session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - assertTrue(writeTime >= clientTime); - } - - @Test(groups = "short") - public void should_apply_statement_timestamp_only_to_batched_queries_without_timestamp() { - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); - batch.setDefaultTimestamp(10); - session().execute(batch); - - long writeTime1 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - long writeTime2 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); - assertEquals(writeTime1, 10); - assertEquals(writeTime2, 20); - } - - @Test(groups = "short") - public void should_apply_generator_timestamp_only_to_batched_queries_without_timestamp() { - timestampFromGenerator = 10; - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); - session().execute(batch); - - long writeTime1 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - long writeTime2 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); - assertEquals(writeTime1, 10); - assertEquals(writeTime2, 20); - } - - @Test(groups = "short") - public void should_apply_server_side_timestamp_only_to_batched_queries_without_timestamp() { - timestampFromGenerator = Long.MIN_VALUE; - long clientTime = System.currentTimeMillis() * 1000; - BatchStatement batch = new BatchStatement(); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); - batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); - session().execute(batch); - - long writeTime1 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - long writeTime2 = session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); - assertTrue(writeTime1 >= clientTime); - assertEquals(writeTime2, 20); - } - - @Test(groups = "short") - public void should_preserve_timestamp_when_retrying() { - SimpleStatement statement = new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)"); - statement.setDefaultTimestamp(10); - // This will fail since we test against a single-host cluster. The DowngradingConsistencyRetryPolicy - // will retry it at ONE. 
- statement.setConsistencyLevel(ConsistencyLevel.TWO); - - session().execute(statement); - - Errors metrics = session().getCluster().getMetrics().getErrorMetrics(); - assertEquals(metrics.getRetriesOnUnavailable().getCount(), 1); - - long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); - assertEquals(writeTime, 10); - } + .withRetryPolicy( + com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy.INSTANCE); + return builder; + } + + @BeforeMethod(groups = "short") + public void cleanData() { + session().execute("TRUNCATE foo"); + } + + @Test(groups = "short") + public void should_use_CQL_timestamp_over_anything_else() { + timestampFromGenerator = 10; + String query = "INSERT INTO foo (k, v) VALUES (1, 1) USING TIMESTAMP 20"; + session().execute(new SimpleStatement(query).setDefaultTimestamp(30)); + + long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + assertEquals(writeTime, 20); + } + + @Test(groups = "short") + public void should_use_statement_timestamp_over_generator() { + timestampFromGenerator = 10; + String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; + session().execute(new SimpleStatement(query).setDefaultTimestamp(30)); + + long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + assertEquals(writeTime, 30); + } + + @Test(groups = "short") + public void should_use_generator_timestamp_if_none_other_specified() { + timestampFromGenerator = 10; + String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; + session().execute(query); + + long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + assertEquals(writeTime, 10); + } + + @Test(groups = "short") + public void should_use_server_side_timestamp_if_none_specified() { + timestampFromGenerator = Long.MIN_VALUE; + long clientTime = System.currentTimeMillis() * 1000; + String query = "INSERT INTO foo (k, v) VALUES (1, 1)"; + session().execute(query); + + long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + assertTrue(writeTime >= clientTime); + } + + @Test(groups = "short") + public void should_apply_statement_timestamp_only_to_batched_queries_without_timestamp() { + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); + batch.setDefaultTimestamp(10); + session().execute(batch); + + long writeTime1 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + long writeTime2 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); + assertEquals(writeTime1, 10); + assertEquals(writeTime2, 20); + } + + @Test(groups = "short") + public void should_apply_generator_timestamp_only_to_batched_queries_without_timestamp() { + timestampFromGenerator = 10; + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); + session().execute(batch); + + long writeTime1 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + long writeTime2 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); + assertEquals(writeTime1, 10); + assertEquals(writeTime2, 20); + } + + @Test(groups = "short") + public void 
should_apply_server_side_timestamp_only_to_batched_queries_without_timestamp() { + timestampFromGenerator = Long.MIN_VALUE; + long clientTime = System.currentTimeMillis() * 1000; + BatchStatement batch = new BatchStatement(); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)")); + batch.add(new SimpleStatement("INSERT INTO foo (k, v) VALUES (2, 1) USING TIMESTAMP 20")); + session().execute(batch); + + long writeTime1 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + long writeTime2 = + session().execute("SELECT writeTime(v) FROM foo WHERE k = 2").one().getLong(0); + assertTrue(writeTime1 >= clientTime); + assertEquals(writeTime2, 20); + } + + @Test(groups = "short") + public void should_preserve_timestamp_when_retrying() { + SimpleStatement statement = new SimpleStatement("INSERT INTO foo (k, v) VALUES (1, 1)"); + statement.setDefaultTimestamp(10); + // This will fail since we test against a single-host cluster. The + // DowngradingConsistencyRetryPolicy + // will retry it at ONE. + statement.setConsistencyLevel(ConsistencyLevel.TWO); + + session().execute(statement); + + Errors metrics = session().getCluster().getMetrics().getErrorMetrics(); + assertEquals(metrics.getRetriesOnUnavailable().getCount(), 1); + + long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0); + assertEquals(writeTime, 10); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryTracker.java b/driver-core/src/test/java/com/datastax/driver/core/QueryTracker.java index a021cc7418c..7890c4549f3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryTracker.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryTracker.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,114 +17,128 @@ */ package com.datastax.driver.core; +import static com.google.common.collect.Lists.newArrayList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.fail; + import com.datastax.driver.core.exceptions.CoordinatorException; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; -import org.assertj.core.util.Maps; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; - -import static com.google.common.collect.Lists.newArrayList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.fail; +import org.assertj.core.util.Maps; /** - * A convenience utility for executing queries against a {@link Session} and tracking - * which hosts were queried. + * A convenience utility for executing queries against a {@link Session} and tracking which hosts + * were queried. */ public class QueryTracker { - public static final String QUERY = "select * from test.foo"; - - Map coordinators = Maps.newConcurrentHashMap(); - - public void query(Session session, int times) { - query(session, times, ConsistencyLevel.ONE); + public static final String QUERY = "select * from test.foo"; + + Map coordinators = Maps.newConcurrentHashMap(); + + public void query(Session session, int times) { + query(session, times, ConsistencyLevel.ONE); + } + + public void query(Session session, int times, ConsistencyLevel cl) { + query(session, times, cl, null); + } + + public void query(Session session, int times, InetSocketAddress expectedHost) { + query(session, times, new SimpleStatement(QUERY), null, expectedHost); + } + + public void query( + Session session, + int times, + Class expectedException, + InetSocketAddress expectedHost) { + query(session, times, new SimpleStatement(QUERY), expectedException, expectedHost); + } + + public void query( + Session session, + int times, + ConsistencyLevel cl, + Class expectedException) { + Statement statement = new SimpleStatement(QUERY); + if (cl != null) { + statement.setConsistencyLevel(cl); } - public void query(Session session, int times, ConsistencyLevel cl) { - query(session, times, cl, null); - } + query(session, times, statement, expectedException, null); + } - public void query(Session session, int times, InetSocketAddress expectedHost) { - query(session, times, new SimpleStatement(QUERY), null, expectedHost); - } + public void query(Session session, int times, Statement statement) { + query(session, times, statement, null, null); + } - public void query(Session session, int times, Class expectedException, InetSocketAddress expectedHost) { - query(session, times, new SimpleStatement(QUERY), expectedException, expectedHost); - } - - public void query(Session session, int times, ConsistencyLevel cl, Class expectedException) { - Statement statement = new SimpleStatement(QUERY); - if (cl != null) { - statement.setConsistencyLevel(cl); - } - - query(session, times, statement, expectedException, null); - } + public void query( + Session session, + int times, + Statement statement, + Class expectedException, + 
InetSocketAddress expectedHost) { + List> futures = newArrayList(); - public void query(Session session, int times, Statement statement) { - query(session, times, statement, null, null); + for (int i = 0; i < times; i++) { + futures.add(session.executeAsync(statement)); } - public void query(Session session, int times, Statement statement, Class expectedException, InetSocketAddress expectedHost) { - List> futures = newArrayList(); - - for (int i = 0; i < times; i++) { - futures.add(session.executeAsync(statement)); - } - + try { + for (ListenableFuture future : futures) { try { - for (ListenableFuture future : futures) { - try { - ResultSet result = Uninterruptibles.getUninterruptibly(future, 1, TimeUnit.SECONDS); - InetSocketAddress address = result.getExecutionInfo().getQueriedHost().getSocketAddress(); - InetAddress coordinator = address.getAddress(); - Integer n = coordinators.get(coordinator); - coordinators.put(coordinator, n == null ? 1 : n + 1); - if (expectedHost != null) { - assertThat(address).isEqualTo(expectedHost); - } - } catch (ExecutionException ex) { - Throwable cause = ex.getCause(); - if (expectedException == null) { - fail("Query fail", ex); - } else { - assertThat(cause).isInstanceOf(expectedException); - } - - if (cause instanceof CoordinatorException) { - assertThat(((CoordinatorException) cause).getAddress()).isEqualTo(expectedHost); - } - } - } - } catch (Exception e) { - fail("Queries failed", e); + ResultSet result = Uninterruptibles.getUninterruptibly(future, 1, TimeUnit.SECONDS); + InetSocketAddress address = + result.getExecutionInfo().getQueriedHost().getEndPoint().resolve(); + InetAddress coordinator = address.getAddress(); + Integer n = coordinators.get(coordinator); + coordinators.put(coordinator, n == null ? 1 : n + 1); + if (expectedHost != null) { + assertThat(address).isEqualTo(expectedHost); + } + } catch (ExecutionException ex) { + Throwable cause = ex.getCause(); + if (expectedException == null) { + fail("Query fail", ex); + } else { + assertThat(cause).isInstanceOf(expectedException); + } + + if (cause instanceof CoordinatorException) { + assertThat(((CoordinatorException) cause).getEndPoint().resolve()) + .isEqualTo(expectedHost); + } } + } + } catch (Exception e) { + fail("Queries failed", e); } - - public int queryCount(ScassandraCluster sCluster, int dc, int node) { - try { - InetSocketAddress host = sCluster.address(dc, node); - Integer queried = coordinators.get(host.getAddress()); - return queried != null ? queried : 0; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public void assertQueried(ScassandraCluster sCluster, int dc, int node, int n) { - int queryCount = queryCount(sCluster, dc, node); - assertThat(queryCount) - .as("Expected node %d:%d to be queried %d times but was %d", dc, node, n, queryCount) - .isEqualTo(n); - } - - public void reset() { - this.coordinators.clear(); + } + + public int queryCount(ScassandraCluster sCluster, int dc, int node) { + try { + InetSocketAddress host = sCluster.address(dc, node); + Integer queried = coordinators.get(host.getAddress()); + return queried != null ? 
queried : 0; + } catch (Exception e) { + throw new RuntimeException(e); } + } + + public void assertQueried(ScassandraCluster sCluster, int dc, int node, int n) { + int queryCount = queryCount(sCluster, dc, node); + assertThat(queryCount) + .as("Expected node %d:%d to be queried %d times but was %d", dc, node, n, queryCount) + .isEqualTo(n); + } + + public void reset() { + this.coordinators.clear(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RPTokenFactoryTest.java b/driver-core/src/test/java/com/datastax/driver/core/RPTokenFactoryTest.java index 42c3b78f00e..2f682cbaae2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RPTokenFactoryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RPTokenFactoryTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,74 +17,72 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.Bytes; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.utils.Bytes; import java.nio.ByteBuffer; import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class RPTokenFactoryTest { - Token.Factory factory = Token.RPToken.FACTORY; + Token.Factory factory = Token.RPToken.FACTORY; - @Test(groups = "unit") - public void should_hash_consistently() { - ByteBuffer byteBuffer = Bytes.fromHexString("0xCAFEBABE"); - Token tokenA = factory.hash(byteBuffer); - Token tokenB = factory.hash(byteBuffer); - assertThat(tokenA) - .isEqualTo(factory.fromString("59959303159920881837560881824507314222")) - .isEqualTo(tokenB); - } + @Test(groups = "unit") + public void should_hash_consistently() { + ByteBuffer byteBuffer = Bytes.fromHexString("0xCAFEBABE"); + Token tokenA = factory.hash(byteBuffer); + Token tokenB = factory.hash(byteBuffer); + assertThat(tokenA) + .isEqualTo(factory.fromString("59959303159920881837560881824507314222")) + .isEqualTo(tokenB); + } - @Test(groups = "unit") - public void should_split_range() { - List splits = factory.split(factory.fromString("0"), factory.fromString("127605887595351923798765477786913079296"), 3); - assertThat(splits).containsExactly( - factory.fromString("42535295865117307932921825928971026432"), - factory.fromString("85070591730234615865843651857942052864") - ); - } + @Test(groups = "unit") + public void should_split_range() { + List splits = + factory.split( + factory.fromString("0"), + factory.fromString("127605887595351923798765477786913079296"), + 3); + assertThat(splits) + .containsExactly( + factory.fromString("42535295865117307932921825928971026432"), + 
factory.fromString("85070591730234615865843651857942052864")); + } - @Test(groups = "unit") - public void should_split_range_that_wraps_around_the_ring() { - List splits = factory.split( - factory.fromString("127605887595351923798765477786913079296"), - factory.fromString("85070591730234615865843651857942052864"), - 3); + @Test(groups = "unit") + public void should_split_range_that_wraps_around_the_ring() { + List splits = + factory.split( + factory.fromString("127605887595351923798765477786913079296"), + factory.fromString("85070591730234615865843651857942052864"), + 3); - assertThat(splits).containsExactly( - factory.fromString("0"), - factory.fromString("42535295865117307932921825928971026432") - ); - } + assertThat(splits) + .containsExactly( + factory.fromString("0"), factory.fromString("42535295865117307932921825928971026432")); + } - @Test(groups = "unit") - public void should_split_range_producing_empty_splits_near_ring_end() { - Token minToken = factory.fromString("-1"); - Token maxToken = factory.fromString("170141183460469231731687303715884105728"); + @Test(groups = "unit") + public void should_split_range_producing_empty_splits_near_ring_end() { + Token minToken = factory.fromString("-1"); + Token maxToken = factory.fromString("170141183460469231731687303715884105728"); - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] range (which is the whole ring) - List splits = factory.split(maxToken, minToken, 3); - assertThat(splits).containsExactly( - maxToken, - maxToken - ); + // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] + // range (which is the whole ring) + List splits = factory.split(maxToken, minToken, 3); + assertThat(splits).containsExactly(maxToken, maxToken); - splits = factory.split(minToken, factory.fromString("0"), 3); - assertThat(splits).containsExactly( - factory.fromString("0"), - factory.fromString("0") - ); - } + splits = factory.split(minToken, factory.fromString("0"), 3); + assertThat(splits).containsExactly(factory.fromString("0"), factory.fromString("0")); + } - @Test(groups = "unit") - public void should_split_whole_ring() { - List splits = factory.split(factory.fromString("-1"), factory.fromString("-1"), 3); - assertThat(splits).containsExactly( - factory.fromString("56713727820156410577229101238628035242"), - factory.fromString("113427455640312821154458202477256070485") - ); - } + @Test(groups = "unit") + public void should_split_whole_ring() { + List splits = factory.split(factory.fromString("-1"), factory.fromString("-1"), 3); + assertThat(splits) + .containsExactly( + factory.fromString("56713727820156410577229101238628035242"), + factory.fromString("113427455640312821154458202477256070485")); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RPTokenIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/RPTokenIntegrationTest.java index 9fe94182a32..bb181559ef2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RPTokenIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RPTokenIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +22,12 @@ @CCMConfig(options = "-p RandomPartitioner") public class RPTokenIntegrationTest extends TokenIntegrationTest { - public RPTokenIntegrationTest() { - super(DataType.varint(), false); - } + public RPTokenIntegrationTest() { + super(DataType.varint(), false); + } - @Override - protected Token.Factory tokenFactory() { - return RPToken.FACTORY; - } + @Override + protected Token.Factory tokenFactory() { + return RPToken.FACTORY; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RPTokenVnodeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/RPTokenVnodeIntegrationTest.java index 47bf02ab509..d0a186c8500 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RPTokenVnodeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RPTokenVnodeIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +20,12 @@ @CCMConfig(options = {"-p RandomPartitioner", "--vnodes"}) public class RPTokenVnodeIntegrationTest extends TokenIntegrationTest { - public RPTokenVnodeIntegrationTest() { - super(DataType.varint(), true); - } + public RPTokenVnodeIntegrationTest() { + super(DataType.varint(), true); + } - @Override - protected Token.Factory tokenFactory() { - return Token.RPToken.FACTORY; - } + @Override + protected Token.Factory tokenFactory() { + return Token.RPToken.FACTORY; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReadTimeoutTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReadTimeoutTest.java index 3a6247be515..e1c6b339324 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReadTimeoutTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReadTimeoutTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,44 +17,40 @@ */ package com.datastax.driver.core; +import static org.scassandra.http.client.PrimingRequest.queryBuilder; +import static org.scassandra.http.client.PrimingRequest.then; + import com.datastax.driver.core.exceptions.OperationTimedOutException; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static org.scassandra.http.client.PrimingRequest.queryBuilder; -import static org.scassandra.http.client.PrimingRequest.then; - public class ReadTimeoutTest extends ScassandraTestBase.PerClassCluster { - String query = "SELECT foo FROM bar"; - - @BeforeMethod(groups = "short") - public void setup() { - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withFixedDelay(100L)) - .build() - ); - - // Set default timeout too low - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(10); - } - - @Test(groups = "short", expectedExceptions = OperationTimedOutException.class) - public void should_use_default_timeout_if_not_overridden_by_statement() { - session.execute(query); - } - - @Test(groups = "short") - public void should_use_statement_timeout_if_overridden() { - Statement statement = new SimpleStatement(query).setReadTimeoutMillis(10000); - session.execute(statement); - } - - @Test(groups = "short") - public void should_disable_timeout_if_set_to_zero_at_statement_level() { - Statement statement = new SimpleStatement(query).setReadTimeoutMillis(0); - session.execute(statement); - } + String query = "SELECT foo FROM bar"; + + @BeforeMethod(groups = "short") + public void setup() { + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withFixedDelay(100L)).build()); + + // Set default timeout too low + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(10); + } + + @Test(groups = "short", expectedExceptions = OperationTimedOutException.class) + public void should_use_default_timeout_if_not_overridden_by_statement() { + session.execute(query); + } + + @Test(groups = "short") + public void should_use_statement_timeout_if_overridden() { + Statement statement = new SimpleStatement(query).setReadTimeoutMillis(10000); + session.execute(statement); + } + + @Test(groups = "short") + public void should_disable_timeout_if_set_to_zero_at_statement_level() { + Statement statement = new SimpleStatement(query).setReadTimeoutMillis(0); + session.execute(statement); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java b/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java index bc79426fd59..1ce02b024c1 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,217 +17,229 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.Assertions.fail; +import static com.datastax.driver.core.Host.State.DOWN; +import static com.datastax.driver.core.Host.State.UP; +import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; + import com.datastax.driver.core.utils.CassandraVersion; +import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.Assertions.fail; -import static com.datastax.driver.core.Host.State.DOWN; -import static com.datastax.driver.core.Host.State.UP; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; - /** * Due to C* gossip bugs, system.peers may report nodes that are gone from the cluster. - *

    - * This class tests scenarios where these nodes have been recommissioned to another cluster and - * come back up. The driver must detect that they are not part of the cluster anymore, and ignore them. + * + *

    This class tests scenarios where these nodes have been recommissioned to another cluster and + * come back up. The driver must detect that they are not part of the cluster anymore, and ignore + * them. */ public class RecommissionedNodeTest { - private static final Logger logger = LoggerFactory.getLogger(RecommissionedNodeTest.class); - - CCMBridge.Builder mainCcmBuilder, otherCcmBuilder; - CCMAccess mainCcm, otherCcm; - Cluster mainCluster; - - @Test(groups = "long") - public void should_ignore_recommissioned_node_on_reconnection_attempt() throws Exception { - mainCcmBuilder = CCMBridge.builder().withNodes(3); - mainCcm = CCMCache.get(mainCcmBuilder); - - // node1 will be our "recommissioned" node, for now we just stop it so that it stays in the peers table. - mainCcm.stop(1); - mainCcm.waitForDown(1); - - // Now start the driver that will connect to node2 and node3, and consider node1 down - mainCluster = Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()).build(); - mainCluster.connect(); - waitForCountUpHosts(mainCluster, 2); - // From that point, reconnections to node1 have been scheduled. - - // Start another ccm that will reuse node1's address - otherCcmBuilder = CCMBridge.builder() - .withStoragePort(mainCcm.getStoragePort()) - .withThriftPort(mainCcm.getThriftPort()) - .withBinaryPort(mainCcm.getBinaryPort()) - .withNodes(1); - otherCcm = CCMCache.get(otherCcmBuilder); - otherCcm.waitForUp(1); - - // Give the driver the time to notice the node is back up and try to connect to it. - TimeUnit.SECONDS.sleep(32); - - assertThat(countUpHosts(mainCluster)).isEqualTo(2); - } - - @Test(groups = "long") - public void should_ignore_recommissioned_node_on_control_connection_reconnect() throws Exception { - mainCcmBuilder = CCMBridge.builder().withNodes(2); - mainCcm = CCMCache.get(mainCcmBuilder); - mainCcm.stop(1); - mainCcm.waitForDown(1); - - // Start the driver, the control connection will be on node2 - mainCluster = Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()).build(); - mainCluster.connect(); - waitForCountUpHosts(mainCluster, 1); - - // Start another ccm that will reuse node1's address - otherCcmBuilder = CCMBridge.builder() - .withStoragePort(mainCcm.getStoragePort()) - .withThriftPort(mainCcm.getThriftPort()) - .withBinaryPort(mainCcm.getBinaryPort()) - .withNodes(1); - otherCcm = CCMCache.get(otherCcmBuilder); - otherCcm.waitForUp(1); - - // Stop node2, the control connection gets defunct - mainCcm.stop(2); - TimeUnit.SECONDS.sleep(32); - - // The driver should not try to reconnect the control connection to node1 - assertThat(mainCluster).hasClosedControlConnection(); - } - - @Test(groups = "long") - public void should_ignore_recommissioned_node_on_session_init() throws Exception { - // Simulate the bug before starting the cluster - mainCcmBuilder = CCMBridge.builder().withNodes(2); - mainCcm = CCMCache.get(mainCcmBuilder); - mainCcm.stop(1); - mainCcm.waitForDown(1); - - otherCcmBuilder = CCMBridge.builder() - .withStoragePort(mainCcm.getStoragePort()) - .withThriftPort(mainCcm.getThriftPort()) - .withBinaryPort(mainCcm.getBinaryPort()) - .withNodes(1); - otherCcm = CCMCache.get(otherCcmBuilder); - otherCcm.waitForUp(1); - - // Start the driver, it should only connect to node 2 - mainCluster = Cluster.builder() - 
.addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()).build(); - - // When we first initialize the Cluster, all hosts are marked UP - assertThat(mainCluster).host(2).hasState(UP); - assertThat(mainCluster).host(1).hasState(UP); - - // Create a session. This will try to open a pool to node 1 and find out that the cluster name doesn't match. - mainCluster.connect(); - - // Node 1 should now be DOWN with no reconnection attempt - assertThat(mainCluster).host(1) - .goesDownWithin(10, TimeUnit.SECONDS) - .hasState(DOWN) - .isNotReconnectingFromDown(); - } - - @Test(groups = "long") - @CassandraVersion("2.0.0") - public void should_ignore_node_that_does_not_support_protocol_version_on_session_init() throws Exception { - // Simulate the bug before starting the cluster - mainCcmBuilder = CCMBridge.builder().withNodes(2); - mainCcm = CCMCache.get(mainCcmBuilder); - mainCcm.stop(1); - mainCcm.waitForDown(1); - - otherCcmBuilder = CCMBridge.builder().withNodes(1) - .withStoragePort(mainCcm.getStoragePort()) - .withThriftPort(mainCcm.getThriftPort()) - .withBinaryPort(mainCcm.getBinaryPort()) - .withVersion(VersionNumber.parse("1.2.19")); - otherCcm = CCMCache.get(otherCcmBuilder); - otherCcm.waitForUp(1); - - // Start the driver, it should only connect to node 2 - mainCluster = Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()).build(); - - // Create a session. This will try to open a pool to node 1 and find that it doesn't support protocol version. - mainCluster.connect(); - - // Node 1 should now be DOWN with no reconnection attempt - assertThat(mainCluster).host(1) - .goesDownWithin(10, TimeUnit.SECONDS) - .hasState(DOWN) - .isNotReconnectingFromDown(); - } - - @BeforeMethod(groups = "long") - public void clearFields() { - // Clear cluster and ccm instances between tests. - mainCluster = null; - mainCcmBuilder = null; - otherCcmBuilder = null; - mainCcm = null; - otherCcm = null; - } - - @AfterMethod(groups = "long", alwaysRun = true) - public void teardown() { - if (mainCluster != null) - mainCluster.close(); - if (mainCcmBuilder != null) - CCMCache.remove(mainCcmBuilder); - if (otherCcmBuilder != null) - CCMCache.remove(otherCcmBuilder); - if (mainCcm != null) - mainCcm.close(); - if (otherCcm != null) - otherCcm.close(); - } - - private static int countUpHosts(Cluster cluster) { - int ups = 0; - for (Host host : cluster.getMetadata().getAllHosts()) { - if (host.isUp()) - ups += 1; - } - return ups; + private static final Logger logger = LoggerFactory.getLogger(RecommissionedNodeTest.class); + + CCMBridge.Builder mainCcmBuilder, otherCcmBuilder; + CCMAccess mainCcm, otherCcm; + Cluster mainCluster; + + @Test(groups = "long") + public void should_ignore_recommissioned_node_on_reconnection_attempt() throws Exception { + mainCcmBuilder = CCMBridge.builder().withNodes(3); + mainCcm = CCMCache.get(mainCcmBuilder); + + // node1 will be our "recommissioned" node, for now we just stop it so that it stays in the + // peers table. 
+ mainCcm.stop(1); + mainCcm.waitForDown(1); + + // Now start the driver that will connect to node2 and node3, and consider node1 down + mainCluster = + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) + .withQueryOptions(nonDebouncingQueryOptions()) + .build(); + mainCluster.connect(); + waitForCountUpHosts(mainCluster, 2); + // From that point, reconnections to node1 have been scheduled. + + // Start another ccm that will reuse node1's address + otherCcmBuilder = + CCMBridge.builder() + .withStoragePort(mainCcm.getStoragePort()) + .withThriftPort(mainCcm.getThriftPort()) + .withBinaryPort(mainCcm.getBinaryPort()) + .withNodes(1); + otherCcm = CCMCache.get(otherCcmBuilder); + otherCcm.waitForUp(1); + + // Give the driver the time to notice the node is back up and try to connect to it. + TimeUnit.SECONDS.sleep(32); + + assertThat(countUpHosts(mainCluster)).isEqualTo(2); + } + + @Test(groups = "long") + public void should_ignore_recommissioned_node_on_control_connection_reconnect() throws Exception { + mainCcmBuilder = CCMBridge.builder().withNodes(2); + mainCcm = CCMCache.get(mainCcmBuilder); + mainCcm.stop(1); + mainCcm.waitForDown(1); + + // Start the driver, the control connection will be on node2 + mainCluster = + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) + .withQueryOptions(nonDebouncingQueryOptions()) + .build(); + mainCluster.connect(); + waitForCountUpHosts(mainCluster, 1); + + // Start another ccm that will reuse node1's address + otherCcmBuilder = + CCMBridge.builder() + .withStoragePort(mainCcm.getStoragePort()) + .withThriftPort(mainCcm.getThriftPort()) + .withBinaryPort(mainCcm.getBinaryPort()) + .withNodes(1); + otherCcm = CCMCache.get(otherCcmBuilder); + otherCcm.waitForUp(1); + + // Stop node2, the control connection gets defunct + mainCcm.stop(2); + TimeUnit.SECONDS.sleep(32); + + // The driver should not try to reconnect the control connection to node1 + assertThat(mainCluster).hasClosedControlConnection(); + } + + @Test(groups = "long") + public void should_ignore_recommissioned_node_on_session_init() throws Exception { + // Simulate the bug before starting the cluster + mainCcmBuilder = CCMBridge.builder().withNodes(2); + mainCcm = CCMCache.get(mainCcmBuilder); + mainCcm.stop(1); + mainCcm.waitForDown(1); + + otherCcmBuilder = + CCMBridge.builder() + .withStoragePort(mainCcm.getStoragePort()) + .withThriftPort(mainCcm.getThriftPort()) + .withBinaryPort(mainCcm.getBinaryPort()) + .withNodes(1); + otherCcm = CCMCache.get(otherCcmBuilder); + otherCcm.waitForUp(1); + + // Start the driver, it should only connect to node 2 + mainCluster = + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) + .withQueryOptions(nonDebouncingQueryOptions()) + .build(); + + // When we first initialize the Cluster, all hosts are marked UP + assertThat(mainCluster).host(2).hasState(UP); + assertThat(mainCluster).host(1).hasState(UP); + + // Create a session. This will try to open a pool to node 1 and find out that the cluster name + // doesn't match. 
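+    // (Note, as the assertions just above show, all hosts are still marked UP while no session
+    // exists; the DOWN state asserted below is only expected once this connect() opens pools
+    // and the cluster-name check fails for node 1.)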
+ mainCluster.connect(); + + // Node 1 should now be DOWN with no reconnection attempt + assertThat(mainCluster) + .host(1) + .goesDownWithin(10, TimeUnit.SECONDS) + .hasState(DOWN) + .isNotReconnectingFromDown(); + } + + @Test( + groups = "long", + enabled = false, + description = "Disabled because requires specific C* version, not essential") + @CassandraVersion("3.0.0") + public void should_ignore_node_that_does_not_support_protocol_version_on_session_init() + throws Exception { + // Simulate the bug before starting the cluster + mainCcmBuilder = CCMBridge.builder().withNodes(2); + mainCcm = CCMCache.get(mainCcmBuilder); + mainCcm.stop(1); + mainCcm.waitForDown(1); + + otherCcmBuilder = + CCMBridge.builder() + .withNodes(1) + .withStoragePort(mainCcm.getStoragePort()) + .withThriftPort(mainCcm.getThriftPort()) + .withBinaryPort(mainCcm.getBinaryPort()) + .withVersion(VersionNumber.parse("2.1.20")); + otherCcm = CCMCache.get(otherCcmBuilder); + otherCcm.waitForUp(1); + + // Start the driver, it should only connect to node 2 + mainCluster = + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) + .withQueryOptions(nonDebouncingQueryOptions()) + .build(); + + // Create a session. This will try to open a pool to node 1 and find that it doesn't support + // protocol version. + mainCluster.connect(); + + // Node 1 should now be DOWN with no reconnection attempt + assertThat(mainCluster) + .host(1) + .goesDownWithin(10, TimeUnit.SECONDS) + .hasState(DOWN) + .isNotReconnectingFromDown(); + } + + @BeforeMethod(groups = "long") + public void clearFields() { + // Clear cluster and ccm instances between tests. + mainCluster = null; + mainCcmBuilder = null; + otherCcmBuilder = null; + mainCcm = null; + otherCcm = null; + } + + @AfterMethod(groups = "long", alwaysRun = true) + public void teardown() { + if (mainCluster != null) mainCluster.close(); + if (mainCcmBuilder != null) CCMCache.remove(mainCcmBuilder); + if (otherCcmBuilder != null) CCMCache.remove(otherCcmBuilder); + if (mainCcm != null) mainCcm.close(); + if (otherCcm != null) otherCcm.close(); + } + + private static int countUpHosts(Cluster cluster) { + int ups = 0; + for (Host host : cluster.getMetadata().getAllHosts()) { + if (host.isUp()) ups += 1; } + return ups; + } - private static void waitForCountUpHosts(Cluster cluster, int expectedCount) throws InterruptedException { - int maxRetries = 10; - int interval = 6; + private static void waitForCountUpHosts(Cluster cluster, int expectedCount) + throws InterruptedException { + int maxRetries = 10; + int interval = 6; - for (int i = 0; i <= maxRetries; i++) { - int actualCount = countUpHosts(cluster); - if (actualCount == expectedCount) - return; + for (int i = 0; i <= maxRetries; i++) { + int actualCount = countUpHosts(cluster); + if (actualCount == expectedCount) return; - if (i == maxRetries) - fail(String.format("Up host count didn't reach %d after %d seconds", - expectedCount, i * interval)); - else - logger.debug("Counted {} up hosts after {} seconds", actualCount, i * interval); + if (i == maxRetries) + fail( + String.format( + "Up host count didn't reach %d after %d seconds", expectedCount, i * interval)); + else logger.debug("Counted {} up hosts after {} seconds", actualCount, i * interval); - TimeUnit.SECONDS.sleep(interval); - } + TimeUnit.SECONDS.sleep(interval); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionPolicyTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionPolicyTest.java index 5e8f8b73e78..5fc54280eeb 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,225 +17,242 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.datastax.driver.core.policies.ExponentialReconnectionPolicy; import com.datastax.driver.core.policies.ReconnectionPolicy; import org.testng.annotations.Test; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.fail; - @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createKeyspace = false) @SuppressWarnings("unused") public class ReconnectionPolicyTest extends AbstractPoliciesTest { - private Cluster.Builder exponential() { - return Cluster.builder().withReconnectionPolicy(new ExponentialReconnectionPolicy(2 * 1000, 5 * 60 * 1000)); + private Cluster.Builder exponential() { + return Cluster.builder() + .withReconnectionPolicy(new ExponentialReconnectionPolicy(2 * 1000, 5 * 60 * 1000)); + } + + private Cluster.Builder constant() { + return Cluster.builder().withReconnectionPolicy(new ConstantReconnectionPolicy(10 * 1000)); + } + + /* + * Test the ExponentialReconnectionPolicy. 
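+   * (As asserted below, the schedule starts at the 2 s base delay, doubles on each attempt,
+   * and is capped at the 5 min max delay.)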
+ */ + @Test(groups = "long") + @CCMConfig(clusterProvider = "exponential") + public void exponentialReconnectionPolicyTest() throws Throwable { + + // Ensure that ExponentialReconnectionPolicy is what we should be testing + if (!(cluster().getConfiguration().getPolicies().getReconnectionPolicy() + instanceof ExponentialReconnectionPolicy)) { + fail("Set policy does not match retrieved policy."); } - private Cluster.Builder constant() { - return Cluster.builder().withReconnectionPolicy(new ConstantReconnectionPolicy(10 * 1000)); + // Test basic getters + ExponentialReconnectionPolicy reconnectionPolicy = + (ExponentialReconnectionPolicy) + cluster().getConfiguration().getPolicies().getReconnectionPolicy(); + assertTrue(reconnectionPolicy.getBaseDelayMs() == 2 * 1000); + assertTrue(reconnectionPolicy.getMaxDelayMs() == 5 * 60 * 1000); + + // Test erroneous instantiations + try { + new ExponentialReconnectionPolicy(-1, 1); + fail(); + } catch (IllegalArgumentException e) { + // ok } - /* - * Test the ExponentialReconnectionPolicy. - */ - @Test(groups = "long") - @CCMConfig(clusterProvider = "exponential") - public void exponentialReconnectionPolicyTest() throws Throwable { - - // Ensure that ExponentialReconnectionPolicy is what we should be testing - if (!(cluster().getConfiguration().getPolicies().getReconnectionPolicy() instanceof ExponentialReconnectionPolicy)) { - fail("Set policy does not match retrieved policy."); - } - - // Test basic getters - ExponentialReconnectionPolicy reconnectionPolicy = (ExponentialReconnectionPolicy) cluster().getConfiguration().getPolicies().getReconnectionPolicy(); - assertTrue(reconnectionPolicy.getBaseDelayMs() == 2 * 1000); - assertTrue(reconnectionPolicy.getMaxDelayMs() == 5 * 60 * 1000); - - // Test erroneous instantiations - try { - new ExponentialReconnectionPolicy(-1, 1); - fail(); - } catch (IllegalArgumentException e) { - //ok - } - - try { - new ExponentialReconnectionPolicy(1, -1); - fail(); - } catch (IllegalArgumentException e) { - //ok - } - - try { - new ExponentialReconnectionPolicy(-1, -1); - fail(); - } catch (IllegalArgumentException e) { - //ok - } + try { + new ExponentialReconnectionPolicy(1, -1); + fail(); + } catch (IllegalArgumentException e) { + // ok + } - try { - new ExponentialReconnectionPolicy(2, 1); - fail(); - } catch (IllegalArgumentException e) { - //ok - } + try { + new ExponentialReconnectionPolicy(-1, -1); + fail(); + } catch (IllegalArgumentException e) { + // ok + } - // Test nextDelays() - ReconnectionPolicy.ReconnectionSchedule schedule = new ExponentialReconnectionPolicy(2 * 1000, 5 * 60 * 1000).newSchedule(); - assertTrue(schedule.nextDelayMs() == 2000); - assertTrue(schedule.nextDelayMs() == 4000); - assertTrue(schedule.nextDelayMs() == 8000); - assertTrue(schedule.nextDelayMs() == 16000); - assertTrue(schedule.nextDelayMs() == 32000); - for (int i = 0; i < 64; ++i) - schedule.nextDelayMs(); - assertTrue(schedule.nextDelayMs() == reconnectionPolicy.getMaxDelayMs()); - - // Run integration test - long restartTime = 2 + 4 + 8 + 2; // 16: 3 full cycles + 2 seconds - long retryTime = 30; // 4th cycle start time - long breakTime = 62; // time until next reconnection attempt - - // TODO: Try to sort out variance - //reconnectionPolicyTest(restartTime, retryTime, breakTime); + try { + new ExponentialReconnectionPolicy(2, 1); + fail(); + } catch (IllegalArgumentException e) { + // ok } - /* - * Test the ConstantReconnectionPolicy. 
- */ - @Test(groups = "long") - @CCMConfig(clusterProvider = "constant") - public void constantReconnectionPolicyTest() throws Throwable { + // Test nextDelays() + ReconnectionPolicy.ReconnectionSchedule schedule = + new ExponentialReconnectionPolicy(2 * 1000, 5 * 60 * 1000).newSchedule(); + assertTrue(schedule.nextDelayMs() == 2000); + assertTrue(schedule.nextDelayMs() == 4000); + assertTrue(schedule.nextDelayMs() == 8000); + assertTrue(schedule.nextDelayMs() == 16000); + assertTrue(schedule.nextDelayMs() == 32000); + for (int i = 0; i < 64; ++i) schedule.nextDelayMs(); + assertTrue(schedule.nextDelayMs() == reconnectionPolicy.getMaxDelayMs()); + + // Run integration test + long restartTime = 2 + 4 + 8 + 2; // 16: 3 full cycles + 2 seconds + long retryTime = 30; // 4th cycle start time + long breakTime = 62; // time until next reconnection attempt + + // TODO: Try to sort out variance + // reconnectionPolicyTest(restartTime, retryTime, breakTime); + } + + /* + * Test the ConstantReconnectionPolicy. + */ + @Test(groups = "long") + @CCMConfig(clusterProvider = "constant") + public void constantReconnectionPolicyTest() throws Throwable { + + // Ensure that ConstantReconnectionPolicy is what we should be testing + if (!(cluster().getConfiguration().getPolicies().getReconnectionPolicy() + instanceof ConstantReconnectionPolicy)) { + fail("Set policy does not match retrieved policy."); + } - // Ensure that ConstantReconnectionPolicy is what we should be testing - if (!(cluster().getConfiguration().getPolicies().getReconnectionPolicy() instanceof ConstantReconnectionPolicy)) { - fail("Set policy does not match retrieved policy."); - } + // Test basic getters + ConstantReconnectionPolicy reconnectionPolicy = + (ConstantReconnectionPolicy) + cluster().getConfiguration().getPolicies().getReconnectionPolicy(); + assertTrue(reconnectionPolicy.getConstantDelayMs() == 10 * 1000); + + // Test erroneous instantiations + try { + new ConstantReconnectionPolicy(-1); + fail(); + } catch (IllegalArgumentException e) { + // ok + } - // Test basic getters - ConstantReconnectionPolicy reconnectionPolicy = (ConstantReconnectionPolicy) cluster().getConfiguration().getPolicies().getReconnectionPolicy(); - assertTrue(reconnectionPolicy.getConstantDelayMs() == 10 * 1000); + // Test nextDelays() + ReconnectionPolicy.ReconnectionSchedule schedule = + new ConstantReconnectionPolicy(10 * 1000).newSchedule(); + assertTrue(schedule.nextDelayMs() == 10000); + assertTrue(schedule.nextDelayMs() == 10000); + assertTrue(schedule.nextDelayMs() == 10000); + assertTrue(schedule.nextDelayMs() == 10000); + assertTrue(schedule.nextDelayMs() == 10000); + + // Run integration test + long restartTime = 32; // matches the above test + long retryTime = 40; // 2nd cycle start time + long breakTime = 10; // time until next reconnection attempt + + // TODO: Try to sort out variance + // reconnectionPolicyTest(restartTime, retryTime, breakTime); + } + + public void reconnectionPolicyTest(long restartTime, long retryTime, long breakTime) + throws Throwable { + createSchema(1); + init(12); + query(12); + + // Ensure a basic test works + assertQueried(TestUtils.IP_PREFIX + '1', 12); + resetCoordinators(); + ccm().forceStop(1); + + // Start timing and ensure that the node is down + long startTime = 0; + try { + startTime = System.nanoTime() / 1000000000; + query(12); + fail("Test race condition where node has not shut off quickly enough."); + } catch (NoHostAvailableException e) { + // ok + } - // Test erroneous instantiations - try { - new 
ConstantReconnectionPolicy(-1); - fail(); - } catch (IllegalArgumentException e) { - //ok - } + long thisTime; + boolean restarted = false; + while (true) { + thisTime = System.nanoTime() / 1000000000; - // Test nextDelays() - ReconnectionPolicy.ReconnectionSchedule schedule = new ConstantReconnectionPolicy(10 * 1000).newSchedule(); - assertTrue(schedule.nextDelayMs() == 10000); - assertTrue(schedule.nextDelayMs() == 10000); - assertTrue(schedule.nextDelayMs() == 10000); - assertTrue(schedule.nextDelayMs() == 10000); - assertTrue(schedule.nextDelayMs() == 10000); - - // Run integration test - long restartTime = 32; // matches the above test - long retryTime = 40; // 2nd cycle start time - long breakTime = 10; // time until next reconnection attempt - - // TODO: Try to sort out variance - //reconnectionPolicyTest(restartTime, retryTime, breakTime); - } + // Restart node at restartTime + if (!restarted && thisTime - startTime > restartTime) { + ccm().start(1); + restarted = true; + } - public void reconnectionPolicyTest(long restartTime, long retryTime, long breakTime) throws Throwable { - createSchema(1); - init(12); + // Continue testing queries each second + try { query(12); - - // Ensure a basic test works assertQueried(TestUtils.IP_PREFIX + '1', 12); resetCoordinators(); - ccm().forceStop(1); - // Start timing and ensure that the node is down - long startTime = 0; - try { - startTime = System.nanoTime() / 1000000000; - query(12); - fail("Test race condition where node has not shut off quickly enough."); - } catch (NoHostAvailableException e) { - //ok + // Ensure the time when the query completes successfully is what was expected + assertTrue( + retryTime - 2 < thisTime - startTime && thisTime - startTime < retryTime + 2, + String.format( + "Waited %s seconds instead an expected %s seconds wait", + thisTime - startTime, retryTime)); + } catch (NoHostAvailableException e) { + Thread.sleep(1000); + continue; + } + + Thread.sleep(breakTime); + + // The the same query once more, just to be sure + query(12); + assertQueried(TestUtils.IP_PREFIX + '1', 12); + resetCoordinators(); + + // Ensure the reconnection times reset + ccm().forceStop(1); + + // Start timing and ensure that the node is down + startTime = 0; + try { + startTime = System.nanoTime() / 1000000000; + query(12); + fail("Test race condition where node has not shut off quickly enough."); + } catch (NoHostAvailableException e) { + // ok + } + + restarted = false; + while (true) { + thisTime = System.nanoTime() / 1000000000; + + // Restart node at restartTime + if (!restarted && thisTime - startTime > restartTime) { + ccm().start(1); + restarted = true; } - long thisTime; - boolean restarted = false; - while (true) { - thisTime = System.nanoTime() / 1000000000; - - // Restart node at restartTime - if (!restarted && thisTime - startTime > restartTime) { - ccm().start(1); - restarted = true; - } - - // Continue testing queries each second - try { - query(12); - assertQueried(TestUtils.IP_PREFIX + '1', 12); - resetCoordinators(); - - // Ensure the time when the query completes successfully is what was expected - assertTrue(retryTime - 2 < thisTime - startTime && thisTime - startTime < retryTime + 2, String.format("Waited %s seconds instead an expected %s seconds wait", thisTime - startTime, retryTime)); - } catch (NoHostAvailableException e) { - Thread.sleep(1000); - continue; - } - - Thread.sleep(breakTime); - - // The the same query once more, just to be sure - query(12); - assertQueried(TestUtils.IP_PREFIX + '1', 12); - 
resetCoordinators(); - - // Ensure the reconnection times reset - ccm().forceStop(1); - - // Start timing and ensure that the node is down - startTime = 0; - try { - startTime = System.nanoTime() / 1000000000; - query(12); - fail("Test race condition where node has not shut off quickly enough."); - } catch (NoHostAvailableException e) { - //ok - } - - restarted = false; - while (true) { - thisTime = System.nanoTime() / 1000000000; - - // Restart node at restartTime - if (!restarted && thisTime - startTime > restartTime) { - ccm().start(1); - restarted = true; - } - - // Continue testing queries each second - try { - query(12); - assertQueried(TestUtils.IP_PREFIX + '1', 12); - resetCoordinators(); - - // Ensure the time when the query completes successfully is what was expected - assertTrue(retryTime - 2 < thisTime - startTime && thisTime - startTime < retryTime + 2, String.format("Waited %s seconds instead an expected %s seconds wait", thisTime - startTime, retryTime)); - } catch (NoHostAvailableException e) { - Thread.sleep(1000); - continue; - } - break; - } - break; + // Continue testing queries each second + try { + query(12); + assertQueried(TestUtils.IP_PREFIX + '1', 12); + resetCoordinators(); + + // Ensure the time when the query completes successfully is what was expected + assertTrue( + retryTime - 2 < thisTime - startTime && thisTime - startTime < retryTime + 2, + String.format( + "Waited %s seconds instead an expected %s seconds wait", + thisTime - startTime, retryTime)); + } catch (NoHostAvailableException e) { + Thread.sleep(1000); + continue; } + break; + } + break; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java index c455852a0eb..5f872409cb2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,14 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + import com.datastax.driver.core.Host.State; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.datastax.driver.core.policies.DelegatingLoadBalancingPolicy; @@ -22,290 +32,279 @@ import com.datastax.driver.core.policies.RoundRobinPolicy; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.Test; - import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Collections; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.mockito.Mockito.*; - -/** - * Scenarios where a Cluster loses connection to a host and reconnects. - */ +/** Scenarios where a Cluster loses connection to a host and reconnects. */ @CreateCCM(PER_METHOD) public class ReconnectionTest extends CCMTestsSupport { - private final int reconnectionDelayMillis = 1000; + private final int reconnectionDelayMillis = 1000; - @CCMConfig(dirtiesContext = true, numberOfNodes = 2, createCluster = false) - @Test(groups = "long") - public void should_reconnect_after_full_connectivity_loss() throws InterruptedException { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + @CCMConfig(dirtiesContext = true, numberOfNodes = 2, createCluster = false) + @Test(groups = "long") + public void should_reconnect_after_full_connectivity_loss() throws InterruptedException { + Cluster cluster = + register( + createClusterBuilder() .withReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)) .build()); - cluster.connect(); - - assertThat(cluster).usesControlHost(1); - - // Stop all nodes. We won't get notifications anymore, so the only mechanism to - // reconnect is the background reconnection attempts. - ccm().stop(2); - ccm().stop(1); - - ccm().waitForDown(2); - ccm().start(2); - ccm().waitForUp(2); - - assertThat(cluster).host(2).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); - - // Give the control connection a few moments to reconnect - TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); - assertThat(cluster).usesControlHost(2); + cluster.connect(); + + assertThat(cluster).usesControlHost(1); + + // Stop all nodes. We won't get notifications anymore, so the only mechanism to + // reconnect is the background reconnection attempts. 
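+    // (With the ConstantReconnectionPolicy(reconnectionDelayMillis) configured above, attempts
+    // fire roughly once per second, so sleeping for twice that delay further down should give
+    // the control connection time to reconnect.)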
+ ccm().stop(2); + ccm().stop(1); + + ccm().waitForDown(2); + ccm().start(2); + ccm().waitForUp(2); + + assertThat(cluster).host(2).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); + + // Give the control connection a few moments to reconnect + TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); + assertThat(cluster).usesControlHost(2); + } + + @CCMConfig( + dirtiesContext = true, + config = "authenticator:PasswordAuthenticator", + jvmArgs = "-Dcassandra.superuser_setup_delay_ms=0", + createCluster = false) + @Test(groups = "long") + public void should_keep_reconnecting_on_authentication_error() throws InterruptedException { + // For C* 1.2, sleep before attempting to connect as there is a small delay between + // user being created. + if (ccm().getCassandraVersion().getMajor() < 2) { + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); } - - @CCMConfig( - dirtiesContext = true, - config = "authenticator:PasswordAuthenticator", - jvmArgs = "-Dcassandra.superuser_setup_delay_ms=0", - createCluster = false) - @Test(groups = "long") - public void should_keep_reconnecting_on_authentication_error() throws InterruptedException { - // For C* 1.2, sleep before attempting to connect as there is a small delay between - // user being created. - if (ccm().getCassandraVersion().getMajor() < 2) { - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - CountingReconnectionPolicy reconnectionPolicy = new CountingReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)); - CountingAuthProvider authProvider = new CountingAuthProvider("cassandra", "cassandra"); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + CountingReconnectionPolicy reconnectionPolicy = + new CountingReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)); + CountingAuthProvider authProvider = new CountingAuthProvider("cassandra", "cassandra"); + Cluster cluster = + register( + createClusterBuilder() // Start with the correct auth so that we can initialize the server .withAuthProvider(authProvider) .withReconnectionPolicy(reconnectionPolicy) .build()); - cluster.init(); - assertThat(cluster).usesControlHost(1); - - // Stop the server, set wrong credentials and restart - ccm().stop(1); - ccm().waitForDown(1); - authProvider.setPassword("wrongPassword"); - ccm().start(1); - ccm().waitForUp(1); - - // Wait a few iterations to ensure that our authProvider has returned the wrong credentials at least twice - // NB: authentication errors show up in the logs - int initialCount = authProvider.count.get(); - long initialMetricCount = cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount(); - int iterations = 0, maxIterations = 12; // make sure we don't wait indefinitely - do { - iterations += 1; - TimeUnit.SECONDS.sleep(5); - } while (iterations < maxIterations && authProvider.count.get() <= initialCount + 1); - assertThat(iterations).isLessThan(maxIterations); - // Number of authentication errors should have increased. 
- assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()) - .isGreaterThan(initialMetricCount); - - // Fix the credentials - authProvider.setPassword("cassandra"); - - // The driver should eventually reconnect to the node - assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); - } - - @CCMConfig(dirtiesContext = true, numberOfNodes = 2, createCluster = false) - @Test(groups = "long") - public void should_cancel_reconnection_attempts() throws InterruptedException { - // Stop a node and cancel the reconnection attempts to it - CountingReconnectionPolicy reconnectionPolicy = new CountingReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)); - - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withReconnectionPolicy(reconnectionPolicy).build()); - cluster.connect(); - - // Stop a node and cancel the reconnection attempts to it - ccm().stop(2); - Host host2 = TestUtils.findHost(cluster, 2); - host2.getReconnectionAttemptFuture().cancel(false); - - // The reconnection count should not vary over time anymore - int initialCount = reconnectionPolicy.count.get(); - TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); - assertThat(reconnectionPolicy.count.get()).isEqualTo(initialCount); - - // Restart the node, which will trigger an UP notification - ccm().start(2); - ccm().waitForUp(2); - - // The driver should now see the node as UP again - assertThat(cluster).host(2).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); - } - - @CCMConfig(dirtiesContext = true, createCluster = false) - @Test(groups = "long") - public void should_trigger_one_time_reconnect() throws InterruptedException, IOException { - TogglabePolicy loadBalancingPolicy = new TogglabePolicy(new RoundRobinPolicy()); - Cluster cluster = register(Cluster.builder() - .addContactPointsWithPorts(ccm().addressOfNode(1)) - .withPort(ccm().getBinaryPort()) + cluster.init(); + assertThat(cluster).usesControlHost(1); + + // Stop the server, set wrong credentials and restart + ccm().stop(1); + ccm().waitForDown(1); + authProvider.setPassword("wrongPassword"); + ccm().start(1); + ccm().waitForUp(1); + + // Wait a few iterations to ensure that our authProvider has returned the wrong credentials at + // least twice + // NB: authentication errors show up in the logs + int initialCount = authProvider.count.get(); + long initialMetricCount = + cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount(); + int iterations = 0, maxIterations = 12; // make sure we don't wait indefinitely + do { + iterations += 1; + TimeUnit.SECONDS.sleep(5); + } while (iterations < maxIterations && authProvider.count.get() <= initialCount + 1); + assertThat(iterations).isLessThan(maxIterations); + // Number of authentication errors should have increased. 
+ assertThat(cluster.getMetrics().getErrorMetrics().getAuthenticationErrors().getCount()) + .isGreaterThan(initialMetricCount); + + // Fix the credentials + authProvider.setPassword("cassandra"); + + // The driver should eventually reconnect to the node + assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); + } + + @CCMConfig(dirtiesContext = true, numberOfNodes = 2, createCluster = false) + @Test(groups = "long") + public void should_cancel_reconnection_attempts() throws InterruptedException { + // Stop a node and cancel the reconnection attempts to it + CountingReconnectionPolicy reconnectionPolicy = + new CountingReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)); + + Cluster cluster = + register(createClusterBuilder().withReconnectionPolicy(reconnectionPolicy).build()); + cluster.connect(); + + // Stop a node and cancel the reconnection attempts to it + ccm().stop(2); + Host host2 = TestUtils.findHost(cluster, 2); + host2.getReconnectionAttemptFuture().cancel(false); + + // The reconnection count should not vary over time anymore + int initialCount = reconnectionPolicy.count.get(); + TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); + assertThat(reconnectionPolicy.count.get()).isEqualTo(initialCount); + + // Restart the node, which will trigger an UP notification + ccm().start(2); + ccm().waitForUp(2); + + // The driver should now see the node as UP again + assertThat(cluster).host(2).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); + } + + @CCMConfig(dirtiesContext = true, createCluster = false) + @Test(groups = "long") + public void should_trigger_one_time_reconnect() throws InterruptedException, IOException { + TogglabePolicy loadBalancingPolicy = new TogglabePolicy(new RoundRobinPolicy()); + Cluster cluster = + register( + createClusterBuilder() .withLoadBalancingPolicy(loadBalancingPolicy) .withReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)) .build()); - cluster.connect(); - - // Tweak the LBP so that the control connection never reconnects, otherwise - // it would interfere with the rest of the test (this is a bit of a hack) - loadBalancingPolicy.returnEmptyQueryPlan = true; - - // Stop the node, ignore it and cancel reconnection attempts to it - ccm().stop(1); - ccm().waitForDown(1); - assertThat(cluster).host(1).goesDownWithin(20, SECONDS); - Host host1 = TestUtils.findHost(cluster, 1); - loadBalancingPolicy.setDistance(TestUtils.findHost(cluster, 1), HostDistance.IGNORED); - ListenableFuture reconnectionAttemptFuture = host1.getReconnectionAttemptFuture(); - if (reconnectionAttemptFuture != null) - reconnectionAttemptFuture.cancel(false); - - // Trigger a one-time reconnection attempt (this will fail) - host1.tryReconnectOnce(); - - // Wait for a few reconnection cycles before checking - TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); - assertThat(cluster).host(1).hasState(State.DOWN); - - // Restart the node (this will not trigger an UP notification thanks to our - // hack to disable the control connection reconnects). The host should stay - // down for the driver. - ccm().start(1); - ccm().waitForUp(1); - assertThat(cluster).host(1).hasState(State.DOWN); - - TimeUnit.SECONDS.sleep(Cluster.NEW_NODE_DELAY_SECONDS); - assertThat(cluster).host(1).hasState(State.DOWN); - - // Trigger another one-time reconnection attempt (this will succeed). The - // host should be back up. 
- host1.tryReconnectOnce(); - assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); - } - - /** - * The connection established by a successful reconnection attempt should be reused in one of the - * connection pools (JAVA-505). - */ - @CCMConfig(dirtiesContext = true, createCluster = false) - @Test(groups = "long") - public void should_use_connection_from_reconnection_in_pool() { - TogglabePolicy loadBalancingPolicy = new TogglabePolicy(new RoundRobinPolicy()); - - // Spy SocketOptions.getKeepAlive to count how many connections were instantiated. - SocketOptions socketOptions = spy(new SocketOptions()); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + cluster.connect(); + + // Tweak the LBP so that the control connection never reconnects, otherwise + // it would interfere with the rest of the test (this is a bit of a hack) + loadBalancingPolicy.returnEmptyQueryPlan = true; + + // Stop the node, ignore it and cancel reconnection attempts to it + ccm().stop(1); + ccm().waitForDown(1); + assertThat(cluster).host(1).goesDownWithin(20, SECONDS); + Host host1 = TestUtils.findHost(cluster, 1); + loadBalancingPolicy.setDistance(TestUtils.findHost(cluster, 1), HostDistance.IGNORED); + ListenableFuture reconnectionAttemptFuture = host1.getReconnectionAttemptFuture(); + if (reconnectionAttemptFuture != null) reconnectionAttemptFuture.cancel(false); + + // Trigger a one-time reconnection attempt (this will fail) + host1.tryReconnectOnce(); + + // Wait for a few reconnection cycles before checking + TimeUnit.MILLISECONDS.sleep(reconnectionDelayMillis * 2); + assertThat(cluster).host(1).hasState(State.DOWN); + + // Restart the node (this will not trigger an UP notification thanks to our + // hack to disable the control connection reconnects). The host should stay + // down for the driver. + ccm().start(1); + ccm().waitForUp(1); + assertThat(cluster).host(1).hasState(State.DOWN); + + TimeUnit.SECONDS.sleep(Cluster.NEW_NODE_DELAY_SECONDS); + assertThat(cluster).host(1).hasState(State.DOWN); + + // Trigger another one-time reconnection attempt (this will succeed). The + // host should be back up. + host1.tryReconnectOnce(); + assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); + } + + /** + * The connection established by a successful reconnection attempt should be reused in one of the + * connection pools (JAVA-505). + */ + @CCMConfig(dirtiesContext = true, createCluster = false) + @Test(groups = "long") + public void should_use_connection_from_reconnection_in_pool() { + TogglabePolicy loadBalancingPolicy = new TogglabePolicy(new RoundRobinPolicy()); + + // Spy SocketOptions.getKeepAlive to count how many connections were instantiated. + SocketOptions socketOptions = spy(new SocketOptions()); + Cluster cluster = + register( + createClusterBuilder() .withReconnectionPolicy(new ConstantReconnectionPolicy(5000)) .withLoadBalancingPolicy(loadBalancingPolicy) .withSocketOptions(socketOptions) .withProtocolVersion(ccm().getProtocolVersion()) .build()); - // Create two sessions to have multiple pools - cluster.connect(); - cluster.connect(); - - int corePoolSize = TestUtils.numberOfLocalCoreConnections(cluster); - - // Right after init, 1 connection has been opened by the control connection, and the core size for each pool. - verify(socketOptions, times(1 + corePoolSize * 2)).getKeepAlive(); - - // Tweak the LBP so that the control connection never reconnects. 
This makes it easier - // to reason about the number of connection attempts. - loadBalancingPolicy.returnEmptyQueryPlan = true; - - // Stop the node and cancel the reconnection attempts to it - ccm().stop(1); - ccm().waitForDown(1); - assertThat(cluster).host(1).goesDownWithin(20, SECONDS); - Host host1 = TestUtils.findHost(cluster, 1); - host1.getReconnectionAttemptFuture().cancel(false); - - ccm().start(1); - ccm().waitForUp(1); - - // Reset the spy and count the number of connections attempts for 1 reconnect - reset(socketOptions); - host1.tryReconnectOnce(); - assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); - // Expect 1 connection from the reconnection attempt 3 for the pools (we need 4 - // but the one from the reconnection attempt gets reused). - verify(socketOptions, times(corePoolSize * 2)).getKeepAlive(); + // Create two sessions to have multiple pools + cluster.connect(); + cluster.connect(); + + int corePoolSize = TestUtils.numberOfLocalCoreConnections(cluster); + + // Right after init, 1 connection has been opened by the control connection, and the core size + // for each pool. + verify(socketOptions, times(1 + corePoolSize * 2)).getKeepAlive(); + + // Tweak the LBP so that the control connection never reconnects. This makes it easier + // to reason about the number of connection attempts. + loadBalancingPolicy.returnEmptyQueryPlan = true; + + // Stop the node and cancel the reconnection attempts to it + ccm().stop(1); + ccm().waitForDown(1); + assertThat(cluster).host(1).goesDownWithin(20, SECONDS); + Host host1 = TestUtils.findHost(cluster, 1); + host1.getReconnectionAttemptFuture().cancel(false); + + ccm().start(1); + ccm().waitForUp(1); + + // Reset the spy and count the number of connections attempts for 1 reconnect + reset(socketOptions); + host1.tryReconnectOnce(); + assertThat(cluster).host(1).comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS * 2, SECONDS); + // Expect 1 connection from the reconnection attempt 3 for the pools (we need 4 + // but the one from the reconnection attempt gets reused). + verify(socketOptions, times(corePoolSize * 2)).getKeepAlive(); + } + + /** + * Extends the plain text auth provider to track how many times the credentials have been + * requested + */ + static class CountingAuthProvider extends PlainTextAuthProvider { + final AtomicInteger count = new AtomicInteger(); + + CountingAuthProvider(String username, String password) { + super(username, password); + } + + @Override + public Authenticator newAuthenticator(EndPoint host, String authenticator) { + count.incrementAndGet(); + return super.newAuthenticator(host, authenticator); + } + } + + /** + * A load balancing policy that: - can be "disabled" by having its query plan return no hosts. - + * can be instructed to return a specific distance for some hosts. + */ + public static class TogglabePolicy extends DelegatingLoadBalancingPolicy { + + volatile boolean returnEmptyQueryPlan; + final ConcurrentMap distances = new ConcurrentHashMap(); + + public TogglabePolicy(LoadBalancingPolicy delegate) { + super(delegate); + } + + @Override + public HostDistance distance(Host host) { + HostDistance distance = distances.get(host); + return (distance != null) ? 
distance : super.distance(host); } - /** - * Extends the plain text auth provider to track how many times the credentials have been requested - */ - static class CountingAuthProvider extends PlainTextAuthProvider { - final AtomicInteger count = new AtomicInteger(); - - CountingAuthProvider(String username, String password) { - super(username, password); - } - - @Override - public Authenticator newAuthenticator(InetSocketAddress host, String authenticator) { - count.incrementAndGet(); - return super.newAuthenticator(host, authenticator); - } + public void setDistance(Host host, HostDistance distance) { + distances.put(host, distance); } - /** - * A load balancing policy that: - * - can be "disabled" by having its query plan return no hosts. - * - can be instructed to return a specific distance for some hosts. - */ - public static class TogglabePolicy extends DelegatingLoadBalancingPolicy { - - volatile boolean returnEmptyQueryPlan; - final ConcurrentMap distances = new ConcurrentHashMap(); - - public TogglabePolicy(LoadBalancingPolicy delegate) { - super(delegate); - } - - @Override - public HostDistance distance(Host host) { - HostDistance distance = distances.get(host); - return (distance != null) - ? distance - : super.distance(host); - } - - public void setDistance(Host host, HostDistance distance) { - distances.put(host, distance); - } - - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - if (returnEmptyQueryPlan) - return Collections.emptyList().iterator(); - else - return super.newQueryPlan(loggedKeyspace, statement); - } + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + if (returnEmptyQueryPlan) return Collections.emptyList().iterator(); + else return super.newQueryPlan(loggedKeyspace, statement); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java b/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java index 87fabe9b2b0..dbeb48f1eea 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,9 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static java.util.concurrent.TimeUnit.SECONDS; + import com.datastax.driver.core.Host.State; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.datastax.driver.core.policies.LimitingLoadBalancingPolicy; @@ -22,81 +27,66 @@ import org.mockito.Mockito; import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static java.util.concurrent.TimeUnit.SECONDS; - -@CCMConfig( - dirtiesContext = true, - numberOfNodes = 2, - createCluster = false -) +@CCMConfig(dirtiesContext = true, numberOfNodes = 2, createCluster = false) public class RefreshConnectedHostTest extends CCMTestsSupport { - /** - * Tests {@link PoolingOptions#refreshConnectedHost(Host)} through a custom load balancing policy. - */ - @Test(groups = "long") - public void should_refresh_single_connected_host() { - // This will make the driver use at most 2 hosts, the others will be ignored - LimitingLoadBalancingPolicy loadBalancingPolicy = new LimitingLoadBalancingPolicy(new RoundRobinPolicy(), 2, 1); + /** + * Tests {@link PoolingOptions#refreshConnectedHost(Host)} through a custom load balancing policy. + */ + @Test(groups = "long") + public void should_refresh_single_connected_host() { + // This will make the driver use at most 2 hosts, the others will be ignored + LimitingLoadBalancingPolicy loadBalancingPolicy = + new LimitingLoadBalancingPolicy(new RoundRobinPolicy(), 2, 1); - PoolingOptions poolingOptions = Mockito.spy(new PoolingOptions()); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + PoolingOptions poolingOptions = Mockito.spy(new PoolingOptions()); + Cluster cluster = + register( + createClusterBuilderNoDebouncing() .withPoolingOptions(poolingOptions) .withLoadBalancingPolicy(loadBalancingPolicy) .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) - .withQueryOptions(TestUtils.nonDebouncingQueryOptions()) .build()); - Session session = cluster.connect(); + Session session = cluster.connect(); - assertThat(cluster).usesControlHost(1); - assertThat(cluster).host(1) - .hasState(State.UP) - .isAtDistance(HostDistance.LOCAL); - // Wait for the node to be up, because apparently on Jenkins it's still only ADDED when we reach this line - // Waiting for NEW_NODE_DELAY_SECONDS+1 allows the driver to create a connection pool and mark the node up - assertThat(cluster).host(2) - .comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS + 1, SECONDS) - .isAtDistance(HostDistance.LOCAL); + assertThat(cluster).usesControlHost(1); + assertThat(cluster).host(1).hasState(State.UP).isAtDistance(HostDistance.LOCAL); + // Wait for the node to be up, because apparently on Jenkins it's still only ADDED when we reach + // this line + // Waiting for NEW_NODE_DELAY_SECONDS+1 allows the driver to create a connection pool and mark + // the node up + assertThat(cluster) + .host(2) + .comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS + 1, SECONDS) + .isAtDistance(HostDistance.LOCAL); - // Add and bring host 3 up, its presence should be acknowledged but it should be ignored - ccm().add(3); - 
ccm().start(3); - ccm().waitForUp(3); + // Add and bring host 3 up, its presence should be acknowledged but it should be ignored + ccm().add(3); + ccm().start(3); + ccm().waitForUp(3); - assertThat(cluster).host(1) - .hasState(State.UP) - .isAtDistance(HostDistance.LOCAL); - assertThat(cluster).host(2) - .hasState(State.UP) - .isAtDistance(HostDistance.LOCAL); + assertThat(cluster).host(1).hasState(State.UP).isAtDistance(HostDistance.LOCAL); + assertThat(cluster).host(2).hasState(State.UP).isAtDistance(HostDistance.LOCAL); - // Ensure that the host is added to the Cluster. - assertThat(cluster).host(3) - .comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS + 1, SECONDS) - .isAtDistance(HostDistance.IGNORED); - assertThat(session).hasNoPoolFor(3); + // Ensure that the host is added to the Cluster. + assertThat(cluster) + .host(3) + .comesUpWithin(Cluster.NEW_NODE_DELAY_SECONDS + 1, SECONDS) + .isAtDistance(HostDistance.IGNORED); + assertThat(session).hasNoPoolFor(3); - // Kill host 2, host 3 should take its place - ccm().stop(2); - TestUtils.waitForUp(TestUtils.ipOfNode(3), cluster); + // Kill host 2, host 3 should take its place + ccm().stop(2); + TestUtils.waitForUp(TestUtils.ipOfNode(3), cluster); - assertThat(cluster).host(1) - .hasState(State.UP) - .isAtDistance(HostDistance.LOCAL); - assertThat(cluster).host(2) - .hasState(State.DOWN); - assertThat(cluster).host(3) - .hasState(State.UP) - .isAtDistance(HostDistance.LOCAL); - assertThat(session).hasPoolFor(3); + assertThat(cluster).host(1).hasState(State.UP).isAtDistance(HostDistance.LOCAL); + assertThat(cluster).host(2).hasState(State.DOWN); + assertThat(cluster).host(3).hasState(State.UP).isAtDistance(HostDistance.LOCAL); + assertThat(session).hasPoolFor(3); - // This is when refreshConnectedHost should have been invoked, it triggers pool creation when - // we switch the node from IGNORED to UP: - Mockito.verify(poolingOptions) - .refreshConnectedHost(TestUtils.findHost(cluster, 3)); - } + // This is when refreshConnectedHost should have been invoked, it triggers pool creation when + // we switch the node from IGNORED to UP: + Mockito.verify(poolingOptions).refreshConnectedHost(TestUtils.findHost(cluster, 3)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReplicationFactorTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReplicationFactorTest.java new file mode 100644 index 00000000000..3611646f270 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/ReplicationFactorTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.testng.annotations.Test; + +public class ReplicationFactorTest { + + @Test(groups = "unit") + public void should_parse_factor_from_string() { + ReplicationFactor transFactor = ReplicationFactor.fromString("3/1"); + assertThat(transFactor.fullReplicas()).isEqualTo(2); + assertThat(transFactor.hasTransientReplicas()).isTrue(); + assertThat(transFactor.transientReplicas()).isEqualTo(1); + + ReplicationFactor factor = ReplicationFactor.fromString("3"); + assertThat(factor.fullReplicas()).isEqualTo(3); + assertThat(factor.hasTransientReplicas()).isFalse(); + assertThat(factor.transientReplicas()).isEqualTo(0); + } + + @Test(groups = "unit") + public void should_create_string_from_factor() { + ReplicationFactor transFactor = new ReplicationFactor(3, 1); + assertThat(transFactor.toString()).isEqualTo("3/1"); + ReplicationFactor factor = new ReplicationFactor(3); + assertThat(factor.toString()).isEqualTo("3"); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReplicationStrategyTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReplicationStrategyTest.java index 6f11ebc46f3..2d935e718b3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReplicationStrategyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReplicationStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,69 +17,150 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; + import com.google.common.collect.ImmutableMap; import org.testng.annotations.Test; -import static org.testng.Assert.*; - public class ReplicationStrategyTest { - @Test(groups = "unit") - public void createSimpleReplicationStrategyTest() throws Exception { - ReplicationStrategy strategy = ReplicationStrategy.create( - ImmutableMap.builder() - .put("class", "SimpleStrategy") - .put("replication_factor", "3") - .build()); - - assertNotNull(strategy); - assertTrue(strategy instanceof ReplicationStrategy.SimpleStrategy); - } - - @Test(groups = "unit") - public void createNetworkTopologyStrategyTest() throws Exception { - ReplicationStrategy strategy = ReplicationStrategy.create( - ImmutableMap.builder() - .put("class", "NetworkTopologyStrategy") - .put("dc1", "2") - .put("dc2", "2") - .build()); - - assertNotNull(strategy); - assertTrue(strategy instanceof ReplicationStrategy.NetworkTopologyStrategy); - } - - @Test(groups = "unit") - public void createSimpleReplicationStrategyWithoutFactorTest() throws Exception { - ReplicationStrategy strategy = ReplicationStrategy.create( - ImmutableMap.builder() - .put("class", "SimpleStrategy") - //no replication_factor - .build()); - - assertNull(strategy); - } - - @Test(groups = "unit") - public void createUnknownStrategyTest() throws Exception { - ReplicationStrategy strategy = ReplicationStrategy.create( - ImmutableMap.builder() - //no such strategy - .put("class", "FooStrategy") - .put("foo_factor", "3") - .build()); - - assertNull(strategy); - } - - @Test(groups = "unit") - public void createUnspecifiedStrategyTest() throws Exception { - ReplicationStrategy strategy = ReplicationStrategy.create( - ImmutableMap.builder() - //nothing useful is set - .put("foo", "bar") - .build()); - - assertNull(strategy); - } + @Test(groups = "unit") + public void createSimpleReplicationStrategyTest() throws Exception { + ReplicationStrategy strategy = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "SimpleStrategy") + .put("replication_factor", "3") + .build()); + + assertNotNull(strategy); + assertTrue(strategy instanceof ReplicationStrategy.SimpleStrategy); + } + + @Test(groups = "unit") + public void createNetworkTopologyStrategyTest() throws Exception { + ReplicationStrategy strategy = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "2") + .put("dc2", "2") + .build()); + + assertNotNull(strategy); + assertTrue(strategy instanceof ReplicationStrategy.NetworkTopologyStrategy); + } + + @Test(groups = "unit") + public void createSimpleReplicationStrategyWithoutFactorTest() throws Exception { + ReplicationStrategy strategy = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "SimpleStrategy") + // no replication_factor + .build()); + + assertNull(strategy); + } + + @Test(groups = "unit") + public void createUnknownStrategyTest() throws Exception { + ReplicationStrategy strategy = + ReplicationStrategy.create( + ImmutableMap.builder() + 
// no such strategy + .put("class", "FooStrategy") + .put("foo_factor", "3") + .build()); + + assertNull(strategy); + } + + @Test(groups = "unit") + public void createUnspecifiedStrategyTest() throws Exception { + ReplicationStrategy strategy = + ReplicationStrategy.create( + ImmutableMap.builder() + // nothing useful is set + .put("foo", "bar") + .build()); + + assertNull(strategy); + } + + @Test(groups = "unit") + public void simpleStrategyEqualsTest() { + ReplicationStrategy rf3_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "SimpleStrategy") + .put("replication_factor", "3") + .build()); + + ReplicationStrategy rf3_2 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "SimpleStrategy") + .put("replication_factor", "3") + .build()); + + ReplicationStrategy rf2_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "SimpleStrategy") + .put("replication_factor", "2") + .build()); + + //noinspection EqualsWithItself + assertThat(rf3_1).isEqualTo(rf3_1); + assertThat(rf3_1).isEqualTo(rf3_2); + assertThat(rf3_1).isNotEqualTo(rf2_1); + } + + public void networkTopologyStrategyEqualsTest() { + ReplicationStrategy network_dc1x2_dc2x2_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "2") + .put("dc2", "2") + .build()); + ReplicationStrategy network_dc1x2_dc2x2_2 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "2") + .put("dc2", "2") + .build()); + ReplicationStrategy network_dc1x1_dc2x2_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "1") + .put("dc2", "2") + .build()); + ReplicationStrategy network_dc1x2_dc3x2_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "2") + .put("dc3", "2") + .build()); + ReplicationStrategy network_dc1x2_1 = + ReplicationStrategy.create( + ImmutableMap.builder() + .put("class", "NetworkTopologyStrategy") + .put("dc1", "2") + .build()); + + //noinspection EqualsWithItself + assertThat(network_dc1x2_dc2x2_1).isEqualTo(network_dc1x2_dc2x2_1); + assertThat(network_dc1x2_dc2x2_1).isEqualTo(network_dc1x2_dc2x2_2); + assertThat(network_dc1x2_dc2x2_1).isNotEqualTo(network_dc1x1_dc2x2_1); + assertThat(network_dc1x2_dc2x2_1).isNotEqualTo(network_dc1x2_dc3x2_1); + assertThat(network_dc1x2_dc2x2_1).isNotEqualTo(network_dc1x2_1); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/RequestHandlerTest.java b/driver-core/src/test/java/com/datastax/driver/core/RequestHandlerTest.java index 6c9fb974e0b..b045e5ba16b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RequestHandlerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RequestHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
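Editorial note on the ReplicationStrategyTest hunk above (not part of the patch): the new networkTopologyStrategyEqualsTest method is declared without a @Test annotation, unlike its sibling simpleStrategyEqualsTest, so TestNG will not run it. If that is unintentional, the method would need the same annotation:

    @Test(groups = "unit")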
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,80 +17,83 @@ */ package com.datastax.driver.core; -import com.google.common.collect.ImmutableMap; -import org.scassandra.Scassandra; -import org.scassandra.http.client.PrimingRequest; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.scassandra.http.client.PrimingRequest.then; +import com.google.common.collect.ImmutableMap; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.scassandra.http.client.PrimingRequest.then; +import org.scassandra.Scassandra; +import org.scassandra.http.client.PrimingRequest; +import org.testng.annotations.Test; public class RequestHandlerTest { - @Test(groups = "long") - public void should_handle_race_between_response_and_cancellation() { - final Scassandra scassandra = TestUtils.createScassandraServer(); - Cluster cluster = null; + @Test(groups = "short") + public void should_handle_race_between_response_and_cancellation() { + final Scassandra scassandra = TestUtils.createScassandraServer(); + Cluster cluster = null; - try { - // Use a mock server that takes a constant time to reply - scassandra.start(); - List> rows = Collections.>singletonList(ImmutableMap.of("key", 1)); - scassandra.primingClient().prime( - PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(rows).withFixedDelay(10L)) - .build() - ); + try { + // Use a mock server that takes a constant time to reply + scassandra.start(); + ScassandraCluster.primeSystemLocalRow(scassandra); + List> rows = + Collections.>singletonList(ImmutableMap.of("key", 1)); + scassandra + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(rows).withFixedDelay(10L)) + .build()); - cluster = Cluster.builder() - .addContactPoint(TestUtils.ipOfNode(1)) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions() - .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) - .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) - .setHeartbeatIntervalSeconds(0)) - .build(); + cluster = + Cluster.builder() + .addContactPoint(TestUtils.ipOfNode(1)) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions( + new PoolingOptions() + .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) + .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) + .setHeartbeatIntervalSeconds(0)) + .build(); - Session session = cluster.connect(); + Session session = cluster.connect(); - // To reproduce, we need to cancel the query exactly when the reply arrives. - // Run a few queries to estimate how much that will take. - int samples = 100; - long start = System.currentTimeMillis(); - for (int i = 0; i < samples; i++) { - session.execute("mock query"); - } - long elapsed = System.currentTimeMillis() - start; - long queryDuration = elapsed / samples; + // To reproduce, we need to cancel the query exactly when the reply arrives. + // Run a few queries to estimate how much that will take. 
+ int samples = 100; + long start = System.currentTimeMillis(); + for (int i = 0; i < samples; i++) { + session.execute("mock query"); + } + long elapsed = System.currentTimeMillis() - start; + long queryDuration = elapsed / samples; - // Now run queries and cancel them after that estimated time - for (int i = 0; i < 2000; i++) { - ResultSetFuture future = session.executeAsync("mock query"); - try { - future.getUninterruptibly(queryDuration, TimeUnit.MILLISECONDS); - } catch (TimeoutException e) { - future.cancel(true); - } - } - - Connection connection = getSingleConnection(session); - assertThat(connection.inFlight.get()).isEqualTo(0); - } finally { - if (cluster != null) - cluster.close(); - scassandra.stop(); + // Now run queries and cancel them after that estimated time + for (int i = 0; i < 2000; i++) { + ResultSetFuture future = session.executeAsync("mock query"); + try { + future.getUninterruptibly(queryDuration, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + future.cancel(true); } - } + } - private Connection getSingleConnection(Session session) { - HostConnectionPool pool = ((SessionManager) session).pools.values().iterator().next(); - return pool.connections.get(0); + Connection connection = getSingleConnection(session); + assertThat(connection.inFlight.get()).isEqualTo(0); + } finally { + if (cluster != null) cluster.close(); + scassandra.stop(); } + } + + private Connection getSingleConnection(Session session) { + HostConnectionPool pool = ((SessionManager) session).pools.values().iterator().next(); + return pool.connections.get(0); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ResultSetAssert.java b/driver-core/src/test/java/com/datastax/driver/core/ResultSetAssert.java index b8863949206..34013cedb69 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ResultSetAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ResultSetAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,127 +20,127 @@ import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.collect.Lists; -import org.assertj.core.api.AbstractListAssert; -import org.assertj.core.data.Index; - import java.util.Arrays; import java.util.List; +import org.assertj.core.api.AbstractListAssert; +import org.assertj.core.data.Index; -// better use ListAssert than IterableAssert to avoid attempting to consume the resultset more than once +// better use ListAssert than IterableAssert to avoid attempting to consume the resultset more than +// once public class ResultSetAssert extends AbstractListAssert, Row> { - // a helper assert object to simplify assertions on row contents - // by considering a row as a mere tuple - private final TupleListAssert helper; - - public ResultSetAssert(ResultSet actual) { - super(actual.all(), ResultSetAssert.class); - helper = new TupleListAssert(Lists.transform(this.actual, ROW_TO_TUPLE)); - } - - public static Tuple row(Object... cols) { - return new Tuple(cols); - } - - public ResultSetAssert contains(Tuple value, Index index) { - helper.contains(value, index); - return this; - } - - public ResultSetAssert containsSubsequence(Tuple... sequence) { - helper.containsSubsequence(sequence); - return this; - } - - public ResultSetAssert containsOnly(Tuple... values) { - helper.containsOnly(values); - return this; - } - - public ResultSetAssert endsWith(Tuple... sequence) { - helper.endsWith(sequence); - return this; - } - - public ResultSetAssert startsWith(Tuple... sequence) { - helper.startsWith(sequence); - return this; - } - - public ResultSetAssert doesNotContain(Tuple value, Index index) { - helper.doesNotContain(value, index); - return this; + // a helper assert object to simplify assertions on row contents + // by considering a row as a mere tuple + private final TupleListAssert helper; + + public ResultSetAssert(ResultSet actual) { + super(actual.all(), ResultSetAssert.class); + helper = new TupleListAssert(Lists.transform(this.actual, ROW_TO_TUPLE)); + } + + public static Tuple row(Object... cols) { + return new Tuple(cols); + } + + public ResultSetAssert contains(Tuple value, Index index) { + helper.contains(value, index); + return this; + } + + public ResultSetAssert containsSubsequence(Tuple... sequence) { + helper.containsSubsequence(sequence); + return this; + } + + public ResultSetAssert containsOnly(Tuple... values) { + helper.containsOnly(values); + return this; + } + + public ResultSetAssert endsWith(Tuple... sequence) { + helper.endsWith(sequence); + return this; + } + + public ResultSetAssert startsWith(Tuple... sequence) { + helper.startsWith(sequence); + return this; + } + + public ResultSetAssert doesNotContain(Tuple value, Index index) { + helper.doesNotContain(value, index); + return this; + } + + public ResultSetAssert doesNotContain(Tuple... values) { + helper.doesNotContain(values); + return this; + } + + public ResultSetAssert containsExactly(Tuple... values) { + helper.containsExactly(values); + return this; + } + + public ResultSetAssert containsOnlyOnce(Tuple... values) { + helper.containsOnlyOnce(values); + return this; + } + + public ResultSetAssert contains(Tuple... 
values) { + helper.contains(values); + return this; + } + + public ResultSetAssert containsSequence(Tuple... sequence) { + helper.containsSequence(sequence); + return this; + } + + public static class Tuple { + + final Object[] cols; + + public Tuple(Object... cols) { + this.cols = cols; } - public ResultSetAssert doesNotContain(Tuple... values) { - helper.doesNotContain(values); - return this; + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (getClass() != o.getClass()) return false; + Tuple tuple = (Tuple) o; + return Arrays.equals(cols, tuple.cols); } - public ResultSetAssert containsExactly(Tuple... values) { - helper.containsExactly(values); - return this; + @Override + public int hashCode() { + return Arrays.hashCode(cols); } - public ResultSetAssert containsOnlyOnce(Tuple... values) { - helper.containsOnlyOnce(values); - return this; + @Override + public String toString() { + return '(' + Joiner.on(',').join(cols) + ')'; } + } - public ResultSetAssert contains(Tuple... values) { - helper.contains(values); - return this; - } - - public ResultSetAssert containsSequence(Tuple... sequence) { - helper.containsSequence(sequence); - return this; - } - - public static class Tuple { - - final Object[] cols; - - public Tuple(Object... cols) { - this.cols = cols; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (getClass() != o.getClass()) return false; - Tuple tuple = (Tuple) o; - return Arrays.equals(cols, tuple.cols); - } - - @Override - public int hashCode() { - return Arrays.hashCode(cols); - } - - @Override - public String toString() { - return '(' + Joiner.on(',').join(cols) + ')'; - } - } - - private static final Function ROW_TO_TUPLE = new Function() { + private static final Function ROW_TO_TUPLE = + new Function() { @Override public Tuple apply(Row input) { - Object[] cols = new Object[input.getColumnDefinitions().size()]; - for (int i = 0; i < input.getColumnDefinitions().size(); i++) { - cols[i] = input.getObject(i); - } - return new Tuple(cols); + Object[] cols = new Object[input.getColumnDefinitions().size()]; + for (int i = 0; i < input.getColumnDefinitions().size(); i++) { + cols[i] = input.getObject(i); + } + return new Tuple(cols); } - }; + }; - private static class TupleListAssert extends AbstractListAssert, Tuple> { + private static class TupleListAssert + extends AbstractListAssert, Tuple> { - private TupleListAssert(List rows) { - super(rows, TupleListAssert.class); - } - + private TupleListAssert(List rows) { + super(rows, TupleListAssert.class); } - + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReusedStreamIdTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReusedStreamIdTest.java index 8ed42d433ab..ac78f84a125 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReusedStreamIdTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReusedStreamIdTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,140 +17,154 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.fail; + import com.datastax.driver.core.exceptions.OperationTimedOutException; import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.Uninterruptibles; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.Test; - import java.util.List; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.fail; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; @CreateCCM(PER_METHOD) public class ReusedStreamIdTest extends CCMTestsSupport { - static Logger logger = LoggerFactory.getLogger(ReusedStreamIdTest.class); - - @SuppressWarnings("unused") - public Cluster.Builder createClusterLowReadTimeout() { - // Low Read Timeout to trigger retry behavior. - return Cluster.builder().withSocketOptions(new SocketOptions().setReadTimeoutMillis(1000)); - } + static Logger logger = LoggerFactory.getLogger(ReusedStreamIdTest.class); - /** - * Ensures that if activity tied to future completion blocks netty io threads that this does not cause the - * driver to possibly invoke the wrong handler for a response, as described in JAVA-1179. - *

    - * This is accomplished by setting up a 2 node cluster and setting a low read timeout (1 second). The test - * submits 10 concurrent requests repeatedly for up to 500 queries and on completion of each request may block - * in a callback randomly (25% of the time) for 1 second, causing a retry on the next host to trigger. If a new - * stream id is not allocated on the retry, its possible it could use an already used stream id and cause the driver - * to invoke the wrong handlers for a response. The test checks for this by ensuring that the column returned in a - * response matches the one queried. If the column received does not match, the test fails. In cases where this - * bug is present, it should be detected within 10 seconds. - * - * @jira_ticket JAVA-1179 - * @test_category queries:async_callback - */ - @Test(groups = "long") - @CCMConfig(numberOfNodes = 2, clusterProvider = "createClusterLowReadTimeout") - public void should_not_receive_wrong_response_when_callbacks_block_io_thread() { - int concurrency = 10; - final Semaphore semaphore = new Semaphore(concurrency); - // RNG to determine sleep times. - final Random random = new Random(); + @SuppressWarnings("unused") + public Cluster.Builder createClusterLowReadTimeout() { + // Low Read Timeout to trigger retry behavior. + return Cluster.builder().withSocketOptions(new SocketOptions().setReadTimeoutMillis(1000)); + } - try { - // Use the system.local table and alternate between columns that are queried. - List columnsToGrab = cluster().getMetadata().getKeyspace("system").getTable("local").getColumns(); - assertThat(columnsToGrab.size()).isGreaterThan(1); + /** + * Ensures that if activity tied to future completion blocks netty io threads that this does not + * cause the driver to possibly invoke the wrong handler for a response, as described in + * JAVA-1179. + * + *

    This is accomplished by setting up a 2 node cluster and setting a low read timeout (1 + * second). The test submits 10 concurrent requests repeatedly for up to 500 queries and on + * completion of each request may block in a callback randomly (25% of the time) for 1 second, + * causing a retry on the next host to trigger. If a new stream id is not allocated on the retry, + * its possible it could use an already used stream id and cause the driver to invoke the wrong + * handlers for a response. The test checks for this by ensuring that the column returned in a + * response matches the one queried. If the column received does not match, the test fails. In + * cases where this bug is present, it should be detected within 10 seconds. + * + * @jira_ticket JAVA-1179 + * @test_category queries:async_callback + */ + @Test(groups = "long") + @CCMConfig(numberOfNodes = 2, clusterProvider = "createClusterLowReadTimeout") + public void should_not_receive_wrong_response_when_callbacks_block_io_thread() { + int concurrency = 10; + final Semaphore semaphore = new Semaphore(concurrency); + // RNG to determine sleep times. + final Random random = new Random(); - final CountDownLatch errorTrigger = new CountDownLatch(1); + try { + // Use the system.local table and alternate between columns that are queried. + List columnsToGrab = + cluster().getMetadata().getKeyspace("system").getTable("local").getColumns(); + assertThat(columnsToGrab.size()).isGreaterThan(1); - long start = System.currentTimeMillis(); - // 500 iterations will take roughly 1 minute. - int iterations = 500; - final AtomicInteger completed = new AtomicInteger(0); + final CountDownLatch errorTrigger = new CountDownLatch(1); - for (int i = 1; i <= iterations; i++) { - try { - if (errorTrigger.getCount() == 0) { - fail(String.format("Error triggered at or before %d of %d requests after %dms.", i, iterations, - System.currentTimeMillis() - start)); - } - semaphore.acquire(); - final String column = columnsToGrab.get(i % columnsToGrab.size()).getName(); - String query = String.format("select %s from system.local", column); - ResultSetFuture future = session().executeAsync(query); + long start = System.currentTimeMillis(); + // 500 iterations will take roughly 1 minute. + int iterations = 500; + final AtomicInteger completed = new AtomicInteger(0); - Futures.addCallback(future, new FutureCallback() { - @Override - public void onSuccess(ResultSet result) { - semaphore.release(); - // Expect the column that you queried to be present, if its not we got the wrong response - // back. - int columnIndex = result.getColumnDefinitions().getIndexOf(column); - if (columnIndex == -1) { - logger.error("Got response without column {}, got columns {} from Host {}.", column, - result.getColumnDefinitions(), result.getExecutionInfo().getQueriedHost()); - errorTrigger.countDown(); - return; - } - completed.incrementAndGet(); - // Block netty io thread 25% of the time. - int num = random.nextInt(1); - if (num == 0) { - // Sleep exactly one second, should trigger retry. 
- Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } + for (int i = 1; i <= iterations; i++) { + try { + if (errorTrigger.getCount() == 0) { + fail( + String.format( + "Error triggered at or before %d of %d requests after %dms.", + i, iterations, System.currentTimeMillis() - start)); + } + semaphore.acquire(); + final String column = columnsToGrab.get(i % columnsToGrab.size()).getName(); + String query = String.format("select %s from system.local", column); + ResultSetFuture future = session().executeAsync(query); - @Override - public void onFailure(Throwable t) { - semaphore.release(); - // Timeouts are inevitable because of low query timeouts and blocked threads. - if (!(t instanceof OperationTimedOutException)) { - logger.error("Unexpected error encountered.", t); - errorTrigger.countDown(); - } - } - }); - } catch (InterruptedException e) { - fail("Test interrupted", e); - } - if (i % (iterations / 10) == 0) { - logger.info("Submitted {} of {} requests. ({} completed successfully)", i, iterations, completed.get()); + GuavaCompatibility.INSTANCE.addCallback( + future, + new FutureCallback() { + @Override + public void onSuccess(ResultSet result) { + semaphore.release(); + // Expect the column that you queried to be present, if its not we got the wrong + // response + // back. + int columnIndex = result.getColumnDefinitions().getIndexOf(column); + if (columnIndex == -1) { + logger.error( + "Got response without column {}, got columns {} from Host {}.", + column, + result.getColumnDefinitions(), + result.getExecutionInfo().getQueriedHost()); + errorTrigger.countDown(); + return; + } + completed.incrementAndGet(); + // Block netty io thread 25% of the time. + int num = random.nextInt(1); + if (num == 0) { + // Sleep exactly one second, should trigger retry. + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } } - } - // Wait for 10 seconds for any remaining requests to possibly trigger an error, its likely - // that if we get to this point this will not happen. - Uninterruptibles.awaitUninterruptibly(errorTrigger, 10, TimeUnit.SECONDS); - if (errorTrigger.getCount() == 0) { - fail(String.format("Error triggered after %dms.", System.currentTimeMillis() - start)); - } - // Sanity check to ensure that at least some requests succeeded, we expect some failures if both - // hosts timeout as its likely they could be blocked on the event loop. - assertThat(completed.get()).isGreaterThan(0); - } finally { - try { - // Acquire all permits to make sure all inflight requests complete. - if (!semaphore.tryAcquire(concurrency, 10, TimeUnit.SECONDS)) { - fail("Could not acquire all permits within 10 seconds of completion."); + @Override + public void onFailure(Throwable t) { + semaphore.release(); + // Timeouts are inevitable because of low query timeouts and blocked threads. + if (!(t instanceof OperationTimedOutException)) { + logger.error("Unexpected error encountered.", t); + errorTrigger.countDown(); + } } - } catch (InterruptedException e) { - fail("Interrupted.", e); - } + }); + } catch (InterruptedException e) { + fail("Test interrupted", e); + } + if (i % (iterations / 10) == 0) { + logger.info( + "Submitted {} of {} requests. ({} completed successfully)", + i, + iterations, + completed.get()); + } + } + + // Wait for 10 seconds for any remaining requests to possibly trigger an error, its likely + // that if we get to this point this will not happen. 
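Editorial note on the success callback above (not part of the patch, and present in both the old and the reformatted code): random.nextInt(1) can only return 0, so the callback sleeps after every successful response rather than 25% of the time as the neighboring comment states. If the 25% rate is the intent, the check would need to look something like this sketch:

    // Block the netty io thread roughly 25% of the time.
    if (random.nextInt(4) == 0) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }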
+ Uninterruptibles.awaitUninterruptibly(errorTrigger, 10, TimeUnit.SECONDS); + if (errorTrigger.getCount() == 0) { + fail(String.format("Error triggered after %dms.", System.currentTimeMillis() - start)); + } + // Sanity check to ensure that at least some requests succeeded, we expect some failures if + // both + // hosts timeout as its likely they could be blocked on the event loop. + assertThat(completed.get()).isGreaterThan(0); + } finally { + try { + // Acquire all permits to make sure all inflight requests complete. + if (!semaphore.tryAcquire(concurrency, 10, TimeUnit.SECONDS)) { + fail("Could not acquire all permits within 10 seconds of completion."); } + } catch (InterruptedException e) { + fail("Interrupted.", e); + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java index f96275a36ea..1ae24cbc478 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,69 +17,74 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_KEYSTORE_FILE; +import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD; +import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE; +import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD; + import com.datastax.driver.core.exceptions.NoHostAvailableException; import org.testng.annotations.Test; -import static com.datastax.driver.core.CCMBridge.*; - @CCMConfig(auth = true) public class SSLAuthenticatedEncryptionTest extends SSLTestBase { - /** - *

- * Validates that an SSL connection can be established with client auth if the target
- * cassandra cluster is using SSL, requires client auth and would validate with the client's
- * certificate.
- *

    - * - * @test_category connection:ssl, authentication - * @expected_result Connection can be established to a cassandra node using SSL that requires client auth. - */ - @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) - public void should_connect_with_ssl_with_client_auth_and_node_requires_auth(SslImplementation sslImplementation) throws Exception { - connectWithSSLOptions(getSSLOptions(sslImplementation, true, true)); - } + /** + * Validates that an SSL connection can be established with client auth if the target cassandra + * cluster is using SSL, requires client auth and would validate with the client's certificate. + * + * @test_category connection:ssl, authentication + * @expected_result Connection can be established to a cassandra node using SSL that requires + * client auth. + */ + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) + public void should_connect_with_ssl_with_client_auth_and_node_requires_auth( + SslImplementation sslImplementation) throws Exception { + connectWithSSLOptions(getSSLOptions(sslImplementation, true, true)); + } - /** - *

- * Validates that an SSL connection can not be established with if the target
- * cassandra cluster is using SSL, requires client auth, but the client does not provide
- * sufficient certificate authentication.
- *

    - * - * @test_category connection:ssl, authentication - * @expected_result Connection is not established. - */ - @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class, expectedExceptions = {NoHostAvailableException.class}) - public void should_not_connect_without_client_auth_but_node_requires_auth(SslImplementation sslImplementation) throws Exception { - connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); - } + /** + * Validates that an SSL connection can not be established with if the target cassandra cluster is + * using SSL, requires client auth, but the client does not provide sufficient certificate + * authentication. + * + * @test_category connection:ssl, authentication + * @expected_result Connection is not established. + */ + @Test( + groups = "short", + dataProvider = "sslImplementation", + dataProviderClass = SSLTestBase.class, + expectedExceptions = {NoHostAvailableException.class}) + public void should_not_connect_without_client_auth_but_node_requires_auth( + SslImplementation sslImplementation) throws Exception { + connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); + } - /** - *

- * Validates that SSL connectivity can be configured via the standard javax.net.ssl System properties.
- *

    - * - * @test_category connection:ssl, authentication - * @expected_result Connection can be established. - */ - @Test(groups = "isolated") - public void should_use_system_properties_with_default_ssl_options() throws Exception { - System.setProperty("javax.net.ssl.keyStore", DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()); - System.setProperty("javax.net.ssl.keyStorePassword", DEFAULT_CLIENT_KEYSTORE_PASSWORD); - System.setProperty("javax.net.ssl.trustStore", DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); - System.setProperty("javax.net.ssl.trustStorePassword", DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); - try { - connectWithSSL(); - } finally { - try { - System.clearProperty("javax.net.ssl.keyStore"); - System.clearProperty("javax.net.ssl.keyStorePassword"); - System.clearProperty("javax.net.ssl.trustStore"); - System.clearProperty("javax.net.ssl.trustStorePassword"); - } catch (SecurityException e) { - // ok - } - } + /** + * Validates that SSL connectivity can be configured via the standard javax.net.ssl System + * properties. + * + * @test_category connection:ssl, authentication + * @expected_result Connection can be established. + */ + @Test(groups = "isolated") + public void should_use_system_properties_with_default_ssl_options() throws Exception { + System.setProperty("javax.net.ssl.keyStore", DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()); + System.setProperty("javax.net.ssl.keyStorePassword", DEFAULT_CLIENT_KEYSTORE_PASSWORD); + System.setProperty( + "javax.net.ssl.trustStore", DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); + System.setProperty("javax.net.ssl.trustStorePassword", DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); + try { + connectWithSSL(); + } finally { + try { + System.clearProperty("javax.net.ssl.keyStore"); + System.clearProperty("javax.net.ssl.keyStorePassword"); + System.clearProperty("javax.net.ssl.trustStore"); + System.clearProperty("javax.net.ssl.trustStorePassword"); + } catch (SecurityException e) { + // ok + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java index 3e9c5d8c48e..3db7d8b9125 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
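Editorial aside on the system-properties test above (not part of the patch): javax.net.ssl.keyStore, javax.net.ssl.keyStorePassword, javax.net.ssl.trustStore and javax.net.ssl.trustStorePassword are standard JSSE properties, so the same configuration can also be supplied as -D JVM flags. The test presumably exercises the driver's default SSL options, which pick these properties up when SSL is enabled without explicit options, roughly as in this sketch (the contact point is a placeholder):

    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1") // placeholder contact point
        .withSSL() // default SSLOptions honor the javax.net.ssl.* system properties
        .build();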
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,119 +17,116 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.NoHostAvailableException; -import org.testng.annotations.Test; - -import java.util.concurrent.TimeUnit; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE; import static com.datastax.driver.core.CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import java.util.concurrent.TimeUnit; +import org.testng.annotations.Test; + @CreateCCM(PER_METHOD) @CCMConfig(auth = false) public class SSLEncryptionTest extends SSLTestBase { - /** - *
- * Validates that an SSL connection can be established without client auth if the target - * cassandra cluster is using SSL and does not require auth. - *
    - * - * @test_category connection:ssl - * @expected_result Connection can be established to a cassandra node using SSL. - */ - @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) - public void should_connect_with_ssl_without_client_auth_and_node_doesnt_require_auth(SslImplementation sslImplementation) throws Exception { - connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); - } + /** + * Validates that an SSL connection can be established without client auth if the target cassandra + * cluster is using SSL and does not require auth. + * + * @test_category connection:ssl + * @expected_result Connection can be established to a cassandra node using SSL. + */ + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) + public void should_connect_with_ssl_without_client_auth_and_node_doesnt_require_auth( + SslImplementation sslImplementation) throws Exception { + connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); + } - /** - *
- * Validates that an SSL connection can not be established if the client does not trust - * the cassandra node's certificate. - *
    - * - * @test_category connection:ssl - * @expected_result Connection can not be established to a cassandra node using SSL with an untrusted cert. - */ - @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class, expectedExceptions = {NoHostAvailableException.class}) - public void should_not_connect_with_ssl_without_trusting_server_cert(SslImplementation sslImplementation) throws Exception { - connectWithSSLOptions(getSSLOptions(sslImplementation, false, false)); - } + /** + * Validates that an SSL connection can not be established if the client does not trust the + * cassandra node's certificate. + * + * @test_category connection:ssl + * @expected_result Connection can not be established to a cassandra node using SSL with an + * untrusted cert. + */ + @Test( + groups = "short", + dataProvider = "sslImplementation", + dataProviderClass = SSLTestBase.class, + expectedExceptions = {NoHostAvailableException.class}) + public void should_not_connect_with_ssl_without_trusting_server_cert( + SslImplementation sslImplementation) throws Exception { + connectWithSSLOptions(getSSLOptions(sslImplementation, false, false)); + } - /** - *
- * Validates that an SSL connection can not be established if the client is not specifying SSL, but - * the cassandra node is using SSL. - * - * - * - * Note that future versions of cassandra may support both SSL-encrypted and non-SSL connections - * simultaneously (CASSANDRA-8803) - *
    - * - * @test_category connection:ssl - * @expected_result Connection can not be established to a cassandra node using SSL and the client not using SSL. - */ - @Test(groups = "short", expectedExceptions = {NoHostAvailableException.class}) - public void should_not_connect_without_ssl_but_node_uses_ssl() throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); - cluster.connect(); - } + /** + * Validates that an SSL connection can not be established if the client is not specifying SSL, + * but the cassandra node is using SSL. + * + *
+ * + *
    Note that future versions of cassandra may support both SSL-encrypted and non-SSL + * connections simultaneously (CASSANDRA-8803) + * + * @test_category connection:ssl + * @expected_result Connection can not be established to a cassandra node using SSL and the client + * not using SSL. + */ + @Test( + groups = "short", + expectedExceptions = {NoHostAvailableException.class}) + public void should_not_connect_without_ssl_but_node_uses_ssl() throws Exception { + Cluster cluster = register(createClusterBuilder().build()); + cluster.connect(); + } - /** - *
- * Validates that if connection is lost to a node that is using SSL authentication, that connection can be - * re-established when the node becomes available again. - *
    - * - * @test_category connection:ssl - * @expected_result Connection is re-established within a sufficient amount of time after a node comes back online. - */ - @CCMConfig(dirtiesContext = true) - @Test(groups = "long", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) - public void should_reconnect_with_ssl_on_node_up(SslImplementation sslImplementation) throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(this.getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL(getSSLOptions(sslImplementation, true, true)) - .build()); + /** + * Validates that if connection is lost to a node that is using SSL authentication, that + * connection can be re-established when the node becomes available again. + * + * @test_category connection:ssl + * @expected_result Connection is re-established within a sufficient amount of time after a node + * comes back online. + */ + @CCMConfig(dirtiesContext = true) + @Test(groups = "long", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) + public void should_reconnect_with_ssl_on_node_up(SslImplementation sslImplementation) + throws Exception { + Cluster cluster = + register( + createClusterBuilder().withSSL(getSSLOptions(sslImplementation, true, true)).build()); - cluster.connect(); + cluster.connect(); - ccm().stop(1); - ccm().start(1); + ccm().stop(1); + ccm().start(1); - assertThat(cluster).host(1).comesUpWithin(TestUtils.TEST_BASE_NODE_WAIT, TimeUnit.SECONDS); - } + assertThat(cluster).host(1).comesUpWithin(TestUtils.TEST_BASE_NODE_WAIT, TimeUnit.SECONDS); + } - /** - *
- * Validates that SSL connectivity can be configured via the standard javax.net.ssl System properties. - *
    - * - * @test_category connection:ssl - * @expected_result Connection can be established. - */ - @Test(groups = "isolated") - public void should_use_system_properties_with_default_ssl_options() throws Exception { - System.setProperty("javax.net.ssl.trustStore", DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); - System.setProperty("javax.net.ssl.trustStorePassword", DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); - try { - connectWithSSL(); - } finally { - try { - System.clearProperty("javax.net.ssl.trustStore"); - System.clearProperty("javax.net.ssl.trustStorePassword"); - } catch (SecurityException e) { - // ok - } - } + /** + * Validates that SSL connectivity can be configured via the standard javax.net.ssl System + * properties. + * + * @test_category connection:ssl + * @expected_result Connection can be established. + */ + @Test(groups = "isolated") + public void should_use_system_properties_with_default_ssl_options() throws Exception { + System.setProperty( + "javax.net.ssl.trustStore", DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); + System.setProperty("javax.net.ssl.trustStorePassword", DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); + try { + connectWithSSL(); + } finally { + try { + System.clearProperty("javax.net.ssl.trustStore"); + System.clearProperty("javax.net.ssl.trustStorePassword"); + } catch (SecurityException e) { + // ok + } } - + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java b/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java index e470d5a7efa..f4f69045f6e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,126 +17,119 @@ */ package com.datastax.driver.core; -import io.netty.handler.ssl.SslContextBuilder; -import org.testng.annotations.DataProvider; - -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; -import java.security.KeyStore; -import java.security.SecureRandom; - import static com.datastax.driver.core.SSLTestBase.SslImplementation.JDK; import static com.datastax.driver.core.SSLTestBase.SslImplementation.NETTY_OPENSSL; import static io.netty.handler.ssl.SslProvider.OPENSSL; import static org.assertj.core.api.Assertions.fail; +import io.netty.handler.ssl.SslContextBuilder; +import java.security.KeyStore; +import java.security.SecureRandom; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import org.testng.annotations.DataProvider; + @CCMConfig(ssl = true, createCluster = false) public abstract class SSLTestBase extends CCMTestsSupport { - @DataProvider(name = "sslImplementation") - public static Object[][] sslImplementation() { - // Bypass Netty SSL if on JDK 1.6 since it only works on 1.7+. - String javaVersion = System.getProperty("java.version"); - if (javaVersion.startsWith("1.6")) { - return new Object[][]{{JDK}}; - } else { - return new Object[][]{{JDK}, {NETTY_OPENSSL}}; - } + @DataProvider(name = "sslImplementation") + public static Object[][] sslImplementation() { + // Bypass Netty SSL if on JDK 1.6 since it only works on 1.7+. + String javaVersion = System.getProperty("java.version"); + if (javaVersion.startsWith("1.6")) { + return new Object[][] {{JDK}}; + } else { + return new Object[][] {{JDK}, {NETTY_OPENSSL}}; } - - /** - *
- * Attempts to connect to a cassandra cluster with the given SSLOptions and then closes the - * created {@link Cluster} instance. - *
    - * - * @param sslOptions SSLOptions to use. - * @throws Exception A {@link com.datastax.driver.core.exceptions.NoHostAvailableException} will be - * raised here if connection cannot be established. - */ - protected void connectWithSSLOptions(SSLOptions sslOptions) throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL(sslOptions) - .build()); - cluster.connect(); + } + + /** + * Attempts to connect to a cassandra cluster with the given SSLOptions and then closes the + * created {@link Cluster} instance. + * + * @param sslOptions SSLOptions to use. + * @throws Exception A {@link com.datastax.driver.core.exceptions.NoHostAvailableException} will + * be raised here if connection cannot be established. + */ + protected void connectWithSSLOptions(SSLOptions sslOptions) throws Exception { + Cluster cluster = register(createClusterBuilder().withSSL(sslOptions).build()); + cluster.connect(); + } + + /** + * Attempts to connect to a cassandra cluster with using {@link Cluster.Builder#withSSL} with no + * provided {@link SSLOptions} and then closes the created {@link Cluster} instance. + * + * @throws Exception A {@link com.datastax.driver.core.exceptions.NoHostAvailableException} will + * be raised here if connection cannot be established. + */ + protected void connectWithSSL() throws Exception { + Cluster cluster = register(createClusterBuilder().withSSL().build()); + cluster.connect(); + } + + enum SslImplementation { + JDK, + NETTY_OPENSSL + } + + /** + * @param sslImplementation the SSL implementation to use + * @param clientAuth whether the client should authenticate + * @param trustingServer whether the client should trust the server's certificate + * @return {@link com.datastax.driver.core.SSLOptions} with the given configuration for server + * certificate validation and client certificate authentication. + */ + public SSLOptions getSSLOptions( + SslImplementation sslImplementation, boolean clientAuth, boolean trustingServer) + throws Exception { + + TrustManagerFactory tmf = null; + if (trustingServer) { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load( + this.getClass().getResourceAsStream(CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PATH), + CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD.toCharArray()); + + tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ks); } - /** - *
- * Attempts to connect to a cassandra cluster with using {@link Cluster.Builder#withSSL} with no - * provided {@link SSLOptions} and then closes the created {@link Cluster} instance. - *
    - * - * @throws Exception A {@link com.datastax.driver.core.exceptions.NoHostAvailableException} will be - * raised here if connection cannot be established. - */ - protected void connectWithSSL() throws Exception { - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL() - .build()); - cluster.connect(); - } - - enum SslImplementation {JDK, NETTY_OPENSSL} - - /** - * @param sslImplementation the SSL implementation to use - * @param clientAuth whether the client should authenticate - * @param trustingServer whether the client should trust the server's certificate - * @return {@link com.datastax.driver.core.SSLOptions} with the given configuration for - * server certificate validation and client certificate authentication. - */ - public SSLOptions getSSLOptions(SslImplementation sslImplementation, boolean clientAuth, boolean trustingServer) throws Exception { - - TrustManagerFactory tmf = null; - if (trustingServer) { - KeyStore ks = KeyStore.getInstance("JKS"); - ks.load( - this.getClass().getResourceAsStream(CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PATH), - CCMBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD.toCharArray()); - - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ks); + switch (sslImplementation) { + case JDK: + KeyManagerFactory kmf = null; + if (clientAuth) { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load( + this.getClass().getResourceAsStream(CCMBridge.DEFAULT_CLIENT_KEYSTORE_PATH), + CCMBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray()); + + kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(ks, CCMBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray()); } + SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init( + kmf != null ? kmf.getKeyManagers() : null, + tmf != null ? tmf.getTrustManagers() : null, + new SecureRandom()); - switch (sslImplementation) { - case JDK: - KeyManagerFactory kmf = null; - if (clientAuth) { - KeyStore ks = KeyStore.getInstance("JKS"); - ks.load( - this.getClass().getResourceAsStream(CCMBridge.DEFAULT_CLIENT_KEYSTORE_PATH), - CCMBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray()); - - kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, CCMBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray()); - } - - SSLContext sslContext = SSLContext.getInstance("TLS"); - sslContext.init(kmf != null ? kmf.getKeyManagers() : null, tmf != null ? 
tmf.getTrustManagers() : null, new SecureRandom()); + return RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(sslContext).build(); - return RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(sslContext).build(); + case NETTY_OPENSSL: + SslContextBuilder builder = + SslContextBuilder.forClient().sslProvider(OPENSSL).trustManager(tmf); - case NETTY_OPENSSL: - SslContextBuilder builder = SslContextBuilder - .forClient() - .sslProvider(OPENSSL) - .trustManager(tmf); - - if (clientAuth) { - builder.keyManager(CCMBridge.DEFAULT_CLIENT_CERT_CHAIN_FILE, CCMBridge.DEFAULT_CLIENT_PRIVATE_KEY_FILE); - } - - return new RemoteEndpointAwareNettySSLOptions(builder.build()); - default: - fail("Unsupported SSL implementation: " + sslImplementation); - return null; + if (clientAuth) { + builder.keyManager( + CCMBridge.DEFAULT_CLIENT_CERT_CHAIN_FILE, CCMBridge.DEFAULT_CLIENT_PRIVATE_KEY_FILE); } + + return new RemoteEndpointAwareNettySSLOptions(builder.build()); + default: + fail("Unsupported SSL implementation: " + sslImplementation); + return null; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java b/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java index 348eb8e7579..aa3a46b1455 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,600 +17,939 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.UUIDs; -import com.google.common.collect.*; -import org.scassandra.Scassandra; -import org.scassandra.ScassandraFactory; -import org.scassandra.cql.MapType; -import org.scassandra.http.client.PrimingClient; -import org.scassandra.http.client.PrimingRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static com.datastax.driver.core.Assertions.assertThat; +import static org.scassandra.cql.MapType.map; +import static org.scassandra.cql.PrimitiveType.BIG_INT; +import static org.scassandra.cql.PrimitiveType.BOOLEAN; +import static org.scassandra.cql.PrimitiveType.DOUBLE; +import static org.scassandra.cql.PrimitiveType.INET; +import static org.scassandra.cql.PrimitiveType.INT; +import static org.scassandra.cql.PrimitiveType.TEXT; +import static org.scassandra.cql.PrimitiveType.UUID; +import static org.scassandra.cql.SetType.set; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.types.ColumnMetadata.column; +import com.datastax.driver.core.utils.UUIDs; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeSet; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static org.scassandra.cql.MapType.map; -import static org.scassandra.cql.PrimitiveType.*; -import static org.scassandra.cql.SetType.set; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.types.ColumnMetadata.column; +import org.scassandra.Scassandra; +import org.scassandra.ScassandraFactory; +import org.scassandra.cql.MapType; +import org.scassandra.http.client.PrimingClient; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; +import org.scassandra.http.client.types.ColumnMetadata; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ScassandraCluster { - private static final Logger logger = LoggerFactory.getLogger(ScassandraCluster.class); - - private final String ipPrefix; - - private final int binaryPort; - - private final List instances; - - private final Map> dcNodeMap; - - private final List> keyspaceRows; - - private final String cassandraVersion; - - private static final java.util.UUID schemaVersion = UUIDs.random(); - - private final Map>> forcedPeerInfos; - - private final String keyspaceQuery; - - private final org.scassandra.http.client.types.ColumnMetadata[] keyspaceColumnTypes; - - ScassandraCluster(Integer[] nodes, String ipPrefix, int binaryPort, int adminPort, - List> keyspaceRows, String cassandraVersion, - Map>> forcedPeerInfos) { - this.ipPrefix = ipPrefix; - this.binaryPort = binaryPort; - // If cassandraVersion is 
not explicitly provided, use 3.0.10 as current version of SCassandra that - // supports up to protocol version 4. Without specifying a newer version, could cause the driver - // to think the node doesn't support a protocol version that it indeed does. - this.cassandraVersion = cassandraVersion != null ? - cassandraVersion : "3.0.10"; - this.forcedPeerInfos = forcedPeerInfos; - - int node = 1; - ImmutableList.Builder instanceListBuilder = ImmutableList.builder(); - ImmutableMap.Builder> dcNodeMapBuilder = ImmutableMap.builder(); - for (int dc = 0; dc < nodes.length; dc++) { - ImmutableList.Builder dcNodeListBuilder = ImmutableList.builder(); - for (int n = 0; n < nodes[dc]; n++) { - String ip = ipPrefix + node++; - Scassandra instance = ScassandraFactory.createServer(ip, binaryPort, ip, adminPort); - instanceListBuilder = instanceListBuilder.add(instance); - dcNodeListBuilder = dcNodeListBuilder.add(instance); - } - dcNodeMapBuilder.put(dc + 1, dcNodeListBuilder.build()); - } - instances = instanceListBuilder.build(); - dcNodeMap = dcNodeMapBuilder.build(); - - // Prime correct keyspace table based on C* version. - String[] versionArray = this.cassandraVersion.split("\\.|-"); - double major = Double.parseDouble(versionArray[0] + "." + versionArray[1]); - - if (major < 3.0) { - this.keyspaceQuery = "SELECT * FROM system.schema_keyspaces"; - this.keyspaceColumnTypes = SELECT_SCHEMA_KEYSPACES; - this.keyspaceRows = Lists.newArrayList(keyspaceRows); - // remove replication map as it is not part of the < 3.0 schema. - for (Map keyspaceRow : this.keyspaceRows) { - keyspaceRow.remove("replication"); - } - } else { - this.keyspaceQuery = "SELECT * FROM system_schema.keyspaces"; - this.keyspaceColumnTypes = SELECT_SCHEMA_KEYSPACES_V3; - this.keyspaceRows = Lists.newArrayList(keyspaceRows); - // remove strategy options and strategy class as these are not part of the 3.0+ schema. 
- for (Map keyspaceRow : this.keyspaceRows) { - keyspaceRow.remove("strategy_class"); - keyspaceRow.remove("strategy_options"); - } - } - + private static final Logger logger = LoggerFactory.getLogger(ScassandraCluster.class); + + private final int binaryPort; + + private final List instances; + private final List binaryAddresses; + private final List listenAddresses; + + private final Map> dcNodeMap; + + private final List> keyspaceRows; + + private final String cassandraVersion; + + private static final java.util.UUID schemaVersion = UUIDs.random(); + + private final Map>> forcedPeerInfos; + + private final String keyspaceQuery; + + private final org.scassandra.http.client.types.ColumnMetadata[] keyspaceColumnTypes; + + private final boolean peersV2; + + ScassandraCluster( + Integer[] nodes, + String ipPrefix, + int binaryPort, + int adminPort, + int listenPort, + List> keyspaceRows, + String cassandraVersion, + Map>> forcedPeerInfos, + boolean peersV2) { + this( + nodes, + binaryPort, + buildAddresses(nodes, ipPrefix, binaryPort), + buildAddresses(nodes, ipPrefix, adminPort), + buildAddresses(nodes, ipPrefix, listenPort), + keyspaceRows, + cassandraVersion, + forcedPeerInfos, + peersV2); + } + + ScassandraCluster( + Integer[] nodes, + int binaryPort, + List binaryAddresses, + List adminAddresses, + List listenAddresses, + List> keyspaceRows, + String cassandraVersion, + Map>> forcedPeerInfos, + boolean peersV2) { + this.binaryPort = binaryPort; + this.binaryAddresses = binaryAddresses; + this.listenAddresses = listenAddresses; + // If cassandraVersion is not explicitly provided, use 3.0.10 as current version of SCassandra + // that + // supports up to protocol version 4. Without specifying a newer version, could cause the + // driver + // to think the node doesn't support a protocol version that it indeed does. + this.cassandraVersion = cassandraVersion != null ? cassandraVersion : "3.0.10"; + this.forcedPeerInfos = forcedPeerInfos; + + int node = 1; + ImmutableList.Builder instanceListBuilder = ImmutableList.builder(); + ImmutableMap.Builder> dcNodeMapBuilder = ImmutableMap.builder(); + for (int dc = 0; dc < nodes.length; dc++) { + ImmutableList.Builder dcNodeListBuilder = ImmutableList.builder(); + for (int n = 0; n < nodes[dc]; n++) { + InetSocketAddress binaryAddress = binaryAddresses.get(node - 1); + InetSocketAddress adminAddress = adminAddresses.get(node - 1); + Scassandra instance = + ScassandraFactory.createServer( + binaryAddress.getAddress().getHostAddress(), + binaryAddress.getPort(), + adminAddress.getAddress().getHostAddress(), + adminAddress.getPort()); + instanceListBuilder.add(instance); + dcNodeListBuilder.add(instance); + node++; + } + dcNodeMapBuilder.put(dc + 1, dcNodeListBuilder.build()); } - - public Scassandra node(int node) { - return instances.get(node - 1); + instances = instanceListBuilder.build(); + dcNodeMap = dcNodeMapBuilder.build(); + + // Prime correct keyspace table based on C* version. + String[] versionArray = this.cassandraVersion.split("\\.|-"); + double major = Double.parseDouble(versionArray[0] + "." + versionArray[1]); + + if (major < 3.0) { + this.keyspaceQuery = "SELECT * FROM system.schema_keyspaces"; + this.keyspaceColumnTypes = SELECT_SCHEMA_KEYSPACES; + this.keyspaceRows = Lists.newArrayList(keyspaceRows); + // remove replication map as it is not part of the < 3.0 schema. 
+ for (Map keyspaceRow : this.keyspaceRows) { + keyspaceRow.remove("replication"); + } + } else { + this.keyspaceQuery = "SELECT * FROM system_schema.keyspaces"; + this.keyspaceColumnTypes = SELECT_SCHEMA_KEYSPACES_V3; + this.keyspaceRows = Lists.newArrayList(keyspaceRows); + // remove strategy options and strategy class as these are not part of the 3.0+ schema. + for (Map keyspaceRow : this.keyspaceRows) { + keyspaceRow.remove("strategy_class"); + keyspaceRow.remove("strategy_options"); + } } - public List nodes() { - return instances; + this.peersV2 = peersV2; + } + + public Scassandra node(int node) { + return instances.get(node - 1); + } + + public List nodes() { + return instances; + } + + public Scassandra node(int dc, int node) { + return dcNodeMap.get(dc).get(node - 1); + } + + public List nodes(int dc) { + return dcNodeMap.get(dc); + } + + public int ipSuffix(int dc, int node) { + int nodeCount = 0; + for (Integer dcNum : new TreeSet(dcNodeMap.keySet())) { + List nodesInDc = dcNodeMap.get(dc); + for (int n = 0; n < nodesInDc.size(); n++) { + nodeCount++; + if (dcNum == dc && n + 1 == node) { + return nodeCount; + } + } } - - public Scassandra node(int dc, int node) { - return dcNodeMap.get(dc).get(node - 1); + return -1; + } + + /** + * @return The binary port for nodes in this cluster. Note that this is only relevant when {@link + * ScassandraClusterBuilder#withSharedIP} is not used. + */ + public int getBinaryPort() { + return binaryPort; + } + + public InetSocketAddress address(int node) { + return binaryAddresses.get(node - 1); + } + + public InetSocketAddress listenAddress(int node) { + return listenAddresses.get(node - 1); + } + + public InetSocketAddress address(int dc, int node) { + int ipSuffix = ipSuffix(dc, node); + if (ipSuffix == -1) return null; + return address(ipSuffix); + } + + public InetSocketAddress listenAddress(int dc, int node) { + int ipSuffix = ipSuffix(dc, node); + if (ipSuffix == -1) return null; + return listenAddress(ipSuffix); + } + + public Host host(Cluster cluster, int dc, int node) { + InetSocketAddress address = address(dc, node); + for (Host host : cluster.getMetadata().getAllHosts()) { + if (host.getEndPoint().resolve().equals(address)) { + return host; + } } - - public List nodes(int dc) { - return dcNodeMap.get(dc); + return null; + } + + public static String datacenter(int dc) { + return "DC" + dc; + } + + public void init() { + for (Map.Entry> dc : dcNodeMap.entrySet()) { + for (Scassandra node : dc.getValue()) { + node.start(); + primeMetadata(node); + } } - - public int ipSuffix(int dc, int node) { - // TODO: Scassandra should be updated to include address to avoid O(n) lookup. - int nodeCount = 0; - for (Integer dcNum : new TreeSet(dcNodeMap.keySet())) { - List nodesInDc = dcNodeMap.get(dc); - for (int n = 0; n < nodesInDc.size(); n++) { - nodeCount++; - if (dcNum == dc && n + 1 == node) { - return nodeCount; - } - } - } - return -1; + } + + public void stop() { + logger.debug("Stopping ScassandraCluster."); + for (Scassandra node : instances) { + try { + node.stop(); + } catch (Exception e) { + logger.error("Could not stop node " + node, e); + } } - - public int getBinaryPort() { - return binaryPort; + } + + /** + * First stops each node in {@code dc} and then asserts that each node's {@link Host} is marked + * down for the given {@link Cluster} instance within 10 seconds. + * + *
    If any of the nodes are the control host, this node is stopped last, to reduce likelihood of + * control connection choosing a host that will be shut down. + * + * @param cluster cluster to wait for down statuses on. + * @param dc DC to stop. + */ + public void stopDC(Cluster cluster, int dc) { + logger.debug("Stopping all nodes in {}.", datacenter(dc)); + // If any node is the control host, stop it last. + int controlHost = -1; + for (int i = 1; i <= nodes(dc).size(); i++) { + int id = ipSuffix(dc, i); + Host host = TestUtils.findHost(cluster, id); + if (cluster.manager.controlConnection.connectedHost() == host) { + logger.debug("Node {} identified as control host. Stopping last.", id); + controlHost = id; + continue; + } + stop(cluster, id); } - public InetSocketAddress address(int node) { - return new InetSocketAddress(ipPrefix + node, binaryPort); + if (controlHost != -1) { + stop(cluster, controlHost); } - - public InetSocketAddress address(int dc, int node) { - // TODO: Scassandra should be updated to include address to avoid O(n) lookup. - int ipSuffix = ipSuffix(dc, node); - if (ipSuffix == -1) - return null; - return new InetSocketAddress(ipPrefix + ipSuffix, binaryPort); + } + + /** + * Stops a node by id and then asserts that its {@link Host} is marked down for the given {@link + * Cluster} instance within 10 seconds. + * + * @param cluster cluster to wait for down status on. + * @param node Node to stop. + */ + public void stop(Cluster cluster, int node) { + logger.debug("Stopping node {}.", node); + Scassandra scassandra = node(node); + try { + scassandra.stop(); + } catch (Exception e) { + logger.error("Could not stop node " + scassandra, e); } - - public Host host(Cluster cluster, int dc, int node) { - InetAddress address = address(dc, node).getAddress(); - for (Host host : cluster.getMetadata().getAllHosts()) { - if (host.getAddress().equals(address)) { - return host; - } - } - return null; + assertThat(cluster).host(node).goesDownWithin(10, TimeUnit.SECONDS); + } + + /** + * Stops a node by dc and id and then asserts that its {@link Host} is marked down for the given + * {@link Cluster} instance within 10 seconds. + * + * @param cluster cluster to wait for down status on. + * @param dc Data center node is in. + * @param node Node to stop. + */ + public void stop(Cluster cluster, int dc, int node) { + logger.debug("Stopping node {} in {}.", node, datacenter(dc)); + stop(cluster, ipSuffix(dc, node)); + } + + /** + * First starts each node in {@code dc} and then asserts that each node's {@link Host} is marked + * up for the given {@link Cluster} instance within 10 seconds. + * + * @param cluster cluster to wait for up statuses on. + * @param dc DC to start. + */ + public void startDC(Cluster cluster, int dc) { + logger.debug("Starting all nodes in {}.", datacenter(dc)); + for (int i = 1; i <= nodes(dc).size(); i++) { + int id = ipSuffix(dc, i); + start(cluster, id); } - - public static String datacenter(int dc) { - return "DC" + dc; + } + + /** + * Starts a node by id and then asserts that its {@link Host} is marked up for the given {@link + * Cluster} instance within 10 seconds. + * + * @param cluster cluster to wait for up status on. + * @param node Node to start. 
+ */ + public void start(Cluster cluster, int node) { + logger.debug("Starting node {}.", node); + Scassandra scassandra = node(node); + scassandra.start(); + assertThat(cluster).host(node).comesUpWithin(10, TimeUnit.SECONDS); + } + + /** + * Starts a node by dc and id and then asserts that its {@link Host} is marked up for the given + * {@link Cluster} instance within 10 seconds. + * + * @param cluster cluster to wait for up status on. + * @param dc Data center node is in. + * @param node Node to start. + */ + public void start(Cluster cluster, int dc, int node) { + logger.debug("Starting node {} in {}.", node, datacenter(dc)); + start(cluster, ipSuffix(dc, node)); + } + + public List getTokensForDC(int dc) { + // Offset DCs by dc * 100 to ensure unique tokens. + int offset = (dc - 1) * 100; + int dcNodeCount = nodes(dc).size(); + List tokens = Lists.newArrayListWithExpectedSize(dcNodeCount); + for (int i = 0; i < dcNodeCount; i++) { + tokens.add((i * ((long) Math.pow(2, 64) / dcNodeCount) + offset)); } - - public void init() { - for (Map.Entry> dc : dcNodeMap.entrySet()) { - for (Scassandra node : dc.getValue()) { - node.start(); - primeMetadata(node); - } + return tokens; + } + + private void primeMetadata(Scassandra node) { + PrimingClient client = node.primingClient(); + int nodeCount = 1; + + ImmutableList.Builder> rows = ImmutableList.builder(); + ImmutableList.Builder> rowsV2 = ImmutableList.builder(); + for (Integer dc : new TreeSet(dcNodeMap.keySet())) { + List nodesInDc = dcNodeMap.get(dc); + List tokens = getTokensForDC(dc); + for (int n = 0; n < nodesInDc.size(); n++) { + InetSocketAddress binaryAddress = address(nodeCount); + InetSocketAddress listenAddress = listenAddress(nodeCount); + nodeCount++; + Scassandra peer = nodesInDc.get(n); + if (node == peer) { // prime system.local. + Map row = Maps.newHashMap(); + addPeerInfo(row, dc, n + 1, "key", "local"); + addPeerInfo(row, dc, n + 1, "bootstrapped", "COMPLETED"); + addPeerInfo( + row, dc, n + 1, "broadcast_address", listenAddress.getAddress().getHostAddress()); + addPeerInfo(row, dc, n + 1, "cluster_name", "scassandra"); + addPeerInfo(row, dc, n + 1, "cql_version", "3.2.0"); + addPeerInfo(row, dc, n + 1, "data_center", datacenter(dc)); + addPeerInfo( + row, + dc, + n + 1, + "listen_address", + getPeerInfo( + dc, n + 1, "listen_address", listenAddress.getAddress().getHostAddress())); + addPeerInfo(row, dc, n + 1, "partitioner", "org.apache.cassandra.dht.Murmur3Partitioner"); + addPeerInfo(row, dc, n + 1, "rack", getPeerInfo(dc, n + 1, "rack", "rack1")); + addPeerInfo( + row, + dc, + n + 1, + "release_version", + getPeerInfo(dc, n + 1, "release_version", cassandraVersion)); + addPeerInfo(row, dc, n + 1, "tokens", ImmutableSet.of(tokens.get(n))); + addPeerInfo(row, dc, n + 1, "host_id", UUIDs.random()); + addPeerInfo(row, dc, n + 1, "schema_version", schemaVersion); + addPeerInfo(row, dc, n + 1, "graph", false); + + // These columns might not always be present, we don't have to specify them in the + // scassandra + // column metadata as it will default them to text columns. 
+ addPeerInfoIfExists(row, dc, n + 1, "dse_version"); + addPeerInfoIfExists(row, dc, n + 1, "workload"); + + String query = "SELECT * FROM system.local WHERE key='local'"; + if (!peersV2) { + client.prime( + PrimingRequest.queryBuilder() + .withQuery(query) + .withThen( + then() + .withColumnTypes(SELECT_LOCAL) + .withRows(Collections.>singletonList(row)) + .build()) + .build()); + } else { + addPeerInfo(row, dc, n + 1, "broadcast_port", listenAddress.getPort()); + addPeerInfo(row, dc, n + 1, "listen_port", listenAddress.getPort()); + client.prime( + PrimingRequest.queryBuilder() + .withQuery(query) + .withThen( + then() + .withColumnTypes(SELECT_LOCAL_V2) + .withRows(Collections.>singletonList(row)) + .build()) + .build()); + } + } else { // prime system.peers. + Map row = Maps.newHashMap(); + Map rowV2 = Maps.newHashMap(); + + addPeerInfo(row, dc, n + 1, "peer", listenAddress.getAddress().getHostAddress()); + addPeerInfo(rowV2, dc, n + 1, "peer", listenAddress.getAddress().getHostAddress()); + addPeerInfo(rowV2, dc, n + 1, "peer_port", listenAddress.getPort()); + + addPeerInfo(row, dc, n + 1, "rpc_address", binaryAddress.getAddress().getHostAddress()); + addPeerInfo( + rowV2, dc, n + 1, "native_address", binaryAddress.getAddress().getHostAddress()); + addPeerInfo(rowV2, dc, n + 1, "native_port", binaryAddress.getPort()); + + addPeerInfo(row, dc, n + 1, "data_center", datacenter(dc)); + addPeerInfo(rowV2, dc, n + 1, "data_center", datacenter(dc)); + addPeerInfo(row, dc, n + 1, "rack", getPeerInfo(dc, n + 1, "rack", "rack1")); + addPeerInfo(rowV2, dc, n + 1, "rack", getPeerInfo(dc, n + 1, "rack", "rack1")); + addPeerInfo( + row, + dc, + n + 1, + "release_version", + getPeerInfo(dc, n + 1, "release_version", cassandraVersion)); + addPeerInfo( + rowV2, + dc, + n + 1, + "release_version", + getPeerInfo(dc, n + 1, "release_version", cassandraVersion)); + addPeerInfo(row, dc, n + 1, "tokens", ImmutableSet.of(Long.toString(tokens.get(n)))); + addPeerInfo(rowV2, dc, n + 1, "tokens", ImmutableSet.of(Long.toString(tokens.get(n)))); + + java.util.UUID hostId = UUIDs.random(); + addPeerInfo(row, dc, n + 1, "host_id", hostId); + addPeerInfo(rowV2, dc, n + 1, "host_id", hostId); + + addPeerInfo(row, dc, n + 1, "schema_version", schemaVersion); + addPeerInfo(rowV2, dc, n + 1, "schema_version", schemaVersion); + + addPeerInfo(row, dc, n + 1, "graph", false); + addPeerInfo(rowV2, dc, n + 1, "graph", false); + + addPeerInfoIfExists(row, dc, n + 1, "listen_address"); + addPeerInfoIfExists(row, dc, n + 1, "dse_version"); + addPeerInfoIfExists(row, dc, n + 1, "workload"); + + addPeerInfoIfExists(rowV2, dc, n + 1, "dse_version"); + addPeerInfoIfExists(rowV2, dc, n + 1, "workload"); + + rows.add(row); + rowsV2.add(rowV2); + + client.prime( + PrimingRequest.queryBuilder() + .withQuery( + "SELECT * FROM system.peers WHERE peer='" + + listenAddress.getAddress().getHostAddress() + + "'") + .withThen( + then() + .withColumnTypes(SELECT_PEERS) + .withRows(Collections.>singletonList(row)) + .build()) + .build()); + + if (peersV2) { + client.prime( + PrimingRequest.queryBuilder() + .withQuery( + "SELECT * FROM system.peers_v2 WHERE peer='" + + listenAddress.getAddress().getHostAddress() + + "' AND peer_port=" + + listenAddress.getPort()) + .withThen( + then() + .withColumnTypes(SELECT_PEERS_V2) + .withRows(Collections.>singletonList(rowV2)) + .build()) + .build()); + } } + } } - public void stop() { - logger.debug("Stopping ScassandraCluster."); - for (Scassandra node : instances) { - node.stop(); - } + client.prime( + 
PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers") + .withThen(then().withColumnTypes(SELECT_PEERS).withRows(rows.build()).build()) + .build()); + + // return invalid error for peers_v2, indicating the table doesn't exist. + if (!peersV2) { + client.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen(then().withResult(Result.invalid)) + .build()); + } else { + client.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen(then().withColumnTypes(SELECT_PEERS_V2).withRows(rowsV2.build()).build()) + .build()); } - /** - * First stops each node in {@code dc} and then asserts that each node's {@link Host} - * is marked down for the given {@link Cluster} instance within 10 seconds. - *
    - * If any of the nodes are the control host, this node is stopped last, to reduce - * likelihood of control connection choosing a host that will be shut down. - * - * @param cluster cluster to wait for down statuses on. - * @param dc DC to stop. - */ - public void stopDC(Cluster cluster, int dc) { - logger.debug("Stopping all nodes in {}.", datacenter(dc)); - // If any node is the control host, stop it last. - int controlHost = -1; - for (int i = 1; i <= nodes(dc).size(); i++) { - int id = ipSuffix(dc, i); - Host host = TestUtils.findHost(cluster, id); - if (cluster.manager.controlConnection.connectedHost() == host) { - logger.debug("Node {} identified as control host. Stopping last.", id); - controlHost = id; - continue; - } - stop(cluster, id); - } - - if (controlHost != -1) { - stop(cluster, controlHost); - } + // Needed to ensure cluster_name matches what we expect on connection. + Map clusterNameRow = + ImmutableMap.builder().put("cluster_name", "scassandra").build(); + client.prime( + PrimingRequest.queryBuilder() + .withQuery("select cluster_name from system.local where key = 'local'") + .withThen( + then() + .withColumnTypes(SELECT_CLUSTER_NAME) + .withRows(Collections.>singletonList(clusterNameRow)) + .build()) + .build()); + + client.prime( + PrimingRequest.queryBuilder() + .withQuery(keyspaceQuery) + .withThen(then().withColumnTypes(keyspaceColumnTypes).withRows(keyspaceRows).build()) + .build()); + } + + private void addPeerInfo( + Map input, int dc, int node, String property, Object defaultValue) { + Object peerInfo = getPeerInfo(dc, node, property, defaultValue); + if (peerInfo != null) { + input.put(property, peerInfo); } - - /** - * Stops a node by id and then asserts that its {@link Host} is marked down - * for the given {@link Cluster} instance within 10 seconds. - * - * @param cluster cluster to wait for down status on. - * @param node Node to stop. - */ - public void stop(Cluster cluster, int node) { - logger.debug("Stopping node {}.", node); - Scassandra scassandra = node(node); - scassandra.stop(); - assertThat(cluster).host(node).goesDownWithin(10, TimeUnit.SECONDS); + } + + private void addPeerInfoIfExists(Map input, int dc, int node, String property) { + Map> forDc = forcedPeerInfos.get(dc); + if (forDc == null) return; + + Map forNode = forDc.get(node); + if (forNode == null) return; + + if (forNode.containsKey(property)) input.put(property, forNode.get(property)); + } + + private Object getPeerInfo(int dc, int node, String property, Object defaultValue) { + Map> forDc = forcedPeerInfos.get(dc); + if (forDc == null) return defaultValue; + + Map forNode = forDc.get(node); + if (forNode == null) return defaultValue; + + return (forNode.containsKey(property)) ? 
forNode.get(property) : defaultValue; + } + + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS = { + column("peer", INET), + column("rpc_address", INET), + column("data_center", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("tokens", set(TEXT)), + column("listen_address", INET), + column("host_id", UUID), + column("graph", BOOLEAN), + column("schema_version", UUID) + }; + + /* system.peers was re-worked for DSE 6.8 */ + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS_DSE68 = { + column("peer", INET), + column("rpc_address", INET), + column("data_center", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("tokens", set(TEXT)), + column("host_id", UUID), + column("graph", BOOLEAN), + column("schema_version", UUID), + column("native_transport_address", INET), + column("native_transport_port", INT), + column("native_transport_port_ssl", INT) + }; + + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS_V2 = { + column("peer", INET), + column("peer_port", INT), + column("native_address", INET), + column("native_port", INT), + column("data_center", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("tokens", set(TEXT)), + column("host_id", UUID), + column("graph", BOOLEAN), + column("schema_version", UUID) + }; + + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_LOCAL = { + column("key", TEXT), + column("bootstrapped", TEXT), + column("broadcast_address", INET), + column("cluster_name", TEXT), + column("cql_version", TEXT), + column("data_center", TEXT), + column("graph", BOOLEAN), + column("host_id", UUID), + column("listen_address", INET), + column("partitioner", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("schema_version", UUID), + column("tokens", set(TEXT)) + }; + + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_LOCAL_V2 = { + column("key", TEXT), + column("bootstrapped", TEXT), + column("broadcast_address", INET), + column("broadcast_port", INT), + column("cluster_name", TEXT), + column("cql_version", TEXT), + column("data_center", TEXT), + column("graph", BOOLEAN), + column("host_id", UUID), + column("listen_address", INET), + column("listen_port", INT), + column("partitioner", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("schema_version", UUID), + column("tokens", set(TEXT)) + }; + + public static final org.scassandra.http.client.types.ColumnMetadata[] + SELECT_LOCAL_RPC_ADDRESS_AND_PORT = { + column("key", TEXT), + column("bootstrapped", TEXT), + column("broadcast_address", INET), + column("broadcast_port", INT), + column("cluster_name", TEXT), + column("cql_version", TEXT), + column("data_center", TEXT), + column("host_id", UUID), + column("listen_address", INET), + column("listen_port", INT), + column("partitioner", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("rpc_address", INET), + column("rpc_port", INT), + column("schema_version", UUID), + column("tokens", set(TEXT)) + }; + + static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_CLUSTER_NAME = { + column("cluster_name", TEXT) + }; + + static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_KEYSPACES = { + column("durable_writes", BOOLEAN), + column("keyspace_name", TEXT), + column("strategy_class", TEXT), + column("strategy_options", TEXT) + }; + + static final 
org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_KEYSPACES_V3 = { + column("durable_writes", BOOLEAN), + column("keyspace_name", TEXT), + column("replication", MapType.map(TEXT, TEXT)) + }; + + static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_COLUMN_FAMILIES = { + column("bloom_filter_fp_chance", DOUBLE), + column("caching", TEXT), + column("cf_id", UUID), + column("column_aliases", TEXT), + column("columnfamily_name", TEXT), + column("comment", TEXT), + column("compaction_strategy_class", TEXT), + column("compaction_strategy_options", TEXT), + column("comparator", TEXT), + column("compression_parameters", TEXT), + column("default_time_to_live", INT), + column("default_validator", TEXT), + column("dropped_columns", map(TEXT, BIG_INT)), + column("gc_grace_seconds", INT), + column("index_interval", INT), + column("is_dense", BOOLEAN), + column("key_aliases", TEXT), + column("key_validator", TEXT), + column("keyspace_name", TEXT), + column("local_read_repair_chance", DOUBLE), + column("max_compaction_threshold", INT), + column("max_index_interval", INT), + column("memtable_flush_period_in_ms", INT), + column("min_compaction_threshold", INT), + column("min_index_interval", INT), + column("read_repair_chance", DOUBLE), + column("speculative_retry", TEXT), + column("subcomparator", TEXT), + column("type", TEXT), + column("value_alias", TEXT) + }; + + static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_COLUMNS = { + column("column_name", TEXT), + column("columnfamily_name", TEXT), + column("component_index", INT), + column("index_name", TEXT), + column("index_options", TEXT), + column("index_type", TEXT), + column("keyspace_name", TEXT), + column("type", TEXT), + column("validator", TEXT), + }; + + // Primes a minimal system.local row on an Scassandra node. + // We need a host_id so that the driver can store it in Metadata.hosts + public static void primeSystemLocalRow(Scassandra scassandra) { + Set localMetadata = Sets.newHashSet(SELECT_LOCAL); + Map row = new HashMap(); + row.put("host_id", java.util.UUID.randomUUID()); + scassandra + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.local WHERE key='local'") + .withThen( + then() + .withColumnTypes( + localMetadata.toArray(new ColumnMetadata[localMetadata.size()])) + .withRows(Collections.>singletonList(row)))); + } + + public static ScassandraClusterBuilder builder() { + return new ScassandraClusterBuilder(); + } + + public static class ScassandraClusterBuilder { + + private Integer nodes[] = {1}; + private boolean sharedIP = false; + private boolean peersV2 = false; + private String ipPrefix = TestUtils.IP_PREFIX; + private final List> keyspaceRows = Lists.newArrayList(); + private final Map>> forcedPeerInfos = + Maps.newHashMap(); + private String cassandraVersion = null; + + public ScassandraClusterBuilder withNodes(Integer... nodes) { + this.nodes = nodes; + return this; } - /** - * Stops a node by dc and id and then asserts that its {@link Host} is marked down - * for the given {@link Cluster} instance within 10 seconds. - * - * @param cluster cluster to wait for down status on. - * @param dc Data center node is in. - * @param node Node to stop. 
- */ - public void stop(Cluster cluster, int dc, int node) { - logger.debug("Stopping node {} in {}.", node, datacenter(dc)); - stop(cluster, ipSuffix(dc, node)); + public ScassandraClusterBuilder withIpPrefix(String ipPrefix) { + this.ipPrefix = ipPrefix; + return this; } - /** - * First starts each node in {@code dc} and then asserts that each node's {@link Host} - * is marked up for the given {@link Cluster} instance within 10 seconds. - * - * @param cluster cluster to wait for up statuses on. - * @param dc DC to start. - */ - public void startDC(Cluster cluster, int dc) { - logger.debug("Starting all nodes in {}.", datacenter(dc)); - for (int i = 1; i <= nodes(dc).size(); i++) { - int id = ipSuffix(dc, i); - start(cluster, id); - } + public ScassandraClusterBuilder withSimpleKeyspace(String name, int replicationFactor) { + Map simpleKeyspaceRow = Maps.newHashMap(); + simpleKeyspaceRow.put("durable_writes", false); + simpleKeyspaceRow.put("keyspace_name", name); + simpleKeyspaceRow.put( + "replication", + ImmutableMap.builder() + .put("class", "org.apache.cassandra.locator.SimpleStrategy") + .put("replication_factor", "" + replicationFactor) + .build()); + simpleKeyspaceRow.put("strategy_class", "SimpleStrategy"); + simpleKeyspaceRow.put( + "strategy_options", "{\"replication_factor\":\"" + replicationFactor + "\"}"); + keyspaceRows.add(simpleKeyspaceRow); + return this; } - /** - * Starts a node by id and then asserts that its {@link Host} is marked up - * for the given {@link Cluster} instance within 10 seconds. - * - * @param cluster cluster to wait for up status on. - * @param node Node to start. - */ - public void start(Cluster cluster, int node) { - logger.debug("Starting node {}.", node); - Scassandra scassandra = node(node); - scassandra.start(); - assertThat(cluster).host(node).comesUpWithin(10, TimeUnit.SECONDS); + public ScassandraClusterBuilder withNetworkTopologyKeyspace( + String name, Map replicationFactors) { + StringBuilder strategyOptionsBuilder = new StringBuilder("{"); + ImmutableMap.Builder replicationBuilder = ImmutableMap.builder(); + replicationBuilder.put("class", "org.apache.cassandra.locator.NetworkTopologyStrategy"); + + for (Map.Entry dc : replicationFactors.entrySet()) { + strategyOptionsBuilder.append("\""); + strategyOptionsBuilder.append(datacenter(dc.getKey())); + strategyOptionsBuilder.append("\":\""); + strategyOptionsBuilder.append(dc.getValue()); + strategyOptionsBuilder.append("\","); + replicationBuilder.put(datacenter(dc.getKey()), "" + dc.getValue()); + } + + String strategyOptions = + strategyOptionsBuilder.substring(0, strategyOptionsBuilder.length() - 1) + "}"; + + Map ntsKeyspaceRow = Maps.newHashMap(); + ntsKeyspaceRow.put("durable_writes", false); + ntsKeyspaceRow.put("keyspace_name", name); + ntsKeyspaceRow.put("strategy_class", "NetworkTopologyStrategy"); + ntsKeyspaceRow.put("strategy_options", strategyOptions); + ntsKeyspaceRow.put("replication", replicationBuilder.build()); + keyspaceRows.add(ntsKeyspaceRow); + return this; } - /** - * Starts a node by dc and id and then asserts that its {@link Host} is marked up - * for the given {@link Cluster} instance within 10 seconds. - * - * @param cluster cluster to wait for up status on. - * @param dc Data center node is in. - * @param node Node to start. 
- */ - public void start(Cluster cluster, int dc, int node) { - logger.debug("Starting node {} in {}.", node, datacenter(dc)); - start(cluster, ipSuffix(dc, node)); + public ScassandraClusterBuilder forcePeerInfo(int dc, int node, String name, Object value) { + Map> forDc = forcedPeerInfos.get(dc); + if (forDc == null) { + forDc = Maps.newHashMap(); + forcedPeerInfos.put(dc, forDc); + } + Map forNode = forDc.get(node); + if (forNode == null) { + forNode = Maps.newHashMap(); + forDc.put(node, forNode); + } + forNode.put(name, value); + return this; } - public List getTokensForDC(int dc) { - // Offset DCs by dc * 100 to ensure unique tokens. - int offset = (dc - 1) * 100; - int dcNodeCount = nodes(dc).size(); - List tokens = Lists.newArrayListWithExpectedSize(dcNodeCount); - for (int i = 0; i < dcNodeCount; i++) { - tokens.add((i * ((long) Math.pow(2, 64) / dcNodeCount) + offset)); - } - return tokens; + public ScassandraClusterBuilder withPeersV2(boolean enabled) { + this.peersV2 = enabled; + return this; } - private void primeMetadata(Scassandra node) { - PrimingClient client = node.primingClient(); - int nodeCount = 0; - - ImmutableList.Builder> rows = ImmutableList.builder(); - for (Integer dc : new TreeSet(dcNodeMap.keySet())) { - List nodesInDc = dcNodeMap.get(dc); - List tokens = getTokensForDC(dc); - for (int n = 0; n < nodesInDc.size(); n++) { - String address = ipPrefix + ++nodeCount; - Scassandra peer = nodesInDc.get(n); - String query; - Map row; - org.scassandra.http.client.types.ColumnMetadata[] metadata; - if (node == peer) { // prime system.local. - metadata = SELECT_LOCAL; - query = "SELECT * FROM system.local WHERE key='local'"; - - row = Maps.newHashMap(); - addPeerInfo(row, dc, n + 1, "key", "local"); - addPeerInfo(row, dc, n + 1, "bootstrapped", "COMPLETED"); - addPeerInfo(row, dc, n + 1, "broadcast_address", address); - addPeerInfo(row, dc, n + 1, "cluster_name", "scassandra"); - addPeerInfo(row, dc, n + 1, "cql_version", "3.2.0"); - addPeerInfo(row, dc, n + 1, "data_center", datacenter(dc)); - addPeerInfo(row, dc, n + 1, "listen_address", getPeerInfo(dc, n + 1, "listen_address", address)); - addPeerInfo(row, dc, n + 1, "partitioner", "org.apache.cassandra.dht.Murmur3Partitioner"); - addPeerInfo(row, dc, n + 1, "rack", getPeerInfo(dc, n + 1, "rack", "rack1")); - addPeerInfo(row, dc, n + 1, "release_version", getPeerInfo(dc, n + 1, "release_version", cassandraVersion)); - addPeerInfo(row, dc, n + 1, "tokens", ImmutableSet.of(tokens.get(n))); - addPeerInfo(row, dc, n + 1, "schema_version", schemaVersion); - addPeerInfo(row, dc, n + 1, "graph", false); - - // These columns might not always be present, we don't have to specify them in the scassandra - // column metadata as it will default them to text columns. - addPeerInfoIfExists(row, dc, n + 1, "dse_version"); - addPeerInfoIfExists(row, dc, n + 1, "workload"); - } else { // prime system.peers. 
- query = "SELECT * FROM system.peers WHERE peer='" + address + "'"; - metadata = SELECT_PEERS; - row = Maps.newHashMap(); - addPeerInfo(row, dc, n + 1, "peer", address); - addPeerInfo(row, dc, n + 1, "rpc_address", address); - addPeerInfo(row, dc, n + 1, "data_center", datacenter(dc)); - addPeerInfo(row, dc, n + 1, "rack", getPeerInfo(dc, n + 1, "rack", "rack1")); - addPeerInfo(row, dc, n + 1, "release_version", getPeerInfo(dc, n + 1, "release_version", cassandraVersion)); - addPeerInfo(row, dc, n + 1, "tokens", ImmutableSet.of(Long.toString(tokens.get(n)))); - addPeerInfo(row, dc, n + 1, "host_id", UUIDs.random()); - addPeerInfo(row, dc, n + 1, "schema_version", schemaVersion); - addPeerInfo(row, dc, n + 1, "graph", false); - - addPeerInfoIfExists(row, dc, n + 1, "listen_address"); - addPeerInfoIfExists(row, dc, n + 1, "dse_version"); - addPeerInfoIfExists(row, dc, n + 1, "workload"); - - rows.add(row); - } - client.prime(PrimingRequest.queryBuilder() - .withQuery(query) - .withThen(then() - .withColumnTypes(metadata) - .withRows(Collections.>singletonList(row)) - .build()) - .build()); - } - } - - client.prime(PrimingRequest.queryBuilder() - .withQuery("SELECT * FROM system.peers") - .withThen(then() - .withColumnTypes(SELECT_PEERS) - .withRows(rows.build()) - .build()) - .build()); - - // Needed to ensure cluster_name matches what we expect on connection. - Map clusterNameRow = ImmutableMap.builder() - .put("cluster_name", "scassandra") - .build(); - client.prime(PrimingRequest.queryBuilder() - .withQuery("select cluster_name from system.local") - .withThen(then() - .withColumnTypes(SELECT_CLUSTER_NAME) - .withRows(Collections.>singletonList(clusterNameRow)) - .build()) - .build()); - - - client.prime(PrimingRequest.queryBuilder() - .withQuery(keyspaceQuery) - .withThen(then() - .withColumnTypes(keyspaceColumnTypes) - .withRows(keyspaceRows) - .build()) - .build()); + public ScassandraClusterBuilder withPeersV2() { + return withPeersV2(true); } - private void addPeerInfo(Map input, int dc, int node, String property, Object defaultValue) { - Object peerInfo = getPeerInfo(dc, node, property, defaultValue); - if (peerInfo != null) { - input.put(property, peerInfo); - } + public ScassandraClusterBuilder withSharedIP(boolean enabled) { + this.sharedIP = enabled; + if (this.sharedIP) { + this.withPeersV2(); + } + return this; } - private void addPeerInfoIfExists(Map input, int dc, int node, String property) { - Map> forDc = forcedPeerInfos.get(dc); - if (forDc == null) - return; - - Map forNode = forDc.get(node); - if (forNode == null) - return; - - if (forNode.containsKey(property)) - input.put(property, forNode.get(property)); - } - - private Object getPeerInfo(int dc, int node, String property, Object defaultValue) { - Map> forDc = forcedPeerInfos.get(dc); - if (forDc == null) - return defaultValue; - - Map forNode = forDc.get(node); - if (forNode == null) - return defaultValue; - - return (forNode.containsKey(property)) - ? 
forNode.get(property) - : defaultValue; + public ScassandraClusterBuilder withSharedIP() { + return withSharedIP(true); } - public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS = { - column("peer", INET), - column("rpc_address", INET), - column("data_center", TEXT), - column("rack", TEXT), - column("release_version", TEXT), - column("tokens", set(TEXT)), - column("listen_address", INET), - column("host_id", UUID), - column("graph", BOOLEAN), - column("schema_version", UUID) - }; - - public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_LOCAL = { - column("key", TEXT), - column("bootstrapped", TEXT), - column("broadcast_address", INET), - column("cluster_name", TEXT), - column("cql_version", TEXT), - column("data_center", TEXT), - column("listen_address", INET), - column("partitioner", TEXT), - column("rack", TEXT), - column("release_version", TEXT), - column("tokens", set(TEXT)), - column("graph", BOOLEAN), - column("schema_version", UUID) - }; - - static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_CLUSTER_NAME = { - column("cluster_name", TEXT) - }; - - static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_KEYSPACES = { - column("durable_writes", BOOLEAN), - column("keyspace_name", TEXT), - column("strategy_class", TEXT), - column("strategy_options", TEXT) - }; - - static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_KEYSPACES_V3 = { - column("durable_writes", BOOLEAN), - column("keyspace_name", TEXT), - column("replication", MapType.map(TEXT, TEXT)) - }; - - static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_COLUMN_FAMILIES = { - column("bloom_filter_fp_chance", DOUBLE), - column("caching", TEXT), - column("cf_id", UUID), - column("column_aliases", TEXT), - column("columnfamily_name", TEXT), - column("comment", TEXT), - column("compaction_strategy_class", TEXT), - column("compaction_strategy_options", TEXT), - column("comparator", TEXT), - column("compression_parameters", TEXT), - column("default_time_to_live", INT), - column("default_validator", TEXT), - column("dropped_columns", map(TEXT, BIG_INT)), - column("gc_grace_seconds", INT), - column("index_interval", INT), - column("is_dense", BOOLEAN), - column("key_aliases", TEXT), - column("key_validator", TEXT), - column("keyspace_name", TEXT), - column("local_read_repair_chance", DOUBLE), - column("max_compaction_threshold", INT), - column("max_index_interval", INT), - column("memtable_flush_period_in_ms", INT), - column("min_compaction_threshold", INT), - column("min_index_interval", INT), - column("read_repair_chance", DOUBLE), - column("speculative_retry", TEXT), - column("subcomparator", TEXT), - column("type", TEXT), - column("value_alias", TEXT) - }; - - static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_SCHEMA_COLUMNS = { - column("column_name", TEXT), - column("columnfamily_name", TEXT), - column("component_index", INT), - column("index_name", TEXT), - column("index_options", TEXT), - column("index_type", TEXT), - column("keyspace_name", TEXT), - column("type", TEXT), - column("validator", TEXT), - }; - - public static ScassandraClusterBuilder builder() { - return new ScassandraClusterBuilder(); + public ScassandraClusterBuilder withCassandraVersion(String version) { + this.cassandraVersion = version; + return this; } - public static class ScassandraClusterBuilder { - - private Integer nodes[] = {1}; - private String ipPrefix = TestUtils.IP_PREFIX; - private final 
List> keyspaceRows = Lists.newArrayList(); - private final Map>> forcedPeerInfos = Maps.newHashMap(); - private String cassandraVersion = null; - - public ScassandraClusterBuilder withNodes(Integer... nodes) { - this.nodes = nodes; - return this; - } - - public ScassandraClusterBuilder withIpPrefix(String ipPrefix) { - this.ipPrefix = ipPrefix; - return this; - } - - public ScassandraClusterBuilder withSimpleKeyspace(String name, int replicationFactor) { - Map simpleKeyspaceRow = Maps.newHashMap(); - simpleKeyspaceRow.put("durable_writes", false); - simpleKeyspaceRow.put("keyspace_name", name); - simpleKeyspaceRow.put("replication", ImmutableMap.builder() - .put("class", "org.apache.cassandra.locator.SimpleStrategy") - .put("replication_factor", "" + replicationFactor).build()); - simpleKeyspaceRow.put("strategy_class", "SimpleStrategy"); - simpleKeyspaceRow.put("strategy_options", "{\"replication_factor\":\"" + replicationFactor + "\"}"); - keyspaceRows.add(simpleKeyspaceRow); - return this; - } - - public ScassandraClusterBuilder withNetworkTopologyKeyspace(String name, Map replicationFactors) { - StringBuilder strategyOptionsBuilder = new StringBuilder("{"); - ImmutableMap.Builder replicationBuilder = ImmutableMap.builder(); - replicationBuilder.put("class", "org.apache.cassandra.locator.NetworkTopologyStrategy"); - - for (Map.Entry dc : replicationFactors.entrySet()) { - strategyOptionsBuilder.append("\""); - strategyOptionsBuilder.append(datacenter(dc.getKey())); - strategyOptionsBuilder.append("\":\""); - strategyOptionsBuilder.append(dc.getValue()); - strategyOptionsBuilder.append("\","); - replicationBuilder.put(datacenter(dc.getKey()), "" + dc.getValue()); - } - - String strategyOptions = strategyOptionsBuilder.substring(0, strategyOptionsBuilder.length() - 1) + "}"; - - Map ntsKeyspaceRow = Maps.newHashMap(); - ntsKeyspaceRow.put("durable_writes", false); - ntsKeyspaceRow.put("keyspace_name", name); - ntsKeyspaceRow.put("strategy_class", "NetworkTopologyStrategy"); - ntsKeyspaceRow.put("strategy_options", strategyOptions); - ntsKeyspaceRow.put("replication", replicationBuilder.build()); - keyspaceRows.add(ntsKeyspaceRow); - return this; - } - - public ScassandraClusterBuilder forcePeerInfo(int dc, int node, String name, Object value) { - Map> forDc = forcedPeerInfos.get(dc); - if (forDc == null) { - forDc = Maps.newHashMap(); - forcedPeerInfos.put(dc, forDc); - } - Map forNode = forDc.get(node); - if (forNode == null) { - forNode = Maps.newHashMap(); - forDc.put(node, forNode); - } - forNode.put(name, value); - return this; - } - - public ScassandraClusterBuilder withCassandraVersion(String version) { - this.cassandraVersion = version; - return this; - } - - public ScassandraCluster build() { - return new ScassandraCluster(nodes, ipPrefix, TestUtils.findAvailablePort(), TestUtils.findAvailablePort(), keyspaceRows, cassandraVersion, forcedPeerInfos); + public ScassandraCluster build() { + if (sharedIP) { + try { + InetAddress address = InetAddress.getByName(ipPrefix + "1"); + return new ScassandraCluster( + nodes, + TestUtils.findAvailablePort(), + buildAddresses(nodes, address), + buildAddresses(nodes, address), + buildAddresses(nodes, address), + keyspaceRows, + cassandraVersion, + forcedPeerInfos, + peersV2); + } catch (UnknownHostException uhe) { + throw new RuntimeException(uhe); } + } else { + return new ScassandraCluster( + nodes, + ipPrefix, + TestUtils.findAvailablePort(), + TestUtils.findAvailablePort(), + TestUtils.findAvailablePort(), + keyspaceRows, + 
cassandraVersion, + forcedPeerInfos, + peersV2); + } + } + } + + public static List buildAddresses(Integer[] nodes, String ipPrefix, int port) { + int node = 1; + List addresses = new ArrayList(); + for (int nodesInDcCount : nodes) { + for (int n = 0; n < nodesInDcCount; n++) { + String ip = ipPrefix + node++; + addresses.add(new InetSocketAddress(ip, port)); + } + } + return addresses; + } + + public static List buildAddresses(Integer[] nodes, InetAddress address) { + List addresses = new ArrayList(); + for (int nodesInDcCount : nodes) { + for (int n = 0; n < nodesInDcCount; n++) { + addresses.add(new InetSocketAddress(address, TestUtils.findAvailablePort())); + } } + return addresses; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ScassandraTestBase.java b/driver-core/src/test/java/com/datastax/driver/core/ScassandraTestBase.java index 140592174cd..68a6ea67832 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ScassandraTestBase.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ScassandraTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,110 +17,122 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.fail; + +import java.net.InetSocketAddress; import org.scassandra.Scassandra; import org.scassandra.http.client.ActivityClient; import org.scassandra.http.client.CurrentClient; import org.scassandra.http.client.PrimingClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; -import java.net.InetSocketAddress; - -import static org.assertj.core.api.Assertions.fail; - /** - * Base class for Scassandra tests. - * This class takes care of starting and stopping a Scassandra server, - * and provides some helper methods to leverage the creation of Cluster and Session objects. - * The actual creation of such objects is however left to subclasses. - * If a single cluster instance can be shared by all test methods, - * consider using {@link com.datastax.driver.core.ScassandraTestBase.PerClassCluster} instead. + * Base class for Scassandra tests. This class takes care of starting and stopping a Scassandra + * server, and provides some helper methods to leverage the creation of Cluster and Session objects. + * The actual creation of such objects is however left to subclasses. If a single cluster instance + * can be shared by all test methods, consider using {@link + * com.datastax.driver.core.ScassandraTestBase.PerClassCluster} instead. 
*/ public abstract class ScassandraTestBase { - protected Scassandra scassandra; + private static final Logger logger = LoggerFactory.getLogger(ScassandraTestBase.class); - protected InetSocketAddress hostAddress; + protected Scassandra scassandra; - protected PrimingClient primingClient; + protected EndPoint hostEndPoint; - protected ActivityClient activityClient; + protected PrimingClient primingClient; - protected CurrentClient currentClient; + protected ActivityClient activityClient; - protected static String ip = TestUtils.ipOfNode(1); + protected CurrentClient currentClient; - @BeforeClass(groups = {"short", "long"}) - public void beforeTestClass() { - scassandra = TestUtils.createScassandraServer(); - scassandra.start(); - primingClient = scassandra.primingClient(); - activityClient = scassandra.activityClient(); - currentClient = scassandra.currentClient(); - hostAddress = new InetSocketAddress(ip, scassandra.getBinaryPort()); - } + protected static String ip = TestUtils.ipOfNode(1); - @AfterClass(groups = {"short", "long"}) - public void afterTestClass() { - if (scassandra != null) - scassandra.stop(); - } + @BeforeClass(groups = {"short", "long"}) + public void beforeTestClass() { + scassandra = TestUtils.createScassandraServer(); + scassandra.start(); + primingClient = scassandra.primingClient(); + activityClient = scassandra.activityClient(); + currentClient = scassandra.currentClient(); + hostEndPoint = + new TranslatedAddressEndPoint(new InetSocketAddress(ip, scassandra.getBinaryPort())); + } - @BeforeMethod(groups = {"short", "long"}) - @AfterMethod(groups = {"short", "long"}) - public void resetClients() { - activityClient.clearAllRecordedActivity(); - primingClient.clearAllPrimes(); - currentClient.enableListener(); + @AfterClass(groups = {"short", "long"}) + public void afterTestClass() { + if (scassandra != null) { + try { + scassandra.stop(); + } catch (Exception e) { + logger.error("Could not stop node " + scassandra, e); + } } - - protected Cluster.Builder createClusterBuilder() { - return Cluster.builder() - .withPort(scassandra.getBinaryPort()) - .addContactPoints(hostAddress.getAddress()) - .withPort(scassandra.getBinaryPort()) - .withPoolingOptions(new PoolingOptions() - .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) - .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) - .setHeartbeatIntervalSeconds(0)); + } + + @BeforeMethod(groups = {"short", "long"}) + @AfterMethod(groups = {"short", "long"}) + public void resetClients() { + activityClient.clearAllRecordedActivity(); + primingClient.clearAllPrimes(); + currentClient.enableListener(); + ScassandraCluster.primeSystemLocalRow(scassandra); + } + + protected Cluster.Builder createClusterBuilder() { + return Cluster.builder() + .withPort(scassandra.getBinaryPort()) + .addContactPoint(hostEndPoint) + .withPort(scassandra.getBinaryPort()) + .withPoolingOptions( + new PoolingOptions() + .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) + .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) + .setHeartbeatIntervalSeconds(0)); + } + + protected Host retrieveSingleHost(Cluster cluster) { + Host host = cluster.getMetadata().getHost(hostEndPoint); + if (host == null) { + fail("Unable to retrieve host"); } + return host; + } + + /** + * This subclass of ScassandraTestBase assumes that the same Cluster instance will be used for all + * tests. 
+ */ + public abstract static class PerClassCluster extends ScassandraTestBase { + + protected Cluster cluster; + + protected Session session; - protected Host retrieveSingleHost(Cluster cluster) { - Host host = cluster.getMetadata().getHost(hostAddress); - if (host == null) { - fail("Unable to retrieve host"); - } - return host; + protected Host host; + + @Override + @BeforeClass(groups = {"short", "long"}) + public void beforeTestClass() { + super.beforeTestClass(); + ScassandraCluster.primeSystemLocalRow(scassandra); + Cluster.Builder builder = createClusterBuilder(); + cluster = builder.build(); + host = retrieveSingleHost(cluster); + session = cluster.connect(); } - /** - * This subclass of ScassandraTestBase assumes that - * the same Cluster instance will be used for all tests. - */ - public static abstract class PerClassCluster extends ScassandraTestBase { - - protected Cluster cluster; - - protected Session session; - - protected Host host; - - @BeforeClass(groups = {"short", "long"}) - public void beforeTestClass() { - super.beforeTestClass(); - Cluster.Builder builder = createClusterBuilder(); - cluster = builder.build(); - host = retrieveSingleHost(cluster); - session = cluster.connect(); - } - - @AfterClass(groups = {"short", "long"}) - public void afterTestClass() { - if (cluster != null) - cluster.close(); - super.afterTestClass(); - } + @Override + @AfterClass(groups = {"short", "long"}) + public void afterTestClass() { + if (cluster != null) cluster.close(); + super.afterTestClass(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaAgreementTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaAgreementTest.java index 99fcc00fc0d..a2722671512 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaAgreementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaAgreementTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,75 +17,75 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.UUIDs; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.utils.UUIDs; import java.net.InetAddress; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; @CCMConfig(numberOfNodes = 2) public class SchemaAgreementTest extends CCMTestsSupport { - // Don't use "IF EXISTS" to remain compatible with older C* versions - static final String CREATE_TABLE = "CREATE TABLE table_%s (k int primary key, v int)"; - - static final AtomicInteger COUNTER = new AtomicInteger(1); - - @Test(groups = "short") - public void should_set_flag_on_successful_agreement() { - ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); - protocolOptions.maxSchemaAgreementWaitSeconds = 10; - ResultSet rs = session().execute(String.format(CREATE_TABLE, COUNTER.getAndIncrement())); - assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); - } - - @Test(groups = "short") - public void should_set_flag_on_non_schema_altering_statement() { - ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); - protocolOptions.maxSchemaAgreementWaitSeconds = 10; - ResultSet rs = session().execute("select release_version from system.local"); - assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); - } - - @Test(groups = "short", priority = 1) - public void should_unset_flag_on_failed_agreement() { - // Setting to 0 results in no query being set, so agreement fails - ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); - protocolOptions.maxSchemaAgreementWaitSeconds = 0; - ResultSet rs = session().execute(String.format(CREATE_TABLE, COUNTER.getAndIncrement())); - assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isFalse(); - } - - @Test(groups = "short") - public void should_check_agreement_through_cluster_metadata() { - Cluster controlCluster = register(TestUtils.buildControlCluster(cluster(), ccm())); - Session controlSession = controlCluster.connect(); - - Row localRow = controlSession.execute("SELECT schema_version FROM system.local").one(); - UUID localVersion = localRow.getUUID("schema_version"); - Row peerRow = controlSession.execute("SELECT peer, schema_version FROM system.peers").one(); - InetAddress peerAddress = peerRow.getInet("peer"); - UUID peerVersion = peerRow.getUUID("schema_version"); - // The two nodes should be in agreement at this point, but check just in case: - assertThat(localVersion).isEqualTo(peerVersion); - - // Now check the method under test: - assertThat(cluster().getMetadata().checkSchemaAgreement()).isTrue(); - - // Insert a fake version to simulate a disagreement: - forceSchemaVersion(controlSession, peerAddress, UUIDs.random()); - assertThat(cluster().getMetadata().checkSchemaAgreement()).isFalse(); - - forceSchemaVersion(controlSession, peerAddress, peerVersion); - } - - private static void forceSchemaVersion(Session session, InetAddress peerAddress, UUID schemaVersion) { - session.execute(String.format("UPDATE system.peers SET 
schema_version = %s WHERE peer = %s", - TypeCodec.uuid().format(schemaVersion), TypeCodec.inet().format(peerAddress))); - } - + // Don't use "IF EXISTS" to remain compatible with older C* versions + static final String CREATE_TABLE = "CREATE TABLE table_%s (k int primary key, v int)"; + + static final AtomicInteger COUNTER = new AtomicInteger(1); + + @Test(groups = "short") + public void should_set_flag_on_successful_agreement() { + ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); + protocolOptions.maxSchemaAgreementWaitSeconds = 10; + ResultSet rs = session().execute(String.format(CREATE_TABLE, COUNTER.getAndIncrement())); + assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); + } + + @Test(groups = "short") + public void should_set_flag_on_non_schema_altering_statement() { + ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); + protocolOptions.maxSchemaAgreementWaitSeconds = 10; + ResultSet rs = session().execute("select release_version from system.local"); + assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); + } + + @Test(groups = "short", priority = 1) + public void should_unset_flag_on_failed_agreement() { + // Setting to 0 results in no query being set, so agreement fails + ProtocolOptions protocolOptions = cluster().getConfiguration().getProtocolOptions(); + protocolOptions.maxSchemaAgreementWaitSeconds = 0; + ResultSet rs = session().execute(String.format(CREATE_TABLE, COUNTER.getAndIncrement())); + assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isFalse(); + } + + @Test(groups = "short") + public void should_check_agreement_through_cluster_metadata() { + Cluster controlCluster = register(TestUtils.buildControlCluster(cluster(), ccm())); + Session controlSession = controlCluster.connect(); + + Row localRow = controlSession.execute("SELECT schema_version FROM system.local").one(); + UUID localVersion = localRow.getUUID("schema_version"); + Row peerRow = controlSession.execute("SELECT peer, schema_version FROM system.peers").one(); + InetAddress peerAddress = peerRow.getInet("peer"); + UUID peerVersion = peerRow.getUUID("schema_version"); + // The two nodes should be in agreement at this point, but check just in case: + assertThat(localVersion).isEqualTo(peerVersion); + + // Now check the method under test: + assertThat(cluster().getMetadata().checkSchemaAgreement()).isTrue(); + + // Insert a fake version to simulate a disagreement: + forceSchemaVersion(controlSession, peerAddress, UUIDs.random()); + assertThat(cluster().getMetadata().checkSchemaAgreement()).isFalse(); + + forceSchemaVersion(controlSession, peerAddress, peerVersion); + } + + private static void forceSchemaVersion( + Session session, InetAddress peerAddress, UUID schemaVersion) { + session.execute( + String.format( + "UPDATE system.peers SET schema_version = %s WHERE peer = %s", + TypeCodec.uuid().format(schemaVersion), TypeCodec.inet().format(peerAddress))); + } } - diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java index 995c5b1bbd7..7cb08d97d22 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,199 +17,205 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + import com.datastax.driver.core.policies.DelegatingLoadBalancingPolicy; import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.Policies; import com.google.common.util.concurrent.Uninterruptibles; -import org.mockito.ArgumentCaptor; -import org.testng.annotations.Test; - import java.util.Collections; import java.util.Iterator; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; +import org.mockito.ArgumentCaptor; +import org.testng.annotations.Test; @CCMConfig(numberOfNodes = 2, dirtiesContext = true, createCluster = false) public class SchemaChangesCCTest extends CCMTestsSupport { - private static final int NOTIF_TIMEOUT_MS = 5000; - - /** - * Validates that any schema change events made while the control connection is down are - * propagated when the control connection is re-established. - *

-     * <p/>
    - * Note that on control connection recovery not all schema changes are propagated. For example, - * if a table was altered and then dropped only a drop event would be received as that is all - * that can be discerned. - * - * @test_category control_connection, schema - * @expected_result keyspace and table add, drop, and remove events are propagated on control connection reconnect. - * @jira_ticket JAVA-151 - * @since 2.0.11, 2.1.8, 2.2.1 - */ - @Test(groups = "long") - public void should_receive_changes_made_while_control_connection_is_down_on_reconnect() throws Exception { - ToggleablePolicy lbPolicy = new ToggleablePolicy(Policies.defaultLoadBalancingPolicy()); - Cluster cluster = register( - Cluster.builder() - .withLoadBalancingPolicy(lbPolicy) - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); - // Put cluster2 control connection on node 2 so it doesn't go down (to prevent noise for debugging). - Cluster cluster2 = register( - Cluster.builder() - .withLoadBalancingPolicy(lbPolicy) - .addContactPoints(getContactPoints().get(1)) - .withPort(ccm().getBinaryPort()) - .build()); - SchemaChangeListener listener = mock(SchemaChangeListener.class); - - cluster.init(); - cluster.register(listener); - - Session session2 = cluster2.connect(); - // Create two keyspaces to experiment with. - session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks1", 1)); - session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks2", 1)); - session2.execute("create table ks1.tbl1 (k text primary key, v text)"); - session2.execute("create table ks1.tbl2 (k text primary key, v text)"); - - // Wait for both create events to be received. - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(2)).onKeyspaceAdded(any(KeyspaceMetadata.class)); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(2)).onTableAdded(any(TableMetadata.class)); - - - KeyspaceMetadata prealteredKeyspace = cluster.getMetadata().getKeyspace("ks1"); - KeyspaceMetadata predroppedKeyspace = cluster.getMetadata().getKeyspace("ks2"); - TableMetadata prealteredTable = cluster.getMetadata().getKeyspace("ks1").getTable("tbl1"); - TableMetadata predroppedTable = cluster.getMetadata().getKeyspace("ks1").getTable("tbl2"); - - // Enable returning empty query plan for default statements. This will - // prevent the control connection from being able to reconnect. - lbPolicy.returnEmptyQueryPlan = true; - - // Stop node 1, which hosts the control connection. - ccm().stop(1); - assertThat(cluster).host(1).goesDownWithin(20, TimeUnit.SECONDS); - - // Ensure control connection is down. - assertThat(cluster.manager.controlConnection.isOpen()).isFalse(); - - // Perform some schema changes that we'll validate when the control connection comes back. - session2.execute("drop keyspace ks2"); - session2.execute("drop table ks1.tbl2"); - session2.execute("alter keyspace ks1 with durable_writes=false"); - session2.execute("alter table ks1.tbl1 add new_col varchar"); - session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks3", 1)); - session2.execute("create table ks1.tbl3 (k text primary key, v text)"); - - // Reset the mock to clear invocations. (sanity check to ensure all events happen after CC comes back up) - reset(listener); - - // Switch the flag so the control connection may now be established. - lbPolicy.returnEmptyQueryPlan = false; - - // Poll on the control connection and wait for it to be reestablished. 
- long maxControlConnectionWait = 60000; - long startTime = System.currentTimeMillis(); - while (!cluster.manager.controlConnection.isOpen() && System.currentTimeMillis() - startTime < maxControlConnectionWait) { - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - - assertThat(cluster.manager.controlConnection.isOpen()) - .as("Control connection was not opened after %dms.", maxControlConnectionWait) - .isTrue(); - - // Ensure the drop keyspace event shows up. - ArgumentCaptor removedKeyspace = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceRemoved(removedKeyspace.capture()); - assertThat(removedKeyspace.getValue()) - .hasName("ks2") - .isEqualTo(predroppedKeyspace); - - // Ensure the drop table event shows up. - ArgumentCaptor droppedTable = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(droppedTable.capture()); - - assertThat(droppedTable.getValue()) - .isInKeyspace("ks1") - .hasName("tbl2") - .isEqualTo(predroppedTable); - - // Ensure that the alter keyspace event shows up. - ArgumentCaptor alteredKeyspace = ArgumentCaptor.forClass(KeyspaceMetadata.class); - ArgumentCaptor originalKeyspace = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceChanged(alteredKeyspace.capture(), originalKeyspace.capture()); - - // Previous metadata should match the metadata observed before disconnect. - assertThat(originalKeyspace.getValue()) - .hasName("ks1") - .isDurableWrites() - .isEqualTo(prealteredKeyspace); - - // New metadata should reflect that the durable writes attribute changed. - assertThat(alteredKeyspace.getValue()) - .hasName("ks1") - .isNotDurableWrites(); - - // Ensure the alter table event shows up. - ArgumentCaptor alteredTable = ArgumentCaptor.forClass(TableMetadata.class); - ArgumentCaptor originalTable = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableChanged(alteredTable.capture(), originalTable.capture()); - - // Previous metadata should match the metadata observed before disconnect. - assertThat(originalTable.getValue()) - .isInKeyspace("ks1") - .hasName("tbl1") - .doesNotHaveColumn("new_col") - .isEqualTo(prealteredTable); - - // New metadata should reflect that the column type changed. - assertThat(alteredTable.getValue()) - .isInKeyspace("ks1") - .hasName("tbl1") - .hasColumn("new_col", DataType.varchar()); - - // Ensure the add keyspace event shows up. - ArgumentCaptor addedKeyspace = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(addedKeyspace.capture()); - - assertThat(addedKeyspace.getValue()).hasName("ks3"); - - // Ensure the add table event shows up. - ArgumentCaptor addedTable = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(addedTable.capture()); - - assertThat(addedTable.getValue()) - .isInKeyspace("ks1") - .hasName("tbl3"); + private static final int NOTIF_TIMEOUT_MS = 5000; + + /** + * Validates that any schema change events made while the control connection is down are + * propagated when the control connection is re-established. + * + *

+   * <p>
    Note that on control connection recovery not all schema changes are propagated. For example, + * if a table was altered and then dropped only a drop event would be received as that is all that + * can be discerned. + * + * @test_category control_connection, schema + * @expected_result keyspace and table add, drop, and remove events are propagated on control + * connection reconnect. + * @jira_ticket JAVA-151 + * @since 2.0.11, 2.1.8, 2.2.1 + */ + @Test(groups = "long") + public void should_receive_changes_made_while_control_connection_is_down_on_reconnect() + throws Exception { + ToggleablePolicy lbPolicy = new ToggleablePolicy(Policies.defaultLoadBalancingPolicy()); + Cluster cluster = register(createClusterBuilder().withLoadBalancingPolicy(lbPolicy).build()); + // Put cluster2 control connection on node 2 so it doesn't go down (to prevent noise for + // debugging). + Cluster cluster2 = register(createClusterBuilder().withLoadBalancingPolicy(lbPolicy).build()); + SchemaChangeListener listener = mock(SchemaChangeListener.class); + + cluster.init(); + cluster.register(listener); + + Session session2 = cluster2.connect(); + // Create two keyspaces to experiment with. + session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks1", 1)); + session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks2", 1)); + session2.execute("create table ks1.tbl1 (k text primary key, v text)"); + session2.execute("create table ks1.tbl2 (k text primary key, v text)"); + + // Wait for both create events to be received. + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(2)) + .onKeyspaceAdded(any(KeyspaceMetadata.class)); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(2)).onTableAdded(any(TableMetadata.class)); + + KeyspaceMetadata prealteredKeyspace = cluster.getMetadata().getKeyspace("ks1"); + KeyspaceMetadata predroppedKeyspace = cluster.getMetadata().getKeyspace("ks2"); + TableMetadata prealteredTable = cluster.getMetadata().getKeyspace("ks1").getTable("tbl1"); + TableMetadata predroppedTable = cluster.getMetadata().getKeyspace("ks1").getTable("tbl2"); + + // Enable returning empty query plan for default statements. This will + // prevent the control connection from being able to reconnect. + lbPolicy.returnEmptyQueryPlan = true; + + // Stop node 1, which hosts the control connection. + ccm().stop(1); + assertThat(cluster).host(1).goesDownWithin(20, TimeUnit.SECONDS); + + // Ensure control connection is down. + assertThat(cluster.manager.controlConnection.isOpen()).isFalse(); + + // Perform some schema changes that we'll validate when the control connection comes back. + session2.execute("drop keyspace ks2"); + session2.execute("drop table ks1.tbl2"); + + // Modifying keyspaces with a node down is not possible in 4.0+ (CASSANDRA-14404) + if (!isCassandraVersionOrHigher("4.0.0")) { + session2.execute("alter keyspace ks1 with durable_writes=false"); + } + + session2.execute("alter table ks1.tbl1 add new_col varchar"); + session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks3", 1)); + session2.execute("create table ks1.tbl3 (k text primary key, v text)"); + + // Reset the mock to clear invocations. (sanity check to ensure all events happen after CC comes + // back up) + reset(listener); + + // Switch the flag so the control connection may now be established. + lbPolicy.returnEmptyQueryPlan = false; + + // Poll on the control connection and wait for it to be reestablished. 
+ long maxControlConnectionWait = 60000; + long startTime = System.currentTimeMillis(); + while (!cluster.manager.controlConnection.isOpen() + && System.currentTimeMillis() - startTime < maxControlConnectionWait) { + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } + + assertThat(cluster.manager.controlConnection.isOpen()) + .as("Control connection was not opened after %dms.", maxControlConnectionWait) + .isTrue(); + + // Ensure the drop keyspace event shows up. + ArgumentCaptor removedKeyspace = + ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onKeyspaceRemoved(removedKeyspace.capture()); + assertThat(removedKeyspace.getValue()).hasName("ks2").isEqualTo(predroppedKeyspace); + + // Ensure the drop table event shows up. + ArgumentCaptor droppedTable = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(droppedTable.capture()); + + assertThat(droppedTable.getValue()) + .isInKeyspace("ks1") + .hasName("tbl2") + .isEqualTo(predroppedTable); + + // Ensure that the alter keyspace event shows up. + ArgumentCaptor alteredKeyspace = + ArgumentCaptor.forClass(KeyspaceMetadata.class); + ArgumentCaptor originalKeyspace = + ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onKeyspaceChanged(alteredKeyspace.capture(), originalKeyspace.capture()); + + // Previous metadata should match the metadata observed before disconnect. + assertThat(originalKeyspace.getValue()) + .hasName("ks1") + .isDurableWrites() + .isEqualTo(prealteredKeyspace); + + // Modifying keyspaces with a node down is not possible in 4.0+ (CASSANDRA-14404) + if (!isCassandraVersionOrHigher("4.0.0")) { + // New metadata should reflect that the durable writes attribute changed. + assertThat(alteredKeyspace.getValue()).hasName("ks1").isNotDurableWrites(); + } + + // Ensure the alter table event shows up. + ArgumentCaptor alteredTable = ArgumentCaptor.forClass(TableMetadata.class); + ArgumentCaptor originalTable = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onTableChanged(alteredTable.capture(), originalTable.capture()); + + // Previous metadata should match the metadata observed before disconnect. + assertThat(originalTable.getValue()) + .isInKeyspace("ks1") + .hasName("tbl1") + .doesNotHaveColumn("new_col") + .isEqualTo(prealteredTable); + + // New metadata should reflect that the column type changed. + assertThat(alteredTable.getValue()) + .isInKeyspace("ks1") + .hasName("tbl1") + .hasColumn("new_col", DataType.varchar()); + + // Ensure the add keyspace event shows up. + ArgumentCaptor addedKeyspace = + ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(addedKeyspace.capture()); + + assertThat(addedKeyspace.getValue()).hasName("ks3"); + + // Ensure the add table event shows up. + ArgumentCaptor addedTable = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(addedTable.capture()); + + assertThat(addedTable.getValue()).isInKeyspace("ks1").hasName("tbl3"); + } + + /** + * A load balancing policy that can be "disabled" by having its query plan return no hosts when + * given the 'DEFAULT' statement. This statement is used for retrieving query plan and for finding + * what hosts to use for control connection. 
+ */ + public static class ToggleablePolicy extends DelegatingLoadBalancingPolicy { + + volatile boolean returnEmptyQueryPlan; + + public ToggleablePolicy(LoadBalancingPolicy delegate) { + super(delegate); } - /** - * A load balancing policy that can be "disabled" by having its query plan return no hosts when - * given the 'DEFAULT' statement. This statement is used for retrieving query plan - * and for finding what hosts to use for control connection. - */ - public static class ToggleablePolicy extends DelegatingLoadBalancingPolicy { - - volatile boolean returnEmptyQueryPlan; - - public ToggleablePolicy(LoadBalancingPolicy delegate) { - super(delegate); - } - - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - if (returnEmptyQueryPlan && statement == Statement.DEFAULT) - return Collections.emptyList().iterator(); - else - return super.newQueryPlan(loggedKeyspace, statement); - } + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + if (returnEmptyQueryPlan && statement == Statement.DEFAULT) + return Collections.emptyList().iterator(); + else return super.newQueryPlan(loggedKeyspace, statement); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java index 23f06c8084e..1193031c812 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,633 +17,768 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.Metadata.handleId; +import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.after; +import static org.mockito.Mockito.anyListOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + import com.datastax.driver.core.utils.Bytes; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import org.mockito.ArgumentCaptor; -import org.testng.annotations.*; - import java.util.List; +import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.Metadata.handleId; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.*; +import org.mockito.ArgumentCaptor; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; @CCMConfig(createCluster = false, config = "enable_user_defined_functions:true") public class SchemaChangesTest extends CCMTestsSupport { - private static final String CREATE_KEYSPACE = "CREATE KEYSPACE %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }"; - private static final String ALTER_KEYSPACE = "ALTER KEYSPACE %s WITH durable_writes = false"; - private static final String DROP_KEYSPACE = "DROP KEYSPACE %s"; + private static final String CREATE_KEYSPACE = + "CREATE KEYSPACE %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }"; + private static final String ALTER_KEYSPACE = "ALTER KEYSPACE %s WITH durable_writes = false"; + private static final String DROP_KEYSPACE = "DROP KEYSPACE %s"; - private static final String CREATE_TABLE = "CREATE TABLE %s.table1(i int primary key)"; - private static final String ALTER_TABLE = "ALTER TABLE %s.table1 ADD j int"; - private static final String DROP_TABLE = "DROP TABLE %s.table1"; + private static final String CREATE_TABLE = "CREATE TABLE %s.table1(i int primary key)"; + private static final String ALTER_TABLE = "ALTER TABLE %s.table1 ADD j int"; + private static final String DROP_TABLE = "DROP TABLE %s.table1"; - /** - * The maximum time that the test will wait to check that listeners have been notified. - * This threshold is intentionally set to a very high value to allow CI tests - * to pass. 
- */ - private static final long NOTIF_TIMEOUT_MS = TimeUnit.MINUTES.toMillis(1); + /** + * The maximum time that the test will wait to check that listeners have been notified. This + * threshold is intentionally set to a very high value to allow CI tests to pass. + */ + private static final long NOTIF_TIMEOUT_MS = TimeUnit.MINUTES.toMillis(1); - Cluster cluster1; - Cluster cluster2; // a second cluster to check that other clients also get notified - Cluster schemaDisabledCluster; // a cluster with schema metadata disabled. + Cluster cluster1; + Cluster cluster2; // a second cluster to check that other clients also get notified + Cluster schemaDisabledCluster; // a cluster with schema metadata disabled. - Session session1; + Session session1; - Session schemaDisabledSession; + Session schemaDisabledSession; - SchemaChangeListener listener1; - SchemaChangeListener listener2; - SchemaChangeListener schemaDisabledListener; + SchemaChangeListener listener1; + SchemaChangeListener listener2; + SchemaChangeListener schemaDisabledListener; - List listeners; + List listeners; - ControlConnection schemaDisabledControlConnection; + ControlConnection schemaDisabledControlConnection; - @BeforeClass(groups = "short") - public void setup() throws InterruptedException { - Cluster.Builder builder = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()); - cluster1 = builder.build(); - cluster2 = builder.build(); - schemaDisabledCluster = spy(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + @BeforeClass(groups = "short") + public void setup() throws InterruptedException { + cluster1 = createClusterBuilderNoDebouncing().build(); + cluster2 = createClusterBuilderNoDebouncing().build(); + schemaDisabledCluster = + spy( + createClusterBuilder() .withClusterName("schema-disabled") - .withQueryOptions(nonDebouncingQueryOptions() - .setMetadataEnabled(false) - ).build()); - - schemaDisabledSession = schemaDisabledCluster.connect(); - - schemaDisabledControlConnection = spy(schemaDisabledCluster.manager.controlConnection); - schemaDisabledCluster.manager.controlConnection = schemaDisabledControlConnection; - - session1 = cluster1.connect(); - cluster2.init(); - - cluster1.register(listener1 = mock(SchemaChangeListener.class)); - cluster2.register(listener2 = mock(SchemaChangeListener.class)); - listeners = Lists.newArrayList(listener1, listener2); - - schemaDisabledCluster.register(schemaDisabledListener = mock(SchemaChangeListener.class)); - verify(schemaDisabledListener, times(1)).onRegister(schemaDisabledCluster); - - execute(CREATE_KEYSPACE, "lowercase"); - execute(CREATE_KEYSPACE, "\"CaseSensitive\""); - } - - @AfterClass(groups = "short", alwaysRun = true) - public void teardown() { - if (cluster1 != null) - cluster1.close(); - if (cluster2 != null) - cluster2.close(); - if (schemaDisabledCluster != null) - schemaDisabledCluster.close(); - } - - - @DataProvider(name = "existingKeyspaceName") - public static Object[][] existingKeyspaceName() { - return new Object[][]{{"lowercase"}, {"\"CaseSensitive\""}}; - } - - @DataProvider(name = "newKeyspaceName") - public static Object[][] newKeyspaceName() { - return new Object[][]{{"lowercase2"}, {"\"CaseSensitive2\""}}; - } - - @BeforeMethod(groups = "short") - public void resetListeners() { - for (SchemaChangeListener listener : listeners) { - reset(listener); - } - reset(schemaDisabledControlConnection); - } - - /** - * Ensures that a 
listener registered on a Cluster that has schema metadata disabled - * is never invoked with schema change events. - * - * @jira_ticket JAVA-858 - * @since 2.0.11 - */ - @AfterMethod(groups = "short") - public void verifyNoMoreInteractionsWithListener() { - verifyNoMoreInteractions(schemaDisabledListener); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - public void should_notify_of_table_creation(String keyspace) throws InterruptedException { - execute(CREATE_TABLE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasName("table1"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getTable("table1")).isNotNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - public void should_notify_of_table_update(String keyspace) throws InterruptedException { - execute(CREATE_TABLE, keyspace); - ArgumentCaptor added = null; - for (SchemaChangeListener listener : listeners) { - added = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasName("table1"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getTable("table1")).hasNoColumn("j"); - assert added != null; - execute(ALTER_TABLE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = ArgumentCaptor.forClass(TableMetadata.class); - ArgumentCaptor previous = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableChanged(current.capture(), previous.capture()); - assertThat(previous.getValue()) - .isEqualTo(added.getValue()) - .hasNoColumn("j"); - assertThat(current.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasName("table1") - .hasColumn("j"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getTable("table1")).hasColumn("j"); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - public void should_notify_of_table_drop(String keyspace) throws InterruptedException { - execute(CREATE_TABLE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); - assertThat(added.getValue()) - .hasName("table1") - .isInKeyspace(handleId(keyspace)); - } - execute(DROP_TABLE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(removed.capture()); - assertThat(removed.getValue()).hasName("table1"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getTable("table1")).isNull(); - } - - @SuppressWarnings("RedundantCast") - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.1.0") - public void should_notify_of_udt_creation(String keyspace) { - session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(UserType.class); - verify(listener, 
timeout(NOTIF_TIMEOUT_MS).times(1)).onUserTypeAdded(added.capture()); - assertThat((DataType) added.getValue()) - .isUserType(handleId(keyspace), "type1"); - } - for (Metadata m : metadatas()) - assertThat((DataType) m.getKeyspace(keyspace).getUserType("type1")).isNotNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.1.0") - public void should_notify_of_udt_update(String keyspace) { - session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); - session1.execute(String.format("ALTER TYPE %s.type1 ADD j int", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = ArgumentCaptor.forClass(UserType.class); - ArgumentCaptor previous = ArgumentCaptor.forClass(UserType.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onUserTypeChanged(current.capture(), previous.capture()); - assertThat(previous.getValue().getFieldNames()).doesNotContain("j"); - assertThat(current.getValue().getFieldNames()).contains("j"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getUserType("type1").getFieldType("j")).isNotNull(); - } - - @SuppressWarnings("RedundantCast") - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.1.0") - public void should_notify_of_udt_drop(String keyspace) { - session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); - session1.execute(String.format("DROP TYPE %s.type1", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(UserType.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onUserTypeRemoved(removed.capture()); - assertThat((DataType) removed.getValue()).isUserType(handleId(keyspace), "type1"); - } - for (Metadata m : metadatas()) - assertThat((DataType) m.getKeyspace(keyspace).getUserType("type1")).isNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_function_creation(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(FunctionMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"ID\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())) - .isNotNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_function_update(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(FunctionMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"ID\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())) - .isNotNull(); - session1.execute(String.format("CREATE OR REPLACE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE 
java AS 'return i + 1;'", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = ArgumentCaptor.forClass(FunctionMetadata.class); - ArgumentCaptor previous = ArgumentCaptor.forClass(FunctionMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionChanged(current.capture(), previous.capture()); - assertThat(previous.getValue()).hasBody("return i;"); - assertThat(current.getValue()).hasBody("return i + 1;"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint()).getBody()) - .isEqualTo("return i + 1;"); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_function_drop(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", keyspace)); - session1.execute(String.format("DROP FUNCTION %s.\"ID\"", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(FunctionMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionRemoved(removed.capture()); - assertThat(removed.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"ID\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())) - .isNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_aggregate_creation(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" - + " AS 'return s+v;'", keyspace)); - session1.execute(String.format("CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0;", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(AggregateMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"SUM\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint())) - .isNotNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_aggregate_update(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" - + " AS 'return s+v;'", keyspace)); - session1.execute(String.format("CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(AggregateMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateAdded(added.capture()); - assertThat(added.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"SUM\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint()).getInitCond()) - .isEqualTo(0); - session1.execute(String.format("CREATE OR REPLACE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 1", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = ArgumentCaptor.forClass(AggregateMetadata.class); - ArgumentCaptor 
previous = ArgumentCaptor.forClass(AggregateMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateChanged(current.capture(), previous.capture()); - assertThat(previous.getValue()).hasInitCond(0); - assertThat(current.getValue()).hasInitCond(1); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint()).getInitCond()) - .isEqualTo(1); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("2.2.0") - public void should_notify_of_aggregate_drop(String keyspace) { - session1.execute(String.format("CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" - + " AS 'return s+v;'", keyspace)); - session1.execute(String.format("CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0", keyspace)); - session1.execute(String.format("DROP AGGREGATE %s.\"SUM\"", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(AggregateMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateRemoved(removed.capture()); - assertThat(removed.getValue()) - .isInKeyspace(handleId(keyspace)) - .hasSignature("\"SUM\"(int)"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint())) - .isNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("3.0") - public void should_notify_of_view_creation(String keyspace) { - session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); - session1.execute(String.format("CREATE MATERIALIZED VIEW %s.mv1 AS SELECT c FROM %s.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)", keyspace, keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(MaterializedViewMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onMaterializedViewAdded(removed.capture()); - assertThat(removed.getValue()) - .hasName("mv1"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNotNull(); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("3.0") - public void should_notify_of_view_update(String keyspace) { - session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); - session1.execute(String.format("CREATE MATERIALIZED VIEW %s.mv1 AS SELECT c FROM %s.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c) WITH compaction = { 'class' : 'SizeTieredCompactionStrategy' }", keyspace, keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(MaterializedViewMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onMaterializedViewAdded(removed.capture()); - assertThat(removed.getValue()) - .hasName("mv1"); - assertThat(removed.getValue().getOptions().getCompaction().get("class")) - .contains("SizeTieredCompactionStrategy"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1").getOptions().getCompaction().get("class")).contains("SizeTieredCompactionStrategy"); - session1.execute(String.format("ALTER MATERIALIZED VIEW %s.mv1 WITH compaction = { 'class' : 'LeveledCompactionStrategy' }", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = 
ArgumentCaptor.forClass(MaterializedViewMetadata.class); - ArgumentCaptor previous = ArgumentCaptor.forClass(MaterializedViewMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onMaterializedViewChanged(current.capture(), previous.capture()); - assertThat(previous.getValue().getOptions().getCompaction().get("class")) - .contains("SizeTieredCompactionStrategy"); - assertThat(current.getValue().getOptions().getCompaction().get("class")) - .contains("LeveledCompactionStrategy"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1").getOptions().getCompaction().get("class")).contains("LeveledCompactionStrategy"); - } - - @Test(groups = "short", dataProvider = "existingKeyspaceName") - @CassandraVersion("3.0") - public void should_notify_of_view_drop(String keyspace) { - session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); - session1.execute(String.format("CREATE MATERIALIZED VIEW %s.mv1 AS SELECT c FROM %s.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)", keyspace, keyspace)); - session1.execute(String.format("DROP MATERIALIZED VIEW %s.mv1", keyspace)); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor removed = ArgumentCaptor.forClass(MaterializedViewMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onMaterializedViewRemoved(removed.capture()); - assertThat(removed.getValue()) - .hasName("mv1"); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNull(); - } - - @Test(groups = "short", dataProvider = "newKeyspaceName") - public void should_notify_of_keyspace_creation(String keyspace) throws InterruptedException { - execute(CREATE_KEYSPACE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); - assertThat(added.getValue()).hasName(handleId(keyspace)); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace)).isNotNull(); - } - - @Test(groups = "short", dataProvider = "newKeyspaceName") - public void should_notify_of_keyspace_update(String keyspace) throws InterruptedException { - execute(CREATE_KEYSPACE, keyspace); - ArgumentCaptor added = null; - for (SchemaChangeListener listener : listeners) { - added = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); - assertThat(added.getValue()).hasName(handleId(keyspace)); - } - assert added != null; - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace).isDurableWrites()).isTrue(); - execute(ALTER_KEYSPACE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor current = ArgumentCaptor.forClass(KeyspaceMetadata.class); - ArgumentCaptor previous = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceChanged(current.capture(), previous.capture()); - assertThat(previous.getValue()) - .isEqualTo(added.getValue()) - .isDurableWrites(); - assertThat(current.getValue()) - .hasName(handleId(keyspace)) - .isNotDurableWrites(); - } - for (Metadata m : metadatas()) - assertThat(m.getKeyspace(keyspace)).isNotDurableWrites(); - } - - @Test(groups = "short", dataProvider = "newKeyspaceName") - public void should_notify_of_keyspace_drop(String keyspace) throws InterruptedException { - 
execute(CREATE_KEYSPACE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor added = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); - assertThat(added.getValue()).hasName(handleId(keyspace)); - } - for (Metadata m : metadatas()) - assertThat(m.getReplicas(keyspace, Bytes.fromHexString("0xCAFEBABE"))).isNotEmpty(); - execute(CREATE_TABLE, keyspace); // to test table drop notifications - execute(DROP_KEYSPACE, keyspace); - for (SchemaChangeListener listener : listeners) { - ArgumentCaptor table = ArgumentCaptor.forClass(TableMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(table.capture()); - assertThat(table.getValue()) - .hasName("table1") - .isInKeyspace(handleId(keyspace)); - ArgumentCaptor ks = ArgumentCaptor.forClass(KeyspaceMetadata.class); - verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceRemoved(ks.capture()); - assertThat(ks.getValue()) - .hasName(handleId(keyspace)); - } - for (Metadata m : metadatas()) { - assertThat(m.getKeyspace(keyspace)).isNull(); - assertThat(m.getReplicas(keyspace, Bytes.fromHexString("0xCAFEBABE"))).isEmpty(); - } - } - - /** - * Ensures that calling {@link Metadata#newToken(String)} on a Cluster that has schema - * metadata disabled will throw a {@link IllegalStateException}. - * - * @jira_ticket JAVA-858 - * @since 2.0.11 - */ - @Test(groups = "short", expectedExceptions = IllegalStateException.class) - public void should_throw_illegal_state_exception_on_newToken_with_metadata_disabled() { - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions() - .setMetadataEnabled(false) - ).build(); - - try { - cluster.init(); - cluster.getMetadata().newToken("0x00"); - } finally { - cluster.close(); - } - } - - - /** - * Ensures that calling {@link Metadata#newTokenRange(Token, Token)} on a Cluster that has schema - * metadata disabled will throw a {@link IllegalStateException}. - * - * @jira_ticket JAVA-858 - * @since 2.0.11 - */ - @Test(groups = "short", expectedExceptions = IllegalStateException.class) - public void should_throw_illegal_state_exception_on_newTokenRange_with_metadata_disabled() { - Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions() - .setMetadataEnabled(false) - ).build(); - - try { - cluster.init(); - Token.Factory factory = Token.getFactory("Murmur3Partitioner"); - Token token = factory.fromString(Long.toString(1)); - cluster.getMetadata().newTokenRange(token, token); - } finally { - cluster.close(); - } - } - - /** - * Ensures that executing a query causing a schema change with a Cluster that has schema metadata - * disabled will still wait on schema agreement, but not refresh the schema. - * - * @jira_ticket JAVA-858 - * @since 2.0.11 - */ - @Test(groups = "short", dataProvider = "existingKeyspaceName") - public void should_not_refresh_schema_on_schema_change_response(String keyspace) throws InterruptedException { - ResultSet rs = schemaDisabledSession.execute(String.format(CREATE_TABLE, keyspace)); - - // Should still wait on schema agreement. 
- assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); - assertThat(schemaDisabledCluster.getMetadata().checkSchemaAgreement()).isTrue(); - - // Wait up to 1 second (since refreshSchema submitted in an executor) and check that refreshSchema never called. - verify(schemaDisabledControlConnection, after(1000).never()).refreshSchema(any(SchemaElement.class), any(String.class), any(String.class), anyListOf(String.class)); - } - - /** - * Ensures that when schema metadata is enabled using {@link QueryOptions#setMetadataEnabled(boolean)} - * that a schema and nodelist refresh is submitted, but only if schema metadata is currently disabled. - * - * @jira_ticket JAVA-858 - * @since 2.0.11 - */ - @Test(groups = "short", dataProvider = "existingKeyspaceName") - public void should_refresh_schema_and_token_map_if_schema_metadata_reenabled(String keyspace) throws Exception { - try { - schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); - - verify(schemaDisabledControlConnection, after(1000)).refreshSchema(null, null, null, null); - - // Ensure that there is schema metadata. - assertThat(schemaDisabledCluster.getMetadata().getKeyspace(keyspace)).isNotNull(); - Token token1 = schemaDisabledCluster.getMetadata().newToken("0"); - Token token2 = schemaDisabledCluster.getMetadata().newToken("111111"); - assertThat(token1).isNotNull(); - assertThat(token2).isNotNull(); - assertThat(schemaDisabledCluster.getMetadata().newTokenRange(token1, token2)).isNotNull(); - - assertThat(schemaDisabledCluster.getMetadata().getTokenRanges()).isNotNull().isNotEmpty(); - - // Try enabling again and ensure schema is not refreshed again. - reset(schemaDisabledControlConnection); - schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); - verify(schemaDisabledControlConnection, after(1000).never()).refreshSchema(null, null, null, null); - } finally { - // Reset listener mock to not count it's interactions in this test. 
- reset(schemaDisabledListener); - schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(false); - } - } - - @AfterMethod(groups = "short", alwaysRun = true) - public void cleanup() throws InterruptedException { - if (session1 != null) { - ListenableFuture> f = Futures.successfulAsList(Lists.newArrayList( - session1.executeAsync("DROP TABLE lowercase.table1"), - session1.executeAsync("DROP TABLE \"CaseSensitive\".table1"), - session1.executeAsync("DROP TYPE lowercase.type1"), - session1.executeAsync("DROP TYPE \"CaseSensitive\".type1"), - session1.executeAsync("DROP FUNCTION lowercase.\"ID\""), - session1.executeAsync("DROP FUNCTION \"CaseSensitive\".\"ID\""), - session1.executeAsync("DROP FUNCTION lowercase.\"PLUS\""), - session1.executeAsync("DROP FUNCTION \"CaseSensitive\".\"PLUS\""), - session1.executeAsync("DROP AGGREGATE lowercase.\"SUM\""), - session1.executeAsync("DROP AGGREGATE \"CaseSensitive\".\"SUM\""), - session1.executeAsync("DROP MATERIALIZED VIEW lowercase.mv1"), - session1.executeAsync("DROP MATERIALIZED VIEW \"CaseSensitive\".mv1"), - session1.executeAsync("DROP KEYSPACE lowercase2"), - session1.executeAsync("DROP KEYSPACE \"CaseSensitive2\"") - )); - Futures.getUnchecked(f); - } - } - - private void execute(String cql, String keyspace) throws InterruptedException { - session1.execute(String.format(cql, keyspace)); - } - - private List metadatas() { - return Lists.newArrayList(cluster1.getMetadata(), cluster2.getMetadata()); + .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) + .build()); + + schemaDisabledSession = schemaDisabledCluster.connect(); + + schemaDisabledControlConnection = spy(schemaDisabledCluster.manager.controlConnection); + schemaDisabledCluster.manager.controlConnection = schemaDisabledControlConnection; + + session1 = cluster1.connect(); + cluster2.init(); + + cluster1.register(listener1 = mock(SchemaChangeListener.class)); + cluster2.register(listener2 = mock(SchemaChangeListener.class)); + listeners = Lists.newArrayList(listener1, listener2); + + schemaDisabledCluster.register(schemaDisabledListener = mock(SchemaChangeListener.class)); + verify(schemaDisabledListener, times(1)).onRegister(schemaDisabledCluster); + + execute(CREATE_KEYSPACE, "lowercase"); + execute(CREATE_KEYSPACE, "\"CaseSensitive\""); + } + + @AfterClass(groups = "short", alwaysRun = true) + public void teardown() { + if (cluster1 != null) cluster1.close(); + if (cluster2 != null) cluster2.close(); + if (schemaDisabledCluster != null) schemaDisabledCluster.close(); + } + + @DataProvider(name = "existingKeyspaceName") + public static Object[][] existingKeyspaceName() { + return new Object[][] {{"lowercase"}, {"\"CaseSensitive\""}}; + } + + @DataProvider(name = "newKeyspaceName") + public static Object[][] newKeyspaceName() { + return new Object[][] {{"lowercase2"}, {"\"CaseSensitive2\""}}; + } + + @BeforeMethod(groups = "short") + public void resetListeners() { + for (SchemaChangeListener listener : listeners) { + reset(listener); + } + reset(schemaDisabledControlConnection); + } + + /** + * Ensures that a listener registered on a Cluster that has schema metadata disabled is never + * invoked with schema change events. 
+ * + * @jira_ticket JAVA-858 + * @since 2.0.11 + */ + @AfterMethod(groups = "short") + public void verifyNoMoreInteractionsWithListener() { + verifyNoMoreInteractions(schemaDisabledListener); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + public void should_notify_of_table_creation(String keyspace) throws InterruptedException { + execute(CREATE_TABLE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasName("table1"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getTable("table1")).isNotNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + public void should_notify_of_table_update(String keyspace) throws InterruptedException { + execute(CREATE_TABLE, keyspace); + ArgumentCaptor added = null; + for (SchemaChangeListener listener : listeners) { + added = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasName("table1"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getTable("table1")).hasNoColumn("j"); + assert added != null; + execute(ALTER_TABLE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(TableMetadata.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onTableChanged(current.capture(), previous.capture()); + assertThat(previous.getValue()).isEqualTo(added.getValue()).hasNoColumn("j"); + assertThat(current.getValue()) + .isInKeyspace(handleId(keyspace)) + .hasName("table1") + .hasColumn("j"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getTable("table1")).hasColumn("j"); + } + + /** + * JAVA-2204: Make sure we don't accidentally store new table instances in an old keyspace + * instance, otherwise this will create a memory leak if a client holds onto a stale table + * instance (in particular, the object mapper does). + */ + @Test(groups = "short") + public void should_not_update_tables_on_stale_keyspace_instance() throws InterruptedException { + final Cluster cluster1 = session1.getCluster(); + final String keyspaceName = "lowercase"; + execute(CREATE_TABLE, keyspaceName); + final KeyspaceMetadata oldKeyspace = cluster1.getMetadata().getKeyspace(keyspaceName); + TableMetadata oldTable = oldKeyspace.getTable("table1"); + + // Force a full refresh + cluster1.getConfiguration().getQueryOptions().setMetadataEnabled(false); + cluster1.getConfiguration().getQueryOptions().setMetadataEnabled(true); + ConditionChecker.check() + .that( + new Callable() { + @Override + public Boolean call() { + return cluster1.getMetadata().getKeyspace(keyspaceName) == oldKeyspace; + } + }) + .becomesFalse(); + + // Before the fix, the schema parser updated the old keyspace's tables during a full refresh: + // oldTable -> oldKeyspace -> newTable + // If the client held onto the initial table instance, successive refreshes would grow the chain + // over time: + // table1 -> keyspace1 -> table2 -> keyspace2 -> ... 
+ assertThat(oldKeyspace.getTable("table1")).isSameAs(oldTable); + } + + /** + * Verifies that when a table is updated that its associated views remain accessible from the + * table via {@link TableMetadata#getView(String)}. + * + * @jira_ticket JAVA-1872 + */ + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("3.0") + public void should_retain_view_on_table_update(String keyspace) throws InterruptedException { + // Create table and ensure event is received and metadata is updated + session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); + ArgumentCaptor added = null; + for (SchemaChangeListener listener : listeners) { + added = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasName("table1"); + } + for (Metadata m : metadatas()) { + assertThat(m.getKeyspace(keyspace).getTable("table1")).hasNoColumn("j"); + } + assert added != null; + + // Create view and ensure event is received and metadata is updated + session1.execute( + String.format( + "CREATE MATERIALIZED VIEW %s.mv1 AS SELECT pk, c FROM %s.table1 WHERE c IS NOT NULL AND pk IS NOT NULL PRIMARY KEY (pk, c)", + keyspace, keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor viewAdded = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onMaterializedViewAdded(viewAdded.capture()); + assertThat(viewAdded.getValue()).hasName("mv1"); + } + for (Metadata m : metadatas()) { + // Ensure materialized view exists and table has reference to it. + assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNotNull(); + assertThat(m.getKeyspace(keyspace).getTable("table1").getView("mv1")).isNotNull(); } + // Alter table, ensure event is received and metadata is updated. Most importantly, ensure view + // is + // present on table metadata. + execute(ALTER_TABLE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(TableMetadata.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onTableChanged(current.capture(), previous.capture()); + assertThat(previous.getValue()).isEqualTo(added.getValue()).hasNoColumn("j"); + assertThat(current.getValue()) + .isInKeyspace(handleId(keyspace)) + .hasName("table1") + .hasColumn("j"); + } + for (Metadata m : metadatas()) { + // Ensure table changed. + assertThat(m.getKeyspace(keyspace).getTable("table1")).hasColumn("j"); + // Ensure view is present on table even after it changes. 
+ assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNotNull(); + assertThat(m.getKeyspace(keyspace).getTable("table1").getView("mv1")).isNotNull(); + } + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + public void should_notify_of_table_drop(String keyspace) throws InterruptedException { + execute(CREATE_TABLE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableAdded(added.capture()); + assertThat(added.getValue()).hasName("table1").isInKeyspace(handleId(keyspace)); + } + execute(DROP_TABLE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(removed.capture()); + assertThat(removed.getValue()).hasName("table1"); + } + for (Metadata m : metadatas()) assertThat(m.getKeyspace(keyspace).getTable("table1")).isNull(); + } + + @SuppressWarnings("RedundantCast") + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.1.0") + public void should_notify_of_udt_creation(String keyspace) { + session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(UserType.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onUserTypeAdded(added.capture()); + assertThat((DataType) added.getValue()).isUserType(handleId(keyspace), "type1"); + } + for (Metadata m : metadatas()) + assertThat((DataType) m.getKeyspace(keyspace).getUserType("type1")).isNotNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.1.0") + public void should_notify_of_udt_update(String keyspace) { + session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); + session1.execute(String.format("ALTER TYPE %s.type1 ADD j int", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(UserType.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(UserType.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onUserTypeChanged(current.capture(), previous.capture()); + assertThat(previous.getValue().getFieldNames()).doesNotContain("j"); + assertThat(current.getValue().getFieldNames()).contains("j"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getUserType("type1").getFieldType("j")).isNotNull(); + } + + @SuppressWarnings("RedundantCast") + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.1.0") + public void should_notify_of_udt_drop(String keyspace) { + session1.execute(String.format("CREATE TYPE %s.type1(i int)", keyspace)); + session1.execute(String.format("DROP TYPE %s.type1", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = ArgumentCaptor.forClass(UserType.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onUserTypeRemoved(removed.capture()); + assertThat((DataType) removed.getValue()).isUserType(handleId(keyspace), "type1"); + } + for (Metadata m : metadatas()) + assertThat((DataType) m.getKeyspace(keyspace).getUserType("type1")).isNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_function_creation(String keyspace) { + 
session1.execute( + String.format( + "CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", + keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(FunctionMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"ID\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())).isNotNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_function_update(String keyspace) { + session1.execute( + String.format( + "CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", + keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(FunctionMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"ID\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())).isNotNull(); + session1.execute( + String.format( + "CREATE OR REPLACE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i + 1;'", + keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(FunctionMetadata.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(FunctionMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onFunctionChanged(current.capture(), previous.capture()); + assertThat(previous.getValue()).hasBody("return i;"); + assertThat(current.getValue()).hasBody("return i + 1;"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint()).getBody()) + .isEqualTo("return i + 1;"); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_function_drop(String keyspace) { + session1.execute( + String.format( + "CREATE FUNCTION %s.\"ID\"(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i;'", + keyspace)); + session1.execute(String.format("DROP FUNCTION %s.\"ID\"", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = ArgumentCaptor.forClass(FunctionMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onFunctionRemoved(removed.capture()); + assertThat(removed.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"ID\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getFunction("\"ID\"", DataType.cint())).isNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_aggregate_creation(String keyspace) { + session1.execute( + String.format( + "CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" + + " AS 'return s+v;'", + keyspace)); + session1.execute( + String.format( + "CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0;", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(AggregateMetadata.class); + 
verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"SUM\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint())).isNotNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_aggregate_update(String keyspace) { + session1.execute( + String.format( + "CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" + + " AS 'return s+v;'", + keyspace)); + session1.execute( + String.format( + "CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(AggregateMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateAdded(added.capture()); + assertThat(added.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"SUM\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint()).getInitCond()) + .isEqualTo(0); + session1.execute( + String.format( + "CREATE OR REPLACE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 1", + keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(AggregateMetadata.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(AggregateMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onAggregateChanged(current.capture(), previous.capture()); + assertThat(previous.getValue()).hasInitCond(0); + assertThat(current.getValue()).hasInitCond(1); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint()).getInitCond()) + .isEqualTo(1); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("2.2.0") + public void should_notify_of_aggregate_drop(String keyspace) { + session1.execute( + String.format( + "CREATE FUNCTION %s.\"PLUS\"(s int, v int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java" + + " AS 'return s+v;'", + keyspace)); + session1.execute( + String.format( + "CREATE AGGREGATE %s.\"SUM\"(int) SFUNC \"PLUS\" STYPE int INITCOND 0", keyspace)); + session1.execute(String.format("DROP AGGREGATE %s.\"SUM\"", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = ArgumentCaptor.forClass(AggregateMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onAggregateRemoved(removed.capture()); + assertThat(removed.getValue()).isInKeyspace(handleId(keyspace)).hasSignature("\"SUM\"(int)"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getAggregate("\"SUM\"", DataType.cint())).isNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("3.0") + public void should_notify_of_view_creation(String keyspace) { + session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); + session1.execute( + String.format( + "CREATE MATERIALIZED VIEW %s.mv1 AS SELECT pk, c FROM %s.table1 WHERE c IS NOT NULL AND pk IS NOT NULL PRIMARY KEY (pk, c)", + keyspace, keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + verify(listener, 
timeout(NOTIF_TIMEOUT_MS).times(1)) + .onMaterializedViewAdded(removed.capture()); + assertThat(removed.getValue()).hasName("mv1"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNotNull(); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("3.0") + public void should_notify_of_view_update(String keyspace) { + session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); + session1.execute( + String.format( + "CREATE MATERIALIZED VIEW %s.mv1 AS SELECT pk, c FROM %s.table1 WHERE c IS NOT NULL AND pk IS NOT NULL PRIMARY KEY (pk, c) WITH compaction = { 'class' : 'SizeTieredCompactionStrategy' }", + keyspace, keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onMaterializedViewAdded(removed.capture()); + assertThat(removed.getValue()).hasName("mv1"); + assertThat(removed.getValue().getOptions().getCompaction().get("class")) + .contains("SizeTieredCompactionStrategy"); + } + for (Metadata m : metadatas()) + assertThat( + m.getKeyspace(keyspace) + .getMaterializedView("mv1") + .getOptions() + .getCompaction() + .get("class")) + .contains("SizeTieredCompactionStrategy"); + session1.execute( + String.format( + "ALTER MATERIALIZED VIEW %s.mv1 WITH compaction = { 'class' : 'LeveledCompactionStrategy' }", + keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + ArgumentCaptor previous = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onMaterializedViewChanged(current.capture(), previous.capture()); + assertThat(previous.getValue().getOptions().getCompaction().get("class")) + .contains("SizeTieredCompactionStrategy"); + assertThat(current.getValue().getOptions().getCompaction().get("class")) + .contains("LeveledCompactionStrategy"); + } + for (Metadata m : metadatas()) + assertThat( + m.getKeyspace(keyspace) + .getMaterializedView("mv1") + .getOptions() + .getCompaction() + .get("class")) + .contains("LeveledCompactionStrategy"); + } + + @Test(groups = "short", dataProvider = "existingKeyspaceName") + @CassandraVersion("3.0") + public void should_notify_of_view_drop(String keyspace) { + session1.execute(String.format("CREATE TABLE %s.table1 (pk int PRIMARY KEY, c int)", keyspace)); + session1.execute( + String.format( + "CREATE MATERIALIZED VIEW %s.mv1 AS SELECT pk, c FROM %s.table1 WHERE c IS NOT NULL AND pk IS NOT NULL PRIMARY KEY (pk, c)", + keyspace, keyspace)); + session1.execute(String.format("DROP MATERIALIZED VIEW %s.mv1", keyspace)); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor removed = + ArgumentCaptor.forClass(MaterializedViewMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onMaterializedViewRemoved(removed.capture()); + assertThat(removed.getValue()).hasName("mv1"); + } + for (Metadata m : metadatas()) + assertThat(m.getKeyspace(keyspace).getMaterializedView("mv1")).isNull(); + } + + @Test(groups = "short", dataProvider = "newKeyspaceName") + public void should_notify_of_keyspace_creation(String keyspace) throws InterruptedException { + execute(CREATE_KEYSPACE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = 
ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); + assertThat(added.getValue()).hasName(handleId(keyspace)); + } + for (Metadata m : metadatas()) assertThat(m.getKeyspace(keyspace)).isNotNull(); + } + + @Test(groups = "short", dataProvider = "newKeyspaceName") + public void should_notify_of_keyspace_update(String keyspace) throws InterruptedException { + execute(CREATE_KEYSPACE, keyspace); + ArgumentCaptor added = null; + for (SchemaChangeListener listener : listeners) { + added = ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); + assertThat(added.getValue()).hasName(handleId(keyspace)); + } + assert added != null; + for (Metadata m : metadatas()) assertThat(m.getKeyspace(keyspace).isDurableWrites()).isTrue(); + execute(ALTER_KEYSPACE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor current = ArgumentCaptor.forClass(KeyspaceMetadata.class); + ArgumentCaptor previous = ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)) + .onKeyspaceChanged(current.capture(), previous.capture()); + assertThat(previous.getValue()).isEqualTo(added.getValue()).isDurableWrites(); + assertThat(current.getValue()).hasName(handleId(keyspace)).isNotDurableWrites(); + } + for (Metadata m : metadatas()) assertThat(m.getKeyspace(keyspace)).isNotDurableWrites(); + } + + @Test(groups = "short", dataProvider = "newKeyspaceName") + public void should_notify_of_keyspace_drop(String keyspace) throws InterruptedException { + execute(CREATE_KEYSPACE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor added = ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceAdded(added.capture()); + assertThat(added.getValue()).hasName(handleId(keyspace)); + } + for (Metadata m : metadatas()) + assertThat(m.getReplicas(keyspace, Bytes.fromHexString("0xCAFEBABE"))).isNotEmpty(); + execute(CREATE_TABLE, keyspace); // to test table drop notifications + execute(DROP_KEYSPACE, keyspace); + for (SchemaChangeListener listener : listeners) { + ArgumentCaptor table = ArgumentCaptor.forClass(TableMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onTableRemoved(table.capture()); + assertThat(table.getValue()).hasName("table1").isInKeyspace(handleId(keyspace)); + ArgumentCaptor ks = ArgumentCaptor.forClass(KeyspaceMetadata.class); + verify(listener, timeout(NOTIF_TIMEOUT_MS).times(1)).onKeyspaceRemoved(ks.capture()); + assertThat(ks.getValue()).hasName(handleId(keyspace)); + } + for (Metadata m : metadatas()) { + assertThat(m.getKeyspace(keyspace)).isNull(); + assertThat(m.getReplicas(keyspace, Bytes.fromHexString("0xCAFEBABE"))).isEmpty(); + } + } + + /** + * Ensures that calling {@link Metadata#newToken(String)} on a Cluster that has schema metadata + * disabled will throw a {@link IllegalStateException}. 
+ * + * @jira_ticket JAVA-858 + * @since 2.0.11 + */ + @Test(groups = "short", expectedExceptions = IllegalStateException.class) + public void should_throw_illegal_state_exception_on_newToken_with_metadata_disabled() { + Cluster cluster = + createClusterBuilder() + .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) + .build(); + + try { + cluster.init(); + cluster.getMetadata().newToken("0x00"); + } finally { + cluster.close(); + } + } + + /** + * Ensures that calling {@link Metadata#newTokenRange(Token, Token)} on a Cluster that has schema + * metadata disabled will throw a {@link IllegalStateException}. + * + * @jira_ticket JAVA-858 + * @since 2.0.11 + */ + @Test(groups = "short", expectedExceptions = IllegalStateException.class) + public void should_throw_illegal_state_exception_on_newTokenRange_with_metadata_disabled() { + Cluster cluster = + createClusterBuilder() + .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) + .build(); + + try { + cluster.init(); + Token.Factory factory = Token.getFactory("Murmur3Partitioner"); + Token token = factory.fromString(Long.toString(1)); + cluster.getMetadata().newTokenRange(token, token); + } finally { + cluster.close(); + } + } + + /** + * Ensures that executing a query causing a schema change with a Cluster that has schema metadata + * disabled will still wait on schema agreement, but not refresh the schema. + * + * @jira_ticket JAVA-858 + * @since 2.0.11 + */ + @Test(groups = "short", dataProvider = "existingKeyspaceName") + public void should_not_refresh_schema_on_schema_change_response(String keyspace) + throws InterruptedException { + ResultSet rs = schemaDisabledSession.execute(String.format(CREATE_TABLE, keyspace)); + + // Should still wait on schema agreement. + assertThat(rs.getExecutionInfo().isSchemaInAgreement()).isTrue(); + assertThat(schemaDisabledCluster.getMetadata().checkSchemaAgreement()).isTrue(); + + // Wait up to 1 second (since refreshSchema submitted in an executor) and check that + // refreshSchema never called. + verify(schemaDisabledControlConnection, after(1000).never()) + .refreshSchema( + any(SchemaElement.class), + any(String.class), + any(String.class), + anyListOf(String.class)); + } + + /** + * Ensures that when schema metadata is enabled using {@link + * QueryOptions#setMetadataEnabled(boolean)} that a schema and nodelist refresh is submitted, but + * only if schema metadata is currently disabled. + * + * @jira_ticket JAVA-858 + * @since 2.0.11 + */ + @Test(groups = "short", dataProvider = "existingKeyspaceName") + public void should_refresh_schema_and_token_map_if_schema_metadata_reenabled(String keyspace) + throws Exception { + try { + schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); + + verify(schemaDisabledControlConnection, after(1000)).refreshSchema(null, null, null, null); + + // Ensure that there is schema metadata. + assertThat(schemaDisabledCluster.getMetadata().getKeyspace(keyspace)).isNotNull(); + Token token1 = schemaDisabledCluster.getMetadata().newToken("0"); + Token token2 = schemaDisabledCluster.getMetadata().newToken("111111"); + assertThat(token1).isNotNull(); + assertThat(token2).isNotNull(); + assertThat(schemaDisabledCluster.getMetadata().newTokenRange(token1, token2)).isNotNull(); + + assertThat(schemaDisabledCluster.getMetadata().getTokenRanges()).isNotNull().isNotEmpty(); + + // Try enabling again and ensure schema is not refreshed again. 
+ reset(schemaDisabledControlConnection); + schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(true); + verify(schemaDisabledControlConnection, after(1000).never()) + .refreshSchema(null, null, null, null); + } finally { + // Reset listener mock to not count its interactions in this test. + reset(schemaDisabledListener); + schemaDisabledCluster.getConfiguration().getQueryOptions().setMetadataEnabled(false); + } + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void cleanup() throws InterruptedException { + if (session1 != null) { + ListenableFuture<List<ResultSet>> f = + Futures.successfulAsList( + Lists.newArrayList( + session1.executeAsync("DROP TABLE lowercase.table1"), + session1.executeAsync("DROP TABLE \"CaseSensitive\".table1"), + session1.executeAsync("DROP TYPE lowercase.type1"), + session1.executeAsync("DROP TYPE \"CaseSensitive\".type1"), + session1.executeAsync("DROP FUNCTION lowercase.\"ID\""), + session1.executeAsync("DROP FUNCTION \"CaseSensitive\".\"ID\""), + session1.executeAsync("DROP FUNCTION lowercase.\"PLUS\""), + session1.executeAsync("DROP FUNCTION \"CaseSensitive\".\"PLUS\""), + session1.executeAsync("DROP AGGREGATE lowercase.\"SUM\""), + session1.executeAsync("DROP AGGREGATE \"CaseSensitive\".\"SUM\""), + session1.executeAsync("DROP MATERIALIZED VIEW lowercase.mv1"), + session1.executeAsync("DROP MATERIALIZED VIEW \"CaseSensitive\".mv1"), + session1.executeAsync("DROP KEYSPACE lowercase2"), + session1.executeAsync("DROP KEYSPACE \"CaseSensitive2\""))); + Futures.getUnchecked(f); + } + } + + private void execute(String cql, String keyspace) throws InterruptedException { + session1.execute(String.format(cql, keyspace)); + } + + private List<Metadata> metadatas() { + return Lists.newArrayList(cluster1.getMetadata(), cluster2.getMetadata()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java index 80996624791..2c92ba23c7a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,279 +17,299 @@ */ package com.datastax.driver.core; -import org.mockito.ArgumentCaptor; -import org.testng.SkipException; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.util.Collections; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; import static com.datastax.driver.core.SchemaElement.KEYSPACE; import static com.datastax.driver.core.SchemaElement.TABLE; import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; import static org.mockito.ArgumentCaptor.forClass; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.util.Collections; +import org.mockito.ArgumentCaptor; +import org.testng.SkipException; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createKeyspace = false) public class SchemaRefreshDebouncerTest extends CCMTestsSupport { - // This may need to be tweaked depending on the reliability of the test environment. - private static final int DEBOUNCE_TIME = 5000; - - // Control Connection to be spied. - private ControlConnection controlConnection; - - // Schema Listener to be mocked. - private SchemaChangeListener listener; - - // Separate session/clusters to observe schema events on. - private Session session2; - private Cluster cluster2; - - @BeforeMethod(groups = "short") - public void setup() { - QueryOptions queryOptions = new QueryOptions(); - queryOptions.setRefreshSchemaIntervalMillis(DEBOUNCE_TIME); - queryOptions.setMaxPendingRefreshSchemaRequests(5); - // Create a separate cluster that will receive the schema events on its control connection. - cluster2 = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); - session2 = cluster2.connect(); - - // Create a spy of the Cluster's control connection and replace it with the spy. - controlConnection = spy(cluster2.manager.controlConnection); - cluster2.manager.controlConnection = controlConnection; - - // Create a mock of SchemaChangeListener to use for signalling events. - listener = mock(SchemaChangeListener.class); - cluster2.register(listener); - - reset(listener); - reset(controlConnection); + // This may need to be tweaked depending on the reliability of the test environment. + private static final int DEBOUNCE_TIME = 5000; + + // Control Connection to be spied. + private ControlConnection controlConnection; + + // Schema Listener to be mocked. + private SchemaChangeListener listener; + + // Separate session/clusters to observe schema events on. 
+ private Session session2; + private Cluster cluster2; + + @BeforeMethod(groups = "short") + public void setup() { + QueryOptions queryOptions = new QueryOptions(); + queryOptions.setRefreshSchemaIntervalMillis(DEBOUNCE_TIME); + queryOptions.setMaxPendingRefreshSchemaRequests(5); + // Create a separate cluster that will receive the schema events on its control connection. + cluster2 = register(createClusterBuilder().withQueryOptions(queryOptions).build()); + session2 = cluster2.connect(); + + // Create a spy of the Cluster's control connection and replace it with the spy. + controlConnection = spy(cluster2.manager.controlConnection); + cluster2.manager.controlConnection = controlConnection; + + // Create a mock of SchemaChangeListener to use for signalling events. + listener = mock(SchemaChangeListener.class); + cluster2.register(listener); + + reset(listener); + reset(controlConnection); + } + + /** + * Ensures that when a CREATED and UPDATED schema_change events are received on a control + * connection for the same keyspace within {@link QueryOptions#getRefreshSchemaIntervalMillis()} + * that the schema refresh is debounced and coalesced into a single schema refresh for that + * keyspace only. + * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_debounce_and_coalesce_create_and_alter_keyspace_into_refresh_keyspace() + throws Exception { + String keyspace = TestUtils.generateIdentifier("ks_"); + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); + session().execute(String.format("ALTER KEYSPACE %s WITH DURABLE_WRITES=false", keyspace)); + + ArgumentCaptor captor = forClass(KeyspaceMetadata.class); + verify(listener, timeout(DEBOUNCE_TIME * 2).only()).onKeyspaceAdded(captor.capture()); + assertThat(captor.getValue()).hasName(keyspace).isNotDurableWrites(); + + // Verify that the schema refresh was debounced and coalesced when a keyspace creation + // and update event occur for the same keyspace. + verify(controlConnection, times(1)).refreshSchema(KEYSPACE, keyspace, null, null); + + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); + // By ensuring durable writes is false, we know that the single schema refresh occurred + // after the alter event. + assertThat(ksm).isNotNull().hasName(keyspace).isNotDurableWrites(); + } + + /** + * Ensures that when a CREATED (keyspace) and CREATED (table) schema_change events are received on + * a control connection with that table belonging to that keyspace within {@link + * QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced and + * coalesced into a single schema refresh for that keyspace only. + * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_debounce_and_coalesce_create_keyspace_and_table_into_refresh_keyspace() + throws Exception { + String keyspace = TestUtils.generateIdentifier("ks_"); + String table = "tbl1"; + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); + session() + .execute( + String.format( + "CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", + keyspace + "." 
+ table)); + + ArgumentCaptor keyspaceCaptor = forClass(KeyspaceMetadata.class); + verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)).onKeyspaceAdded(keyspaceCaptor.capture()); + assertThat(keyspaceCaptor.getValue()).hasName(keyspace); + + ArgumentCaptor tableCaptor = forClass(TableMetadata.class); + verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)).onTableAdded(tableCaptor.capture()); + assertThat(tableCaptor.getValue()).hasName(table); + + // Verify the schema refresh was debounced and coalesced when a keyspace event and table event + // in that keyspace is detected. + verify(controlConnection).refreshSchema(KEYSPACE, keyspace, null, null); + verify(controlConnection, never()).refreshSchema(TABLE, keyspace, table, null); + + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); + assertThat(ksm).isNotNull(); + assertThat(ksm.getTable(table)).isNotNull(); + } + + /** + * Ensures that when multiple CREATED schema_change events are received on a control connection + * for tables belonging to the same keyspace within {@link + * QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced and + * coalesced into a single schema refresh for that keyspace only. + * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_debounce_and_coalesce_tables_in_same_keyspace_into_refresh_keyspace() + throws Exception { + String keyspace = TestUtils.generateIdentifier("ks_"); + session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); + // Reset invocations as creating keyspace causes a keyspace refresh. + reset(controlConnection); + reset(listener); + + int tableCount = 3; + for (int i = 0; i < tableCount; i++) { + session() + .execute( + String.format( + "CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", + keyspace + "." + "tbl" + i)); } - /** - * Ensures that when a CREATED and UPDATED schema_change events are received on a control - * connection for the same keyspace within {@link QueryOptions#getRefreshSchemaIntervalMillis()} - * that the schema refresh is debounced and coalesced into a single schema refresh for that keyspace only. - * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_debounce_and_coalesce_create_and_alter_keyspace_into_refresh_keyspace() throws Exception { - String keyspace = TestUtils.generateIdentifier("ks_"); - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); - session().execute(String.format("ALTER KEYSPACE %s WITH DURABLE_WRITES=false", keyspace)); - - ArgumentCaptor captor = forClass(KeyspaceMetadata.class); - verify(listener, timeout(DEBOUNCE_TIME * 2).only()).onKeyspaceAdded(captor.capture()); - assertThat(captor.getValue()).hasName(keyspace).isNotDurableWrites(); - - // Verify that the schema refresh was debounced and coalesced when a keyspace creation - // and update event occur for the same keyspace. - verify(controlConnection, times(1)).refreshSchema(KEYSPACE, keyspace, null, null); - - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); - // By ensuring durable writes is false, we know that the single schema refresh occurred - // after the alter event. 
- assertThat(ksm).isNotNull().hasName(keyspace).isNotDurableWrites(); - } + verify(listener, timeout(DEBOUNCE_TIME * 3).times(3)).onTableAdded(any(TableMetadata.class)); + // Verify a refresh of the keyspace was executed, but not individually on the + // tables since those events were coalesced. + verify(controlConnection).refreshSchema(KEYSPACE, keyspace, null, null); - /** - * Ensures that when a CREATED (keyspace) and CREATED (table) schema_change events are received - * on a control connection with that table belonging to that keyspace within - * {@link QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced - * and coalesced into a single schema refresh for that keyspace only. - * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_debounce_and_coalesce_create_keyspace_and_table_into_refresh_keyspace() throws Exception { - String keyspace = TestUtils.generateIdentifier("ks_"); - String table = "tbl1"; - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); - session().execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", keyspace + "." + table)); - - ArgumentCaptor keyspaceCaptor = forClass(KeyspaceMetadata.class); - verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)).onKeyspaceAdded(keyspaceCaptor.capture()); - assertThat(keyspaceCaptor.getValue()).hasName(keyspace); - - ArgumentCaptor tableCaptor = forClass(TableMetadata.class); - verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)).onTableAdded(tableCaptor.capture()); - assertThat(tableCaptor.getValue()).hasName(table); - - // Verify the schema refresh was debounced and coalesced when a keyspace event and table event - // in that keyspace is detected. - verify(controlConnection).refreshSchema(KEYSPACE, keyspace, null, null); - verify(controlConnection, never()).refreshSchema(TABLE, keyspace, table, null); - - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); - assertThat(ksm).isNotNull(); - assertThat(ksm.getTable(table)).isNotNull(); + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); + assertThat(ksm).isNotNull(); + // metadata is present for each table. + for (int i = 0; i < tableCount; i++) { + String table = "tbl" + i; + // Should have never been a refreshSchema on the table. + verify(controlConnection, never()).refreshSchema(TABLE, keyspace, table, null); + assertThat(ksm.getTable(table)).isNotNull(); } - - /** - * Ensures that when multiple CREATED schema_change events are received - * on a control connection for tables belonging to the same keyspace within - * {@link QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced - * and coalesced into a single schema refresh for that keyspace only. - * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_debounce_and_coalesce_tables_in_same_keyspace_into_refresh_keyspace() throws Exception { - String keyspace = TestUtils.generateIdentifier("ks_"); - session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); - // Reset invocations as creating keyspace causes a keyspace refresh. - reset(controlConnection); - reset(listener); - - int tableCount = 3; - for (int i = 0; i < tableCount; i++) { - session().execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", keyspace + "." 
+ "tbl" + i)); - } - - verify(listener, timeout(DEBOUNCE_TIME * 3).times(3)).onTableAdded(any(TableMetadata.class)); - - // Verify a refresh of the keyspace was executed, but not individually on the - // tables since those events were coalesced. - verify(controlConnection).refreshSchema(KEYSPACE, keyspace, null, null); - - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); - assertThat(ksm).isNotNull(); - // metadata is present for each table. - for (int i = 0; i < tableCount; i++) { - String table = "tbl" + i; - // Should have never been a refreshSchema on the table. - verify(controlConnection, never()).refreshSchema(TABLE, keyspace, table, null); - assertThat(ksm.getTable(table)).isNotNull(); - } + } + + /** + * Ensures that when multiple UPDATED schema_change events are received on a control connection + * for the same table within {@link QueryOptions#getRefreshSchemaIntervalMillis()} that the + * schema refresh is debounced and coalesced into a single schema refresh for that table only. + * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_debounce_and_coalesce_multiple_alter_events_on_same_table_into_refresh_table() + throws Exception { + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) + throw new SkipException("Disabled in Cassandra 2.2+ because of CASSANDRA-9996"); + + String keyspace = TestUtils.generateIdentifier("ks_"); + String table = "tbl1"; + String comment = "I am changing this table."; + String columnName = "added_column"; + // Execute on session 2 which refreshes schema as part of processing responses. + session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); + session2.execute( + String.format( + "CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", + keyspace + "." + table)); + reset(controlConnection); + reset(listener); + + session() + .execute(String.format("ALTER TABLE %s.%s WITH comment = '%s'", keyspace, table, comment)); + session().execute(String.format("ALTER TABLE %s.%s ADD %s int", keyspace, table, columnName)); + + ArgumentCaptor original = forClass(TableMetadata.class); + ArgumentCaptor captor = forClass(TableMetadata.class); + verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)) + .onTableChanged(captor.capture(), original.capture()); + assertThat(captor.getValue()) + .hasName(table) + .isInKeyspace(keyspace) + .hasColumn(columnName) + .hasComment(comment); + + assertThat(original.getValue()) + .hasName(table) + .isInKeyspace(keyspace) + .hasNoColumn(columnName) + .doesNotHaveComment(comment); + + // Verify a refresh of the table was executed, but only once. + verify(controlConnection, times(1)) + .refreshSchema(TABLE, keyspace, table, Collections.emptyList()); + + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); + assertThat(ksm).isNotNull(); + TableMetadata tm = ksm.getTable(table); + assertThat(tm).hasName(table).isInKeyspace(keyspace).hasColumn(columnName).hasComment(comment); + } + + /** + * Ensures that when a CREATED (keyspace) and CREATED (keyspace) schema_change events are received + * on a control connection for different keyspaces within {@link + * QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced and + * coalesced into a single full schema refresh. 
+ * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_debounce_and_coalesce_multiple_keyspace_creates_into_refresh_entire_schema() + throws Exception { + String prefix = TestUtils.generateIdentifier("ks_"); + for (int i = 0; i < 3; i++) { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, prefix + i, 1)); + // check that the metadata is immediately up-to-date for the client that issued the DDL + // statement + assertThat(cluster().getMetadata().getKeyspace(prefix + i)).isNotNull(); } - /** - * Ensures that when multiple UPDATED schema_change events are received - * on a control connection for for the same table within - * {@link QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced - * and coalesced into a single schema refresh for that table only. - * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_debounce_and_coalesce_multiple_alter_events_on_same_table_into_refresh_table() throws Exception { - if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) - throw new SkipException("Disabled in Cassandra 2.2+ because of CASSANDRA-9996"); - - String keyspace = TestUtils.generateIdentifier("ks_"); - String table = "tbl1"; - String comment = "I am changing this table."; - String columnName = "added_column"; - // Execute on session 2 which refreshes schema as part of processing responses. - session2.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1)); - session2.execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", keyspace + "." + table)); - reset(controlConnection); - reset(listener); - - session().execute(String.format("ALTER TABLE %s.%s WITH comment = '%s'", keyspace, table, comment)); - session().execute(String.format("ALTER TABLE %s.%s ADD %s int", keyspace, table, columnName)); - - ArgumentCaptor original = forClass(TableMetadata.class); - ArgumentCaptor captor = forClass(TableMetadata.class); - verify(listener, timeout(DEBOUNCE_TIME * 2).times(1)).onTableChanged(captor.capture(), original.capture()); - assertThat(captor.getValue()) - .hasName(table) - .isInKeyspace(keyspace) - .hasColumn(columnName) - .hasComment(comment); - - assertThat(original.getValue()) - .hasName(table) - .isInKeyspace(keyspace) - .hasNoColumn(columnName) - .doesNotHaveComment(comment); - - // Verify a refresh of the table was executed, but only once. - verify(controlConnection, times(1)).refreshSchema(TABLE, keyspace, table, Collections.emptyList()); - - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(keyspace); - assertThat(ksm).isNotNull(); - TableMetadata tm = ksm.getTable(table); - assertThat(tm) - .hasName(table) - .isInKeyspace(keyspace) - .hasColumn(columnName) - .hasComment(comment); - } + verify(listener, timeout(DEBOUNCE_TIME * 3).times(3)) + .onKeyspaceAdded(any(KeyspaceMetadata.class)); + // Verify a complete schema refresh was executed, but only once. + verify(controlConnection, times(1)).refreshSchema(null, null, null, null); - /** - * Ensures that when a CREATED (keyspace) and CREATED (keyspace) schema_change events are received - * on a control connection for different keyspaces within - * {@link QueryOptions#getRefreshSchemaIntervalMillis()} that the schema refresh is debounced - * and coalesced into a single full schema refresh. 
- * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_debounce_and_coalesce_multiple_keyspace_creates_into_refresh_entire_schema() throws Exception { - String prefix = TestUtils.generateIdentifier("ks_"); - for (int i = 0; i < 3; i++) { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, prefix + i, 1)); - // check that the metadata is immediately up-to-date for the client that issued the DDL statement - assertThat(cluster().getMetadata().getKeyspace(prefix + i)).isNotNull(); - } - - verify(listener, timeout(DEBOUNCE_TIME * 3).times(3)).onKeyspaceAdded(any(KeyspaceMetadata.class)); - // Verify a complete schema refresh was executed, but only once. - verify(controlConnection, times(1)).refreshSchema(null, null, null, null); - - for (int i = 0; i < 3; i++) { - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(prefix + i); - assertThat(ksm).isNotNull().hasName(prefix + i); - } + for (int i = 0; i < 3; i++) { + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(prefix + i); + assertThat(ksm).isNotNull().hasName(prefix + i); } + } + + /** + * Ensures that when enough schema changes have been received on a control connection to reach + * {@link QueryOptions#getMaxPendingRefreshSchemaRequests()} that a schema refresh is submitted + * right away. + * + * @throws Exception + * @jira_ticket JAVA-657 + * @since 2.0.11 + */ + @Test(groups = "short") + public void should_refresh_when_max_pending_requests_reached() throws Exception { + String prefix = TestUtils.generateIdentifier("ks_"); + for (int i = 0; i < 5; i++) { + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, prefix + i, 1)); + // check that the metadata is immediately up-to-date for the client that issued the DDL + // statement + assertThat(cluster().getMetadata().getKeyspace(prefix + i)).isNotNull(); + } + + // Event should be processed immediately as we hit our threshold. + verify(listener, timeout(DEBOUNCE_TIME * 5).times(5)) + .onKeyspaceAdded(any(KeyspaceMetadata.class)); + // Verify a complete schema refresh was executed, but only once. + verify(controlConnection, times(1)).refreshSchema(null, null, null, null); - /** - * Ensures that when enough schema changes have been received on a control connection to - * reach {@link QueryOptions#getMaxPendingRefreshSchemaRequests()} that a schema refresh - * is submitted right away. - * - * @throws Exception - * @jira_ticket JAVA-657 - * @since 2.0.11 - */ - @Test(groups = "short") - public void should_refresh_when_max_pending_requests_reached() throws Exception { - String prefix = TestUtils.generateIdentifier("ks_"); - for (int i = 0; i < 5; i++) { - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, prefix + i, 1)); - // check that the metadata is immediately up-to-date for the client that issued the DDL statement - assertThat(cluster().getMetadata().getKeyspace(prefix + i)).isNotNull(); - } - - // Event should be processed immediately as we hit our threshold. - verify(listener, timeout(DEBOUNCE_TIME * 5).times(5)).onKeyspaceAdded(any(KeyspaceMetadata.class)); - // Verify a complete schema refresh was executed, but only once. 
- verify(controlConnection, times(1)).refreshSchema(null, null, null, null); - - for (int i = 0; i < 5; i++) { - KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(prefix + i); - assertThat(ksm).isNotNull().hasName(prefix + i); - } + for (int i = 0; i < 5; i++) { + KeyspaceMetadata ksm = cluster2.getMetadata().getKeyspace(prefix + i); + assertThat(ksm).isNotNull().hasName(prefix + i); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SegmentBuilderTest.java b/driver-core/src/test/java/com/datastax/driver/core/SegmentBuilderTest.java new file mode 100644 index 00000000000..557c8c66b87 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/SegmentBuilderTest.java @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +public class SegmentBuilderTest { + + private static final Message.ProtocolEncoder REQUEST_ENCODER = + new Message.ProtocolEncoder(ProtocolVersion.V5); + + // The constant names denote the total encoded size, including the frame header + private static final Message.Request _38B_REQUEST = new Requests.Query("SELECT * FROM table"); + private static final Message.Request _51B_REQUEST = + new Requests.Query("SELECT * FROM table WHERE id = 1"); + private static final Message.Request _1KB_REQUEST = + new Requests.Query( + "SELECT * FROM table WHERE id = ?", + new Requests.QueryProtocolOptions( + Message.Request.Type.QUERY, + ConsistencyLevel.ONE, + new ByteBuffer[] {ByteBuffer.allocate(967)}, + Collections.emptyMap(), + false, + -1, + null, + ConsistencyLevel.SERIAL, + Long.MIN_VALUE, + Integer.MIN_VALUE), + false); + + private static final EmbeddedChannel MOCK_CHANNEL = new EmbeddedChannel(); + private static final ChannelHandlerContext CONTEXT = Mockito.mock(ChannelHandlerContext.class); + + @BeforeClass(groups = "unit") + public static void setup() { + // This is the only method called by our test implementation + when(CONTEXT.newPromise()) + .thenAnswer( + new Answer() { + @Override + public ChannelPromise answer(InvocationOnMock invocation) { + return MOCK_CHANNEL.newPromise(); + } + }); + } + + @Test(groups = "unit") + public void 
should_concatenate_frames_when_under_limit() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise requestPromise1 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise1); + ChannelPromise requestPromise2 = newPromise(); + builder.addRequest(_51B_REQUEST, requestPromise2); + // Nothing produced yet since we would still have room for more frames + assertThat(builder.segments).isEmpty(); + + builder.flush(); + assertThat(builder.segments).hasSize(1); + assertThat(builder.segmentPromises).hasSize(1); + Segment segment = builder.segments.get(0); + assertThat(segment.getPayload().readableBytes()).isEqualTo(38 + 51); + assertThat(segment.isSelfContained()).isTrue(); + ChannelPromise segmentPromise = builder.segmentPromises.get(0); + assertForwards(segmentPromise, requestPromise1, requestPromise2); + } + + @Test(groups = "unit") + public void should_start_new_segment_when_over_limit() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise requestPromise1 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise1); + ChannelPromise requestPromise2 = newPromise(); + builder.addRequest(_51B_REQUEST, requestPromise2); + ChannelPromise requestPromise3 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise3); + // Adding the 3rd frame brings the total size over 100, so a first segment should be emitted + // with the first two messages: + assertThat(builder.segments).hasSize(1); + + ChannelPromise requestPromise4 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise4); + builder.flush(); + assertThat(builder.segments).hasSize(2); + + Segment segment1 = builder.segments.get(0); + assertThat(segment1.getPayload().readableBytes()).isEqualTo(38 + 51); + assertThat(segment1.isSelfContained()).isTrue(); + ChannelPromise segmentPromise1 = builder.segmentPromises.get(0); + assertForwards(segmentPromise1, requestPromise1, requestPromise2); + Segment segment2 = builder.segments.get(1); + assertThat(segment2.getPayload().readableBytes()).isEqualTo(38 + 38); + assertThat(segment2.isSelfContained()).isTrue(); + ChannelPromise segmentPromise2 = builder.segmentPromises.get(1); + assertForwards(segmentPromise2, requestPromise3, requestPromise4); + } + + @Test(groups = "unit") + public void should_start_new_segment_when_at_limit() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 38 + 51); + + ChannelPromise requestPromise1 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise1); + ChannelPromise requestPromise2 = newPromise(); + builder.addRequest(_51B_REQUEST, requestPromise2); + ChannelPromise requestPromise3 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise3); + assertThat(builder.segments).hasSize(1); + + ChannelPromise requestPromise4 = newPromise(); + builder.addRequest(_51B_REQUEST, requestPromise4); + builder.flush(); + assertThat(builder.segments).hasSize(2); + + Segment segment1 = builder.segments.get(0); + assertThat(segment1.getPayload().readableBytes()).isEqualTo(38 + 51); + assertThat(segment1.isSelfContained()).isTrue(); + ChannelPromise segmentPromise1 = builder.segmentPromises.get(0); + assertForwards(segmentPromise1, requestPromise1, requestPromise2); + Segment segment2 = builder.segments.get(1); + assertThat(segment2.getPayload().readableBytes()).isEqualTo(38 + 51); + assertThat(segment2.isSelfContained()).isTrue(); + ChannelPromise segmentPromise2 = builder.segmentPromises.get(1); + assertForwards(segmentPromise2, requestPromise3, 
requestPromise4); + } + + @Test(groups = "unit") + public void should_split_large_frame() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise parentPromise = newPromise(); + builder.addRequest(_1KB_REQUEST, parentPromise); + + assertThat(builder.segments).hasSize(11); + assertThat(builder.segmentPromises).hasSize(11); + for (int i = 0; i < 11; i++) { + Segment slice = builder.segments.get(i); + assertThat(slice.getPayload().readableBytes()).isEqualTo(i == 10 ? 24 : 100); + assertThat(slice.isSelfContained()).isFalse(); + } + } + + @Test(groups = "unit") + public void should_succeed_parent_write_if_all_slices_successful() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise parentPromise = newPromise(); + builder.addRequest(_1KB_REQUEST, parentPromise); + + assertThat(builder.segments).hasSize(11); + assertThat(builder.segmentPromises).hasSize(11); + + for (int i = 0; i < 11; i++) { + assertThat(parentPromise.isDone()).isFalse(); + builder.segmentPromises.get(i).setSuccess(); + } + + assertThat(parentPromise.isDone()).isTrue(); + } + + @Test(groups = "unit") + public void should_fail_parent_write_if_any_slice_fails() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise parentPromise = newPromise(); + builder.addRequest(_1KB_REQUEST, parentPromise); + + assertThat(builder.segments).hasSize(11); + + // Complete a few slices successfully + for (int i = 0; i < 5; i++) { + builder.segmentPromises.get(i).setSuccess(); + } + assertThat(parentPromise.isDone()).isFalse(); + + // Fail a slice, the parent should fail immediately + Exception mockException = new Exception("test"); + builder.segmentPromises.get(5).setFailure(mockException); + assertThat(parentPromise.isDone()).isTrue(); + assertThat(parentPromise.cause()).isEqualTo(mockException); + + // The remaining slices should have been cancelled + for (int i = 6; i < 11; i++) { + assertThat(builder.segmentPromises.get(i).isCancelled()).isTrue(); + } + } + + @Test(groups = "unit") + public void should_split_large_frame_when_exact_multiple() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 256); + + ChannelPromise parentPromise = newPromise(); + builder.addRequest(_1KB_REQUEST, parentPromise); + + assertThat(builder.segments).hasSize(4); + assertThat(builder.segmentPromises).hasSize(4); + for (int i = 0; i < 4; i++) { + Segment slice = builder.segments.get(i); + assertThat(slice.getPayload().readableBytes()).isEqualTo(256); + assertThat(slice.isSelfContained()).isFalse(); + } + } + + @Test(groups = "unit") + public void should_mix_small_frames_and_large_frames() { + TestSegmentBuilder builder = new TestSegmentBuilder(CONTEXT, 100); + + ChannelPromise requestPromise1 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise1); + ChannelPromise requestPromise2 = newPromise(); + builder.addRequest(_51B_REQUEST, requestPromise2); + + // Large frame: process immediately, does not impact accumulated small frames + ChannelPromise requestPromise3 = newPromise(); + builder.addRequest(_1KB_REQUEST, requestPromise3); + assertThat(builder.segments).hasSize(11); + + // Another small frames bring us above the limit + ChannelPromise requestPromise4 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise4); + assertThat(builder.segments).hasSize(12); + + // One last frame and finish + ChannelPromise requestPromise5 = newPromise(); + builder.addRequest(_38B_REQUEST, requestPromise5); + builder.flush(); + 
assertThat(builder.segments).hasSize(13); + assertThat(builder.segmentPromises).hasSize(13); + + for (int i = 0; i < 11; i++) { + Segment slice = builder.segments.get(i); + assertThat(slice.getPayload().readableBytes()).isEqualTo(i == 10 ? 24 : 100); + assertThat(slice.isSelfContained()).isFalse(); + } + + Segment smallMessages1 = builder.segments.get(11); + assertThat(smallMessages1.getPayload().readableBytes()).isEqualTo(38 + 51); + assertThat(smallMessages1.isSelfContained()).isTrue(); + ChannelPromise segmentPromise1 = builder.segmentPromises.get(11); + assertForwards(segmentPromise1, requestPromise1, requestPromise2); + Segment smallMessages2 = builder.segments.get(12); + assertThat(smallMessages2.getPayload().readableBytes()).isEqualTo(38 + 38); + assertThat(smallMessages2.isSelfContained()).isTrue(); + ChannelPromise segmentPromise2 = builder.segmentPromises.get(12); + assertForwards(segmentPromise2, requestPromise4, requestPromise5); + } + + private static ChannelPromise newPromise() { + return MOCK_CHANNEL.newPromise(); + } + + private void assertForwards(ChannelPromise segmentPromise, ChannelPromise... requestPromises) { + for (ChannelPromise requestPromise : requestPromises) { + assertThat(requestPromise.isDone()).isFalse(); + } + segmentPromise.setSuccess(); + for (ChannelPromise requestPromise : requestPromises) { + assertThat(requestPromise.isSuccess()).isTrue(); + } + } + + // Test implementation that simply stores segments and promises in the order they were produced. + static class TestSegmentBuilder extends SegmentBuilder { + + List segments = new ArrayList(); + List segmentPromises = new ArrayList(); + + TestSegmentBuilder(ChannelHandlerContext context, int maxPayloadLength) { + super(context, ByteBufAllocator.DEFAULT, REQUEST_ENCODER, maxPayloadLength); + } + + @Override + protected void processSegment(Segment segment, ChannelPromise segmentPromise) { + segments.add(segment); + segmentPromises.add(segmentPromise); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SegmentCodecTest.java b/driver-core/src/test/java/com/datastax/driver/core/SegmentCodecTest.java new file mode 100644 index 00000000000..72ce1f1b032 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/SegmentCodecTest.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.driver.core.ProtocolOptions.Compression; +import com.datastax.driver.core.SegmentCodec.Header; +import com.datastax.driver.core.exceptions.CrcMismatchException; +import com.google.common.base.Strings; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.UnpooledByteBufAllocator; +import org.testng.annotations.Test; + +public class SegmentCodecTest { + + public static final SegmentCodec CODEC_NO_COMPRESSION = + new SegmentCodec(UnpooledByteBufAllocator.DEFAULT, Compression.NONE); + public static final SegmentCodec CODEC_LZ4 = + new SegmentCodec(UnpooledByteBufAllocator.DEFAULT, Compression.LZ4); + + @Test(groups = "unit") + public void should_encode_uncompressed_header() { + ByteBuf header = CODEC_NO_COMPRESSION.encodeHeader(5, -1, true); + + byte byte0 = header.getByte(2); + byte byte1 = header.getByte(1); + byte byte2 = header.getByte(0); + + assertThat(bits(byte0) + bits(byte1) + bits(byte2)) + .isEqualTo( + "000000" // padding (6 bits) + + "1" // selfContainedFlag + + "00000000000000101" // length (17 bits) + ); + } + + @Test(groups = "unit") + public void should_encode_compressed_header() { + ByteBuf header = CODEC_LZ4.encodeHeader(5, 12, true); + + byte byte0 = header.getByte(4); + byte byte1 = header.getByte(3); + byte byte2 = header.getByte(2); + byte byte3 = header.getByte(1); + byte byte4 = header.getByte(0); + + assertThat(bits(byte0) + bits(byte1) + bits(byte2) + bits(byte3) + bits(byte4)) + .isEqualTo( + "00000" // padding (5 bits) + + "1" // selfContainedFlag + + "00000000000001100" // uncompressed length (17 bits) + + "00000000000000101" // compressed length (17 bits) + ); + } + + /** + * Checks that we correctly use 8 bytes when we left-shift the uncompressed length, to avoid + * overflows. + */ + @Test(groups = "unit") + public void should_encode_compressed_header_when_aligned_uncompressed_length_overflows() { + ByteBuf header = CODEC_LZ4.encodeHeader(5, Segment.MAX_PAYLOAD_LENGTH, true); + + byte byte0 = header.getByte(4); + byte byte1 = header.getByte(3); + byte byte2 = header.getByte(2); + byte byte3 = header.getByte(1); + byte byte4 = header.getByte(0); + + assertThat(bits(byte0) + bits(byte1) + bits(byte2) + bits(byte3) + bits(byte4)) + .isEqualTo( + "00000" // padding (5 bits) + + "1" // selfContainedFlag + + "11111111111111111" // uncompressed length (17 bits) + + "00000000000000101" // compressed length (17 bits) + ); + } + + @Test(groups = "unit") + public void should_decode_uncompressed_payload() { + // Assembling the test data manually would have little value because it would be very similar to + // our production code. So simply use that production code, assuming it's correct. 
+ ByteBuf buffer = CODEC_NO_COMPRESSION.encodeHeader(5, -1, true); + Header header = CODEC_NO_COMPRESSION.decodeHeader(buffer); + assertThat(header.payloadLength).isEqualTo(5); + assertThat(header.uncompressedPayloadLength).isEqualTo(-1); + assertThat(header.isSelfContained).isTrue(); + } + + @Test(groups = "unit") + public void should_decode_compressed_payload() { + ByteBuf buffer = CODEC_LZ4.encodeHeader(5, 12, true); + Header header = CODEC_LZ4.decodeHeader(buffer); + assertThat(header.payloadLength).isEqualTo(5); + assertThat(header.uncompressedPayloadLength).isEqualTo(12); + assertThat(header.isSelfContained).isTrue(); + } + + @Test(groups = "unit") + public void should_fail_to_decode_if_corrupted() { + ByteBuf buffer = CODEC_NO_COMPRESSION.encodeHeader(5, -1, true); + + // Flip a random byte + for (int bitOffset = 0; bitOffset < 47; bitOffset++) { + int byteOffset = bitOffset / 8; + int shift = bitOffset % 8; + + ByteBuf slice = buffer.slice(buffer.readerIndex() + byteOffset, 1); + slice.markReaderIndex(); + byte byteToCorrupt = slice.readByte(); + slice.resetReaderIndex(); + slice.writerIndex(slice.readerIndex()); + slice.writeByte((byteToCorrupt & 0xFF) ^ (1 << shift)); + + try { + CODEC_NO_COMPRESSION.decodeHeader(buffer.duplicate()); + fail("Expected CrcMismatchException"); + } catch (CrcMismatchException e) { + // expected + } + } + } + + private static String bits(byte b) { + return Strings.padStart(Integer.toBinaryString(b & 0xFF), 8, '0'); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SegmentToFrameDecoderTest.java b/driver-core/src/test/java/com/datastax/driver/core/SegmentToFrameDecoderTest.java new file mode 100644 index 00000000000..bf0689dd04e --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/SegmentToFrameDecoderTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Message.Response.Type.READY; +import static com.datastax.driver.core.Message.Response.Type.RESULT; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.Frame.Header; +import com.datastax.driver.core.Frame.Header.Flag; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.embedded.EmbeddedChannel; +import java.util.EnumSet; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class SegmentToFrameDecoderTest { + + private static final ByteBuf SMALL_BODY_1 = buffer(128); + private static final Header SMALL_HEADER_1 = + new Header( + ProtocolVersion.V5, + EnumSet.noneOf(Flag.class), + 2, + READY.opcode, + SMALL_BODY_1.readableBytes()); + + private static final ByteBuf SMALL_BODY_2 = buffer(1024); + private static final Header SMALL_HEADER_2 = + new Header( + ProtocolVersion.V5, + EnumSet.noneOf(Flag.class), + 7, + RESULT.opcode, + SMALL_BODY_2.readableBytes()); + + private static final ByteBuf LARGE_BODY = buffer(256 * 1024); + private static final Header LARGE_HEADER = + new Header( + ProtocolVersion.V5, + EnumSet.noneOf(Flag.class), + 12, + RESULT.opcode, + LARGE_BODY.readableBytes()); + + private EmbeddedChannel channel; + + @BeforeMethod(groups = "unit") + public void setup() { + channel = new EmbeddedChannel(); + channel.pipeline().addLast(new SegmentToFrameDecoder()); + } + + @Test(groups = "unit") + public void should_decode_self_contained() { + ByteBuf payload = UnpooledByteBufAllocator.DEFAULT.buffer(); + appendFrame(SMALL_HEADER_1, SMALL_BODY_1, payload); + appendFrame(SMALL_HEADER_2, SMALL_BODY_2, payload); + + channel.writeInbound(new Segment(payload, true)); + + Frame frame1 = (Frame) channel.readInbound(); + Header header1 = frame1.header; + assertThat(header1.streamId).isEqualTo(SMALL_HEADER_1.streamId); + assertThat(header1.opcode).isEqualTo(SMALL_HEADER_1.opcode); + assertThat(frame1.body).isEqualTo(SMALL_BODY_1); + + Frame frame2 = (Frame) channel.readInbound(); + Header header2 = frame2.header; + assertThat(header2.streamId).isEqualTo(SMALL_HEADER_2.streamId); + assertThat(header2.opcode).isEqualTo(SMALL_HEADER_2.opcode); + assertThat(frame2.body).isEqualTo(SMALL_BODY_2); + } + + @Test(groups = "unit") + public void should_decode_sequence_of_slices() { + ByteBuf encodedFrame = UnpooledByteBufAllocator.DEFAULT.buffer(); + appendFrame(LARGE_HEADER, LARGE_BODY, encodedFrame); + + do { + ByteBuf payload = + encodedFrame.readSlice( + Math.min(Segment.MAX_PAYLOAD_LENGTH, encodedFrame.readableBytes())); + channel.writeInbound(new Segment(payload, false)); + } while (encodedFrame.isReadable()); + + Frame frame = (Frame) channel.readInbound(); + Header header = frame.header; + assertThat(header.streamId).isEqualTo(LARGE_HEADER.streamId); + assertThat(header.opcode).isEqualTo(LARGE_HEADER.opcode); + assertThat(frame.body).isEqualTo(LARGE_BODY); + } + + private static final ByteBuf buffer(int length) { + ByteBuf buffer = UnpooledByteBufAllocator.DEFAULT.buffer(length); + // Contents don't really matter, keep all zeroes + buffer.writerIndex(buffer.readerIndex() + length); + return buffer; + } + + private static void appendFrame(Header frameHeader, ByteBuf frameBody, ByteBuf payload) { + frameHeader.encodeInto(payload); + // this method doesn't affect the body's indices: + payload.writeBytes(frameBody, frameBody.readerIndex(), frameBody.readableBytes()); + } +} diff --git 
a/driver-core/src/test/java/com/datastax/driver/core/SessionAssert.java b/driver-core/src/test/java/com/datastax/driver/core/SessionAssert.java index 83c44b42a55..4d4d8c7025e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,26 +17,26 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; +import org.assertj.core.api.AbstractAssert; + public class SessionAssert extends AbstractAssert { - protected SessionAssert(Session actual) { - // We are cheating a bit by casting, but this is the only implementation anyway - super((SessionManager) actual, SessionAssert.class); - } + protected SessionAssert(Session actual) { + // We are cheating a bit by casting, but this is the only implementation anyway + super((SessionManager) actual, SessionAssert.class); + } - public SessionAssert hasPoolFor(int hostNumber) { - Host host = TestUtils.findHost(actual.cluster, hostNumber); - assertThat(actual.pools.containsKey(host)).isTrue(); - return this; - } + public SessionAssert hasPoolFor(int hostNumber) { + Host host = TestUtils.findHost(actual.cluster, hostNumber); + assertThat(actual.pools.containsKey(host)).isTrue(); + return this; + } - public SessionAssert hasNoPoolFor(int hostNumber) { - Host host = TestUtils.findHost(actual.cluster, hostNumber); - assertThat(actual.pools.containsKey(host)).isFalse(); - return this; - } + public SessionAssert hasNoPoolFor(int hostNumber) { + Host host = TestUtils.findHost(actual.cluster, hostNumber); + assertThat(actual.pools.containsKey(host)).isFalse(); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionErrorTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionErrorTest.java index aaa3418e00c..5fdeab5975e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionErrorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionErrorTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,10 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Cluster.builder; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import org.apache.log4j.Level; import org.jboss.byteman.contrib.bmunit.BMNGListener; import org.jboss.byteman.contrib.bmunit.BMRule; @@ -24,89 +30,83 @@ import org.testng.annotations.Listeners; import org.testng.annotations.Test; -import static com.datastax.driver.core.Cluster.builder; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -/** - * Simple test of the Sessions methods against a one node cluster. - */ +/** Simple test of the Sessions methods against a one node cluster. */ @BMUnitConfig(loadDirectory = "target/test-classes") @Listeners(BMNGListener.class) @CCMConfig(createCluster = false) public class SessionErrorTest extends ScassandraTestBase { - private ScassandraCluster scassandra; - private Cluster cluster; + private ScassandraCluster scassandra; + private Cluster cluster; - @BeforeClass(groups = "short") - public void setUp() throws Exception { - scassandra = ScassandraCluster.builder().withNodes(2).build(); - scassandra.init(); - cluster = builder() - .addContactPoints(scassandra.address(1).getAddress()) - .withPort(scassandra.getBinaryPort()) - .build(); - cluster.init(); - } + @BeforeClass(groups = "short") + public void setUp() throws Exception { + scassandra = ScassandraCluster.builder().withNodes(2).build(); + scassandra.init(); + cluster = + builder() + .addContactPoints(scassandra.address(1).getAddress()) + .withPort(scassandra.getBinaryPort()) + .build(); + cluster.init(); + } - @AfterClass(groups = "short") - public void tearDown() throws Exception { - cluster.close(); - scassandra.stop(); - } + @AfterClass(groups = "short") + public void tearDown() throws Exception { + cluster.close(); + scassandra.stop(); + } - @Test(groups = "short") - @BMRule(name = "emulate OOME", - targetClass = "com.datastax.driver.core.Connection$4", - targetMethod = "apply(Void)", - action = "throw new OutOfMemoryError(\"not really\")" - ) - public void should_propagate_errors() { - try { - cluster.connect(); - fail("Expecting OOME"); - } catch (OutOfMemoryError e) { - assertThat(e).hasMessage("not really"); - } + @Test(groups = "short") + @BMRule( + name = "emulate OOME", + targetClass = "com.datastax.driver.core.Connection$4", + targetMethod = "apply(Void)", + action = "throw new OutOfMemoryError(\"not really\")") + public void should_propagate_errors() { + try { + cluster.connect(); + fail("Expecting OOME"); + } catch (OutOfMemoryError e) { + assertThat(e).hasMessage("not really"); } + } - @Test(groups = "short") - @BMRule(name = "emulate NPE", - targetClass = "com.datastax.driver.core.Connection$4", - targetMethod = "apply(Void)", - action = "throw new NullPointerException(\"not really\")" - ) - public void should_not_propagate_unchecked_exceptions() { - Level previous = TestUtils.setLogLevel(HostConnectionPool.class, Level.WARN); - MemoryAppender 
logs = new MemoryAppender().enableFor(HostConnectionPool.class); - try { - Session session = cluster.connect(); - // Pool to host1 should be still open because host1 is the control host, - // but its pool should have no active connection - // Pool to host2 should have been closed because host2 has no - // more active connections - Host host1 = scassandra.host(cluster, 1, 1); - Host host2 = scassandra.host(cluster, 1, 2); - TestUtils.waitForDown(TestUtils.ipOfNode(2), cluster); - Session.State state = session.getState(); - assertThat(state.getOpenConnections(host1)).isEqualTo(0); // pool open but empty - assertThat(state.getOpenConnections(host2)).isEqualTo(0); // pool closed - assertThat(logs.get()) - .contains( - "Unexpected error during transport initialization", - "not really", - NullPointerException.class.getSimpleName(), - "com.datastax.driver.core.Connection$4.apply"); - HostConnectionPool pool1 = ((SessionManager)session).pools.get(host1); - HostConnectionPool pool2 = ((SessionManager)session).pools.get(host2); - assertThat(pool1).isNotNull(); - assertThat(pool1.isClosed()).isFalse(); - assertThat(pool2).isNull(); // pool2 should have been removed - } finally { - TestUtils.setLogLevel(HostConnectionPool.class, previous); - logs.disableFor(HostConnectionPool.class); - } + @Test(groups = "short") + @BMRule( + name = "emulate NPE", + targetClass = "com.datastax.driver.core.Connection$4", + targetMethod = "apply(Void)", + action = "throw new NullPointerException(\"not really\")") + public void should_not_propagate_unchecked_exceptions() { + Level previous = TestUtils.setLogLevel(HostConnectionPool.class, Level.WARN); + MemoryAppender logs = new MemoryAppender().enableFor(HostConnectionPool.class); + try { + Session session = cluster.connect(); + // Pool to host1 should be still open because host1 is the control host, + // but its pool should have no active connection + // Pool to host2 should have been closed because host2 has no + // more active connections + Host host1 = scassandra.host(cluster, 1, 1); + Host host2 = scassandra.host(cluster, 1, 2); + TestUtils.waitForDown(TestUtils.ipOfNode(2), cluster); + Session.State state = session.getState(); + assertThat(state.getOpenConnections(host1)).isEqualTo(0); // pool open but empty + assertThat(state.getOpenConnections(host2)).isEqualTo(0); // pool closed + assertThat(logs.get()) + .contains( + "Unexpected error during transport initialization", + "not really", + NullPointerException.class.getSimpleName(), + "com.datastax.driver.core.Connection$4.apply"); + HostConnectionPool pool1 = ((SessionManager) session).pools.get(host1); + HostConnectionPool pool2 = ((SessionManager) session).pools.get(host2); + assertThat(pool1).isNotNull(); + assertThat(pool1.isClosed()).isFalse(); + assertThat(pool2).isNull(); // pool2 should have been removed + } finally { + TestUtils.setLogLevel(HostConnectionPool.class, previous); + logs.disableFor(HostConnectionPool.class); } - + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java index 956ef8cf9b9..cbc0fadb927 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,112 +17,112 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.InvalidQueryException; -import com.datastax.driver.core.utils.SocketChannelMonitor; -import org.testng.annotations.Test; - -import java.util.concurrent.TimeUnit; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static com.google.common.collect.Lists.newArrayList; import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.fail; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.utils.SocketChannelMonitor; +import java.util.concurrent.TimeUnit; +import org.testng.annotations.Test; + @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createCluster = false) public class SessionLeakTest extends CCMTestsSupport { - SocketChannelMonitor channelMonitor; + SocketChannelMonitor channelMonitor; - @Test(groups = "long") - public void connectionLeakTest() throws Exception { - // Checking for JAVA-342 - channelMonitor = new SocketChannelMonitor(); - channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + @Test(groups = "long") + public void connectionLeakTest() throws Exception { + // Checking for JAVA-342 + channelMonitor = new SocketChannelMonitor(); + channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); + Cluster cluster = + register( + createClusterBuilderNoDebouncing() .withNettyOptions(channelMonitor.nettyOptions()) - .withQueryOptions(nonDebouncingQueryOptions()) .build()); - cluster.init(); - - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - // Should be 1 control connection after initialization. - assertOpenConnections(1, cluster); - - // ensure sessions.size() returns with 1 control connection + core pool size. 
- int corePoolSize = TestUtils.numberOfLocalCoreConnections(cluster); - Session session = cluster.connect(); - - assertThat(cluster.manager.sessions.size()).isEqualTo(1); - assertOpenConnections(1 + corePoolSize, cluster); - - // ensure sessions.size() returns to 0 with only 1 active connection (the control connection) - session.close(); - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - assertOpenConnections(1, cluster); - - // ensure bootstrapping a node does not create additional connections - ccm().add(2); - ccm().start(2); - ccm().waitForUp(2); - assertThat(cluster).host(2).comesUpWithin(2, MINUTES); - - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - assertOpenConnections(1, cluster); - - // ensure a new session gets registered and core connections are established - // there should be corePoolSize more connections to accommodate for the new host. - Session thisSession = cluster.connect(); - assertThat(cluster.manager.sessions.size()).isEqualTo(1); - assertOpenConnections(1 + (corePoolSize * 2), cluster); - - // ensure bootstrapping a node does not create additional connections that won't get cleaned up - thisSession.close(); - - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - assertOpenConnections(1, cluster); - cluster.close(); - // Ensure no channels remain open. - channelMonitor.stop(); - channelMonitor.report(); - assertThat(channelMonitor.openChannels(newArrayList(ccm().addressOfNode(1), ccm().addressOfNode(2))).size()).isEqualTo(0); - } - - @Test(groups = "short") - public void should_not_leak_session_when_wrong_keyspace() throws Exception { - // Checking for JAVA-806 - channelMonitor = new SocketChannelMonitor(); - channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withNettyOptions(channelMonitor.nettyOptions()) - .build()); - cluster.init(); - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - try { - // Should be 1 control connection after initialization. - assertOpenConnections(1, cluster); - cluster.connect("wrong_keyspace"); - fail("Should not have connected to a wrong keyspace"); - } catch (InvalidQueryException e) { - // ok - } - assertThat(cluster.manager.sessions.size()).isEqualTo(0); - cluster.close(); - // Ensure no channels remain open. - channelMonitor.stop(); - channelMonitor.report(); - assertThat(channelMonitor.openChannels(ccm().addressOfNode(1), ccm().addressOfNode(2)).size()).isEqualTo(0); - } - - private void assertOpenConnections(int expected, Cluster cluster) { - assertThat(cluster.getMetrics().getOpenConnections().getValue()).isEqualTo(expected); - assertThat(channelMonitor.openChannels(ccm().addressOfNode(1), ccm().addressOfNode(2)).size()).isEqualTo(expected); + cluster.init(); + + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + // Should be 1 control connection after initialization. + assertOpenConnections(1, cluster); + + // ensure sessions.size() returns with 1 control connection + core pool size. 
+ int corePoolSize = TestUtils.numberOfLocalCoreConnections(cluster); + Session session = cluster.connect(); + + assertThat(cluster.manager.sessions.size()).isEqualTo(1); + assertOpenConnections(1 + corePoolSize, cluster); + + // ensure sessions.size() returns to 0 with only 1 active connection (the control connection) + session.close(); + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + assertOpenConnections(1, cluster); + + // ensure bootstrapping a node does not create additional connections + ccm().add(2); + ccm().start(2); + ccm().waitForUp(2); + assertThat(cluster).host(2).comesUpWithin(2, MINUTES); + + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + assertOpenConnections(1, cluster); + + // ensure a new session gets registered and core connections are established + // there should be corePoolSize more connections to accommodate for the new host. + Session thisSession = cluster.connect(); + assertThat(cluster.manager.sessions.size()).isEqualTo(1); + assertOpenConnections(1 + (corePoolSize * 2), cluster); + + // ensure bootstrapping a node does not create additional connections that won't get cleaned up + thisSession.close(); + + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + assertOpenConnections(1, cluster); + cluster.close(); + // Ensure no channels remain open. + channelMonitor.stop(); + channelMonitor.report(); + assertThat( + channelMonitor + .openChannels(newArrayList(ccm().addressOfNode(1), ccm().addressOfNode(2))) + .size()) + .isEqualTo(0); + } + + @Test(groups = "short") + public void should_not_leak_session_when_wrong_keyspace() throws Exception { + // Checking for JAVA-806 + channelMonitor = new SocketChannelMonitor(); + channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); + Cluster cluster = + register(createClusterBuilder().withNettyOptions(channelMonitor.nettyOptions()).build()); + cluster.init(); + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + try { + // Should be 1 control connection after initialization. + assertOpenConnections(1, cluster); + cluster.connect("wrong_keyspace"); + fail("Should not have connected to a wrong keyspace"); + } catch (InvalidQueryException e) { + // ok } + assertThat(cluster.manager.sessions.size()).isEqualTo(0); + cluster.close(); + // Ensure no channels remain open. + channelMonitor.stop(); + channelMonitor.report(); + assertThat(channelMonitor.openChannels(ccm().addressOfNode(1), ccm().addressOfNode(2)).size()) + .isEqualTo(0); + } + + private void assertOpenConnections(int expected, Cluster cluster) { + assertThat(cluster.getMetrics().getOpenConnections().getValue()).isEqualTo(expected); + assertThat(channelMonitor.openChannels(ccm().addressOfNode(1), ccm().addressOfNode(2)).size()) + .isEqualTo(expected); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java index a2827d37da0..ca792c3eda1 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,276 +17,297 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + import com.datastax.driver.core.utils.SocketChannelMonitor; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.Uninterruptibles; +import java.util.Iterator; +import java.util.List; +import java.util.Stack; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.Test; -import java.util.Iterator; -import java.util.List; -import java.util.Stack; -import java.util.concurrent.*; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; - @CCMConfig(dirtiesContext = true) public class SessionStressTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(SessionStressTest.class); + private static final Logger logger = LoggerFactory.getLogger(SessionStressTest.class); - private ListeningExecutorService executorService; + private ListeningExecutorService executorService; - private Cluster stressCluster; + private Cluster stressCluster; - private final SocketChannelMonitor channelMonitor = new SocketChannelMonitor(); + private final SocketChannelMonitor channelMonitor = new SocketChannelMonitor(); - public SessionStressTest() { - // 8 threads should be enough so that we stress the driver and not the OS thread scheduler - executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(8)); - } + public SessionStressTest() { + // 8 threads should be enough so that we stress the driver and not the OS thread scheduler + executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(8)); + } - @AfterMethod(groups = "long", alwaysRun = true) - public void shutdown() throws Exception { - executorService.shutdown(); - try { - boolean shutdown = executorService.awaitTermination(30, TimeUnit.SECONDS); - if (!shutdown) - fail("executor ran for longer than expected"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for executor to shutdown"); - } finally { - executorService = null; - System.gc(); - } + @AfterMethod(groups = "long", alwaysRun = true) + public void shutdown() throws Exception { + executorService.shutdown(); + try { + boolean shutdown = executorService.awaitTermination(30, TimeUnit.SECONDS); + if (!shutdown) fail("executor ran for longer than expected"); + } catch (InterruptedException e) { + 
fail("Interrupted while waiting for executor to shutdown"); + } finally { + executorService = null; + System.gc(); } - - /** - * Stress test on opening/closing sessions. - *

    - * This test opens and closes {@code Session} in a multithreaded environment and makes sure that there is not - * connection leak. More specifically, this test performs the following steps: - *

    - *

      - *
    • Open 2000 {@code Session} concurrently
    • - *
    • Verify that 2000 sessions are reported as open by the {@code Cluster}
    • - *
    • Verify that 4001 connections are reported as open by the {@code Cluster}
    • - *
    • Close 1000 {@code Session} concurrently
    • - *
    • Verify that 1000 sessions are reported as open by the {@code Cluster}
    • - *
    • Verify that 2001 connections are reported as open by the {@code Cluster}
    • - *
    • Open concurrently 1000 {@code Session} while 1000 other {@code Session} are closed concurrently
    • - *
    • Verify that 1000 sessions are reported as open by the {@code Cluster}
    • - *
    • Verify that 2001 connections are reported as open by the {@code Cluster}
    • - *
    • Close 1000 {@code Session} concurrently
    • - *
    • Verify that 0 sessions are reported as open by the {@code Cluster}
    • - *
    • Verify that 1 connection is reported as open by the {@code Cluster}
    • - *
    - *

    - * This test is linked to JAVA-432. - */ - @Test(groups = "long") - public void sessions_should_not_leak_connections() { - // override inherited field with a new cluster object and ensure 0 sessions and connections - channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); - stressCluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withPoolingOptions(new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) - .withNettyOptions(channelMonitor.nettyOptions()).build(); - - try { - stressCluster.init(); - - // The cluster has been initialized, we should have 1 connection. - assertEquals(stressCluster.manager.sessions.size(), 0); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); - - // The first session initializes the cluster and its control connection - // This is a local cluster so we also have 2 connections per session - Session session = stressCluster.connect(); - assertEquals(stressCluster.manager.sessions.size(), 1); - int coreConnections = TestUtils.numberOfLocalCoreConnections(stressCluster); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1 + coreConnections); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1 + coreConnections); - - // Closing the session keeps the control connection opened - session.close(); - assertEquals(stressCluster.manager.sessions.size(), 0); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); - - int nbOfSessions = 2000; - int halfOfTheSessions = nbOfSessions / 2; - int nbOfIterations = 5; - int sleepTime = 20; - - for (int iteration = 1; iteration <= nbOfIterations; iteration++) { - logger.info("On iteration {}/{}.", iteration, nbOfIterations); - logger.info("Creating {} sessions.", nbOfSessions); - waitFor(openSessionsConcurrently(nbOfSessions)); - - // We should see the exact number of opened sessions - // Since we have 2 connections per session, we should see 2 * sessions + control connection - assertEquals(stressCluster.manager.sessions.size(), nbOfSessions); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), - coreConnections * nbOfSessions + 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), coreConnections * nbOfSessions + 1); - - // Close half of the sessions asynchronously - logger.info("Closing {}/{} sessions.", halfOfTheSessions, nbOfSessions); - waitFor(closeSessionsConcurrently(halfOfTheSessions)); - - // Check that we have the right number of sessions and connections - assertEquals(stressCluster.manager.sessions.size(), halfOfTheSessions); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), - coreConnections * (nbOfSessions / 2) + 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), - coreConnections * (nbOfSessions / 2) + 1); - - // Close and open the same number of sessions concurrently - logger.info("Closing and Opening {} sessions concurrently.", halfOfTheSessions); - CountDownLatch startSignal = new CountDownLatch(2); - List> openSessionFutures = - openSessionsConcurrently(halfOfTheSessions, startSignal); - List> closeSessionsFutures = closeSessionsConcurrently(halfOfTheSessions, - startSignal); - startSignal.countDown(); - waitFor(openSessionFutures); - waitFor(closeSessionsFutures); - - // Check that we have the same number of 
sessions and connections - assertEquals(stressCluster.manager.sessions.size(), halfOfTheSessions); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), - coreConnections * (nbOfSessions / 2) + 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), - coreConnections * (nbOfSessions / 2) + 1); - - // Close the remaining sessions - logger.info("Closing remaining {} sessions.", halfOfTheSessions); - waitFor(closeSessionsConcurrently(halfOfTheSessions)); - - // Check that we have a clean state - assertEquals(stressCluster.manager.sessions.size(), 0); - assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); - - // On OSX, the TCP connections are released after 15s by default (sysctl -a net.inet.tcp.msl) - logger.info("Sleeping {} seconds so that TCP connections are released by the OS", sleepTime); - Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.SECONDS); - } - } finally { - stressCluster.close(); - stressCluster = null; - - // Ensure no channels remain open. - assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 0); - - channelMonitor.stop(); - channelMonitor.report(); - - logger.info("Sleeping 60 extra seconds"); - Uninterruptibles.sleepUninterruptibly(60, TimeUnit.SECONDS); - - } + } + + /** + * Stress test on opening/closing sessions. + * + *

+   * <p>This test opens and closes {@code Session} in a multithreaded environment and makes sure
+   * that there is not connection leak. More specifically, this test performs the following steps:
+   *
+   * <p>
+   *
+   * <ul>
+   *   <li>Open 2000 {@code Session} concurrently
+   *   <li>Verify that 2000 sessions are reported as open by the {@code Cluster}
+   *   <li>Verify that 4001 connections are reported as open by the {@code Cluster}
+   *   <li>Close 1000 {@code Session} concurrently
+   *   <li>Verify that 1000 sessions are reported as open by the {@code Cluster}
+   *   <li>Verify that 2001 connections are reported as open by the {@code Cluster}
+   *   <li>Open concurrently 1000 {@code Session} while 1000 other {@code Session} are closed
+   *       concurrently
+   *   <li>Verify that 1000 sessions are reported as open by the {@code Cluster}
+   *   <li>Verify that 2001 connections are reported as open by the {@code Cluster}
+   *   <li>Close 1000 {@code Session} concurrently
+   *   <li>Verify that 0 sessions are reported as open by the {@code Cluster}
+   *   <li>Verify that 1 connection is reported as open by the {@code Cluster}
+   * </ul>
+   *
+   * <p>
    This test is linked to JAVA-432. + */ + @Test(groups = "long") + public void sessions_should_not_leak_connections() { + // override inherited field with a new cluster object and ensure 0 sessions and connections + channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); + stressCluster = + createClusterBuilder() + .withPoolingOptions( + new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) + .withNettyOptions(channelMonitor.nettyOptions()) + .build(); + + try { + stressCluster.init(); + + // The cluster has been initialized, we should have 1 connection. + assertEquals(stressCluster.manager.sessions.size(), 0); + assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); + + // The first session initializes the cluster and its control connection + // This is a local cluster so we also have 2 connections per session + Session session = stressCluster.connect(); + assertEquals(stressCluster.manager.sessions.size(), 1); + int coreConnections = TestUtils.numberOfLocalCoreConnections(stressCluster); + assertEquals( + (int) stressCluster.getMetrics().getOpenConnections().getValue(), 1 + coreConnections); + assertEquals( + channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1 + coreConnections); + + // Closing the session keeps the control connection opened + session.close(); + assertEquals(stressCluster.manager.sessions.size(), 0); + assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); + + int nbOfSessions = 2000; + int halfOfTheSessions = nbOfSessions / 2; + int nbOfIterations = 5; + int sleepTime = 20; + + for (int iteration = 1; iteration <= nbOfIterations; iteration++) { + logger.info("On iteration {}/{}.", iteration, nbOfIterations); + logger.info("Creating {} sessions.", nbOfSessions); + waitFor(openSessionsConcurrently(nbOfSessions)); + + // We should see the exact number of opened sessions + // Since we have 2 connections per session, we should see 2 * sessions + control connection + assertEquals(stressCluster.manager.sessions.size(), nbOfSessions); + assertEquals( + (int) stressCluster.getMetrics().getOpenConnections().getValue(), + coreConnections * nbOfSessions + 1); + assertEquals( + channelMonitor.openChannels(getContactPointsWithPorts()).size(), + coreConnections * nbOfSessions + 1); + + // Close half of the sessions asynchronously + logger.info("Closing {}/{} sessions.", halfOfTheSessions, nbOfSessions); + waitFor(closeSessionsConcurrently(halfOfTheSessions)); + + // Check that we have the right number of sessions and connections + assertEquals(stressCluster.manager.sessions.size(), halfOfTheSessions); + assertEquals( + (int) stressCluster.getMetrics().getOpenConnections().getValue(), + coreConnections * (nbOfSessions / 2) + 1); + assertEquals( + channelMonitor.openChannels(getContactPointsWithPorts()).size(), + coreConnections * (nbOfSessions / 2) + 1); + + // Close and open the same number of sessions concurrently + logger.info("Closing and Opening {} sessions concurrently.", halfOfTheSessions); + CountDownLatch startSignal = new CountDownLatch(2); + List> openSessionFutures = + openSessionsConcurrently(halfOfTheSessions, startSignal); + List> closeSessionsFutures = + closeSessionsConcurrently(halfOfTheSessions, startSignal); + startSignal.countDown(); + waitFor(openSessionFutures); + waitFor(closeSessionsFutures); + + // Check that we have the same number of sessions and connections + 
assertEquals(stressCluster.manager.sessions.size(), halfOfTheSessions); + assertEquals( + (int) stressCluster.getMetrics().getOpenConnections().getValue(), + coreConnections * (nbOfSessions / 2) + 1); + assertEquals( + channelMonitor.openChannels(getContactPointsWithPorts()).size(), + coreConnections * (nbOfSessions / 2) + 1); + + // Close the remaining sessions + logger.info("Closing remaining {} sessions.", halfOfTheSessions); + waitFor(closeSessionsConcurrently(halfOfTheSessions)); + + // Check that we have a clean state + assertEquals(stressCluster.manager.sessions.size(), 0); + assertEquals((int) stressCluster.getMetrics().getOpenConnections().getValue(), 1); + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 1); + + // On OSX, the TCP connections are released after 15s by default (sysctl -a + // net.inet.tcp.msl) + logger.info( + "Sleeping {} seconds so that TCP connections are released by the OS", sleepTime); + Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.SECONDS); + } + } finally { + stressCluster.close(); + stressCluster = null; + + // Ensure no channels remain open. + assertEquals(channelMonitor.openChannels(getContactPointsWithPorts()).size(), 0); + + channelMonitor.stop(); + channelMonitor.report(); + + logger.info("Sleeping 60 extra seconds"); + Uninterruptibles.sleepUninterruptibly(60, TimeUnit.SECONDS); } - - private List> openSessionsConcurrently(int iterations) { - final CountDownLatch countDownLatch = new CountDownLatch(1); - return openSessionsConcurrently(iterations, countDownLatch); + } + + private List> openSessionsConcurrently(int iterations) { + final CountDownLatch countDownLatch = new CountDownLatch(1); + return openSessionsConcurrently(iterations, countDownLatch); + } + + private List> openSessionsConcurrently( + int iterations, CountDownLatch countDownLatch) { + // Open new sessions once all tasks have been created + List> sessionFutures = Lists.newArrayListWithCapacity(iterations); + for (int i = 0; i < iterations; i++) { + sessionFutures.add(executorService.submit(new OpenSession(countDownLatch))); } - - private List> openSessionsConcurrently(int iterations, CountDownLatch countDownLatch) { - // Open new sessions once all tasks have been created - List> sessionFutures = Lists.newArrayListWithCapacity(iterations); - for (int i = 0; i < iterations; i++) { - sessionFutures.add(executorService.submit(new OpenSession(countDownLatch))); - } - countDownLatch.countDown(); - return sessionFutures; + countDownLatch.countDown(); + return sessionFutures; + } + + private List> closeSessionsConcurrently(int iterations) { + final CountDownLatch countDownLatch = new CountDownLatch(1); + return closeSessionsConcurrently(iterations, countDownLatch); + } + + private List> closeSessionsConcurrently( + int iterations, CountDownLatch countDownLatch) { + // Get a reference to every session we want to close + Stack sessionsToClose = new Stack(); + Iterator iterator = stressCluster.manager.sessions.iterator(); + for (int i = 0; i < iterations; i++) { + sessionsToClose.push(iterator.next()); } - private List> closeSessionsConcurrently(int iterations) { - final CountDownLatch countDownLatch = new CountDownLatch(1); - return closeSessionsConcurrently(iterations, countDownLatch); + // Close sessions asynchronously once all tasks have been created + List> closeFutures = Lists.newArrayListWithCapacity(iterations); + for (int i = 0; i < iterations; i++) { + closeFutures.add( + executorService.submit(new CloseSession(sessionsToClose.pop(), 
countDownLatch))); } - - private List> closeSessionsConcurrently(int iterations, CountDownLatch countDownLatch) { - // Get a reference to every session we want to close - Stack sessionsToClose = new Stack(); - Iterator iterator = stressCluster.manager.sessions.iterator(); - for (int i = 0; i < iterations; i++) { - sessionsToClose.push(iterator.next()); - } - - // Close sessions asynchronously once all tasks have been created - List> closeFutures = Lists.newArrayListWithCapacity(iterations); - for (int i = 0; i < iterations; i++) { - closeFutures.add(executorService.submit(new CloseSession(sessionsToClose.pop(), countDownLatch))); - } - countDownLatch.countDown(); - - // Immediately wait for CloseFutures, this should be very quick since all this work does is call closeAsync. - List> futures = Lists.newArrayListWithCapacity(iterations); - for (ListenableFuture closeFuture : closeFutures) { - try { - futures.add(closeFuture.get()); - } catch (Exception e) { - logger.error("Got interrupted exception while waiting on closeFuture.", e); - } - } - return futures; + countDownLatch.countDown(); + + // Immediately wait for CloseFutures, this should be very quick since all this work does is call + // closeAsync. + List> futures = Lists.newArrayListWithCapacity(iterations); + for (ListenableFuture closeFuture : closeFutures) { + try { + futures.add(closeFuture.get()); + } catch (Exception e) { + logger.error("Got interrupted exception while waiting on closeFuture.", e); + } + } + return futures; + } + + private void waitFor(List> futures) { + for (Future future : futures) { + try { + future.get(); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted while waiting for future", e); + } catch (ExecutionException e) { + e.printStackTrace(); + fail(e.getMessage()); + } } + } - private void waitFor(List> futures) { - for (Future future : futures) { - try { - future.get(); - } catch (InterruptedException e) { - throw new RuntimeException("Interrupted while waiting for future", e); - } catch (ExecutionException e) { - e.printStackTrace(); - fail(e.getMessage()); - } - } + private class OpenSession implements Callable { + private final CountDownLatch startSignal; + + OpenSession(CountDownLatch startSignal) { + this.startSignal = startSignal; } - private class OpenSession implements Callable { - private final CountDownLatch startSignal; + @Override + public Session call() throws Exception { + startSignal.await(); + return stressCluster.connect(); + } + } - OpenSession(CountDownLatch startSignal) { - this.startSignal = startSignal; - } + private static class CloseSession implements Callable { + private Session session; + private final CountDownLatch startSignal; - @Override - public Session call() throws Exception { - startSignal.await(); - return stressCluster.connect(); - } + CloseSession(Session session, CountDownLatch startSignal) { + this.session = session; + this.startSignal = startSignal; } - private static class CloseSession implements Callable { - private Session session; - private final CountDownLatch startSignal; - - CloseSession(Session session, CountDownLatch startSignal) { - this.session = session; - this.startSignal = startSignal; - } - - @Override - public CloseFuture call() throws Exception { - startSignal.await(); - try { - return session.closeAsync(); - } finally { - session = null; - } - } + @Override + public CloseFuture call() throws Exception { + startSignal.await(); + try { + return session.closeAsync(); + } finally { + session = null; + } } + } } diff --git 
a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index a1d56fdfa1c..e71d90d8e08 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,191 +17,238 @@ */ package com.datastax.driver.core; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.*; - -import com.google.common.util.concurrent.ListenableFuture; -import org.testng.annotations.Test; - -import com.datastax.driver.core.exceptions.SyntaxError; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.Assertions.fail; import static com.datastax.driver.core.Assertions.offset; import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -/** - * Simple test of the Sessions methods against a one node cluster. 
- */ -public class SessionTest extends CCMTestsSupport { - - private static final String TABLE1 = TestUtils.generateIdentifier("test1"); - private static final String TABLE2 = TestUtils.generateIdentifier("test2"); - private static final String TABLE3 = TestUtils.generateIdentifier("test3"); - private static final String COUNTER_TABLE = TestUtils.generateIdentifier("counters"); - - @Override - public void onTestContextInitialized() { - execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE1), - String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE2), - String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE3), - String.format("CREATE TABLE %s (k text PRIMARY KEY, c counter)", COUNTER_TABLE)); - } - - @Test(groups = "short") - public void should_execute_simple_statements() throws Exception { - // Simple calls to all versions of the execute/executeAsync methods - String key = "execute_test"; - ResultSet rs = session().execute(String.format(Locale.US, "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", TABLE1, key, "foo", 42, 24.03f)); - assertThat(rs.isExhausted()).isTrue(); - - // execute - checkExecuteResultSet(session().execute(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)), key); - checkExecuteResultSet(session().execute(new SimpleStatement(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)).setConsistencyLevel(ConsistencyLevel.ONE)), key); - - // executeAsync - checkExecuteResultSet(session().executeAsync(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)).getUninterruptibly(), key); - checkExecuteResultSet(session().executeAsync(new SimpleStatement(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)).setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); - } - - @Test(groups = "short") - public void should_execute_prepared_statements() throws Exception { - // Simple calls to all versions of the execute/executeAsync methods for prepared statements - // Note: the goal is only to exercice the Session methods, PreparedStatementTest have better prepared statement tests. - String key = "execute_prepared_test"; - ResultSet rs = session().execute(String.format(Locale.US, "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", TABLE2, key, "foo", 42, 24.03f)); - assertThat(rs.isExhausted()).isTrue(); - - PreparedStatement p = session().prepare(String.format(TestUtils.SELECT_ALL_FORMAT + " WHERE k = ?", TABLE2)); - BoundStatement bs = p.bind(key); - - // executePrepared - checkExecuteResultSet(session().execute(bs), key); - checkExecuteResultSet(session().execute(bs.setConsistencyLevel(ConsistencyLevel.ONE)), key); - - // executePreparedAsync - checkExecuteResultSet(session().executeAsync(bs).getUninterruptibly(), key); - checkExecuteResultSet(session().executeAsync(bs.setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); - } - - static void checkExecuteResultSet(ResultSet rs, String key) { - assertThat(rs.isExhausted()).isFalse(); - Row row = rs.one(); - assertThat(rs.isExhausted()).isTrue(); - assertThat(row.getString("k")).isEqualTo(key); - assertThat(row.getString("t")).isEqualTo("foo"); - assertThat(row.getInt("i")).isEqualTo(42); - assertThat(row.getFloat("f")).isEqualTo(24.03f, offset(0.1f)); - } - - @Test(groups = "short") - public void should_execute_prepared_counter_statement() throws Exception { - PreparedStatement p = session().prepare("UPDATE " + COUNTER_TABLE + " SET c = c + ? 
WHERE k = ?"); - - session().execute(p.bind(1L, "row")); - session().execute(p.bind(1L, "row")); +import com.datastax.driver.core.exceptions.SyntaxError; +import com.google.common.util.concurrent.ListenableFuture; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.testng.annotations.Test; - ResultSet rs = session().execute("SELECT * FROM " + COUNTER_TABLE); - List rows = rs.all(); - assertThat(rows).hasSize(1); - assertThat(rows.get(0).getLong("c")).isEqualTo(2L); - } +/** Simple test of the Sessions methods against a one node cluster. */ +public class SessionTest extends CCMTestsSupport { - /** - * Checks for deadlocks when a session shutdown races with the initialization of the cluster (JAVA-418). - */ - @Test(groups = "short") - public void should_close_properly_when_racing_with_cluster_init() throws InterruptedException { - for (int i = 0; i < 500; i++) { - - // Use our own cluster and session (not the ones provided by the parent class) because we want an uninitialized cluster - // (note the use of newSession below) - final Cluster cluster = Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - try { - final Session session = cluster.newSession(); - - // Spawn two threads to simulate the race - ExecutorService executor = Executors.newFixedThreadPool(2); - final CountDownLatch startLatch = new CountDownLatch(1); - - executor.execute(new Runnable() { - @Override - public void run() { - try { - startLatch.await(); - cluster.init(); - } catch (InterruptedException e) { - fail("unexpected interruption", e); - } - } - }); - - executor.execute(new Runnable() { - @Override - public void run() { - try { - startLatch.await(); - TimeUnit.MILLISECONDS.sleep(10); - session.close(); - } catch (InterruptedException e) { - fail("unexpected interruption", e); - } - } - }); - - // Start the threads - startLatch.countDown(); - - executor.shutdown(); - boolean normalShutdown = executor.awaitTermination(5, TimeUnit.SECONDS); - assertThat(normalShutdown).isTrue(); - - } finally { - // The deadlock occurred here before JAVA-418 - cluster.close(); - } - } + private static final String TABLE1 = TestUtils.generateIdentifier("test1"); + private static final String TABLE2 = TestUtils.generateIdentifier("test2"); + private static final String TABLE3 = TestUtils.generateIdentifier("test3"); + private static final String COUNTER_TABLE = TestUtils.generateIdentifier("counters"); + + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE1), + String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE2), + String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE3), + String.format("CREATE TABLE %s (k text PRIMARY KEY, c counter)", COUNTER_TABLE)); + } + + @Test(groups = "short") + public void should_execute_simple_statements() throws Exception { + // Simple calls to all versions of the execute/executeAsync methods + String key = "execute_test"; + ResultSet rs = + session() + .execute( + String.format( + Locale.US, + "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", + TABLE1, + key, + "foo", + 42, + 24.03f)); + 
assertThat(rs.isExhausted()).isTrue(); + + // execute + checkExecuteResultSet( + session().execute(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)), key); + checkExecuteResultSet( + session() + .execute( + new SimpleStatement(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)) + .setConsistencyLevel(ConsistencyLevel.ONE)), + key); + + // executeAsync + checkExecuteResultSet( + session() + .executeAsync(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)) + .getUninterruptibly(), + key); + checkExecuteResultSet( + session() + .executeAsync( + new SimpleStatement(String.format(TestUtils.SELECT_ALL_FORMAT, TABLE1)) + .setConsistencyLevel(ConsistencyLevel.ONE)) + .getUninterruptibly(), + key); + } + + @Test(groups = "short") + public void should_execute_prepared_statements() throws Exception { + // Simple calls to all versions of the execute/executeAsync methods for prepared statements + // Note: the goal is only to exercice the Session methods, PreparedStatementTest have better + // prepared statement tests. + String key = "execute_prepared_test"; + ResultSet rs = + session() + .execute( + String.format( + Locale.US, + "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", + TABLE2, + key, + "foo", + 42, + 24.03f)); + assertThat(rs.isExhausted()).isTrue(); + + PreparedStatement p = + session().prepare(String.format(TestUtils.SELECT_ALL_FORMAT + " WHERE k = ?", TABLE2)); + BoundStatement bs = p.bind(key); + + // executePrepared + checkExecuteResultSet(session().execute(bs), key); + checkExecuteResultSet(session().execute(bs.setConsistencyLevel(ConsistencyLevel.ONE)), key); + + // executePreparedAsync + checkExecuteResultSet(session().executeAsync(bs).getUninterruptibly(), key); + checkExecuteResultSet( + session().executeAsync(bs.setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), + key); + } + + static void checkExecuteResultSet(ResultSet rs, String key) { + assertThat(rs.isExhausted()).isFalse(); + Row row = rs.one(); + assertThat(rs.isExhausted()).isTrue(); + assertThat(row.getString("k")).isEqualTo(key); + assertThat(row.getString("t")).isEqualTo("foo"); + assertThat(row.getInt("i")).isEqualTo(42); + assertThat(row.getFloat("f")).isEqualTo(24.03f, offset(0.1f)); + } + + @Test(groups = "short") + public void should_execute_prepared_counter_statement() throws Exception { + PreparedStatement p = + session().prepare("UPDATE " + COUNTER_TABLE + " SET c = c + ? WHERE k = ?"); + + session().execute(p.bind(1L, "row")); + session().execute(p.bind(1L, "row")); + + ResultSet rs = session().execute("SELECT * FROM " + COUNTER_TABLE); + List rows = rs.all(); + assertThat(rows).hasSize(1); + assertThat(rows.get(0).getLong("c")).isEqualTo(2L); + } + + /** + * Checks for deadlocks when a session shutdown races with the initialization of the cluster + * (JAVA-418). 
+ */ + @Test(groups = "short") + public void should_close_properly_when_racing_with_cluster_init() throws InterruptedException { + for (int i = 0; i < 500; i++) { + + // Use our own cluster and session (not the ones provided by the parent class) because we want + // an uninitialized cluster + // (note the use of newSession below) + final Cluster cluster = + Cluster.builder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + try { + final Session session = cluster.newSession(); + + // Spawn two threads to simulate the race + ExecutorService executor = Executors.newFixedThreadPool(2); + final CountDownLatch startLatch = new CountDownLatch(1); + + executor.execute( + new Runnable() { + @Override + public void run() { + try { + startLatch.await(); + cluster.init(); + } catch (InterruptedException e) { + fail("unexpected interruption", e); + } + } + }); + + executor.execute( + new Runnable() { + @Override + public void run() { + try { + startLatch.await(); + TimeUnit.MILLISECONDS.sleep(10); + session.close(); + } catch (InterruptedException e) { + fail("unexpected interruption", e); + } + } + }); + + // Start the threads + startLatch.countDown(); + + executor.shutdown(); + boolean normalShutdown = executor.awaitTermination(5, TimeUnit.SECONDS); + assertThat(normalShutdown).isTrue(); + + } finally { + // The deadlock occurred here before JAVA-418 + cluster.close(); + } } - - /** - * Ensures that if an attempt is made to create a {@link Session} via {@link Cluster#connect} with an invalid - * keyspace that the returned exception is decorated with an indication to check that your keyspace name is valid - * and includes the original {@link SyntaxError}. - */ - @Test(groups = "short") - public void should_give_explicit_error_message_when_keyspace_name_invalid() { - try { - cluster().connect("%!;"); - fail("Expected a SyntaxError"); - } catch (SyntaxError e) { - assertThat(e.getMessage()) - .contains("Error executing \"USE %!;\"") - .contains("Check that your keyspace name is valid"); - } + } + + /** + * Ensures that if an attempt is made to create a {@link Session} via {@link Cluster#connect} with + * an invalid keyspace that the returned exception is decorated with an indication to check that + * your keyspace name is valid and includes the original {@link SyntaxError}. + */ + @Test(groups = "short") + public void should_give_explicit_error_message_when_keyspace_name_invalid() { + try { + cluster().connect("%!;"); + fail("Expected a SyntaxError"); + } catch (SyntaxError e) { + assertThat(e.getMessage()) + .contains("Error executing \"USE %!;\"") + .contains("Check that your keyspace name is valid"); } - - /** - * Ensures that if an attempt is made to create a {@link Session} via {@link Cluster#connectAsync} with an invalid - * keyspace that the returned exception is decorated with an indication to check that your keyspace name is valid - * and includes the original {@link SyntaxError}. 
- */ - @Test(groups = "short") - public void should_give_explicit_error_message_when_keyspace_name_invalid_async() { - ListenableFuture sessionFuture = cluster().connectAsync(""); - try { - sessionFuture.get(); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(SyntaxError.class); - assertThat(e.getCause().getMessage()) - .contains("no viable alternative at input ''") - .contains("Check that your keyspace name is valid"); - } catch (Exception e) { - fail("Did not expect Exception", e); - } + } + + /** + * Ensures that if an attempt is made to create a {@link Session} via {@link Cluster#connectAsync} + * with an invalid keyspace that the returned exception is decorated with an indication to check + * that your keyspace name is valid and includes the original {@link SyntaxError}. + */ + @Test(groups = "short") + public void should_give_explicit_error_message_when_keyspace_name_invalid_async() { + ListenableFuture sessionFuture = cluster().connectAsync(""); + try { + sessionFuture.get(); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SyntaxError.class); + assertThat(e.getCause().getMessage()) + .contains("no viable alternative at input ''") + .contains("Check that your keyspace name is valid"); + } catch (Exception e) { + fail("Did not expect Exception", e); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SimpleJSONParserTest.java b/driver-core/src/test/java/com/datastax/driver/core/SimpleJSONParserTest.java index 18e15390a73..4a987dd875c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SimpleJSONParserTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SimpleJSONParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +17,24 @@ */ package com.datastax.driver.core; +import static org.testng.Assert.assertEquals; + import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.testng.annotations.Test; -import static org.testng.Assert.assertEquals; - public class SimpleJSONParserTest { - @Test(groups = "unit") - public void SimpleParsingTest() throws Exception { + @Test(groups = "unit") + public void SimpleParsingTest() throws Exception { - assertEquals(ImmutableList.of("1", "2", "3"), SimpleJSONParser.parseStringList("[\"1\",\"2\",\"3\"]")); - assertEquals(ImmutableList.of("foo ' bar \""), SimpleJSONParser.parseStringList("[\"foo ' bar \\\"\"]")); + assertEquals( + ImmutableList.of("1", "2", "3"), SimpleJSONParser.parseStringList("[\"1\",\"2\",\"3\"]")); + assertEquals( + ImmutableList.of("foo ' bar \""), SimpleJSONParser.parseStringList("[\"foo ' bar \\\"\"]")); - assertEquals(ImmutableMap.of("foo", "bar", "bar", "foo"), SimpleJSONParser.parseStringMap("{\"foo\":\"bar\",\"bar\":\"foo\"}")); - } + assertEquals( + ImmutableMap.of("foo", "bar", "bar", "foo"), + SimpleJSONParser.parseStringMap("{\"foo\":\"bar\",\"bar\":\"foo\"}")); + } } - diff --git a/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementIntegrationTest.java index fd86faceebd..591a393e877 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.exceptions.UnsupportedFeatureException; import com.datastax.driver.core.utils.CassandraVersion; @@ -22,96 +26,105 @@ import org.testng.SkipException; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class SimpleStatementIntegrationTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE users(id int, id2 int, name text, primary key (id, id2))", - "INSERT INTO users(id, id2, name) VALUES (1, 2, 'test')" - ); - } - - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_execute_query_with_named_values() { - // Given - SimpleStatement statement = new SimpleStatement("SELECT * FROM users WHERE id = :id and id2 = :id2", - ImmutableMap.of("id", 1, "id2", 2)); - - // When - Row row = session().execute(statement).one(); - - // Then - assertThat(row).isNotNull(); - assertThat(row.getString("name")).isEqualTo("test"); - } - - @Test(groups = "short", expectedExceptions = InvalidQueryException.class) - @CassandraVersion("2.1.0") - public void should_fail_if_query_with_named_values_but_missing_parameter() { - // Given a Statement missing named parameters. - SimpleStatement statement = new SimpleStatement("SELECT * FROM users WHERE id = :id and id2 = :id2", - ImmutableMap.of("id2", 2)); - - // When - session().execute(statement).one(); - - // Then - The driver does allow this because it doesn't know what parameters are required, but C* should - // throw an InvalidQueryException. - } - - @Test(groups = "short", expectedExceptions = InvalidQueryException.class) - @CassandraVersion("2.1.0") - public void should_fail_if_query_with_named_values_but_using_wrong_type() { - // Given a Statement using a named parameter with the wrong value for the type (id is of type int, using double) - SimpleStatement statement = new SimpleStatement("SELECT * FROM users WHERE id = :id and id2 = :id2", - ImmutableMap.of("id", 2.7, "id2", 2)); - - // When - session().execute(statement).one(); - - // Then - The driver does allow this because it doesn't know the type information, but C* should throw an - // InvalidQueryException. + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE users(id int, id2 int, name text, primary key (id, id2))", + "INSERT INTO users(id, id2, name) VALUES (1, 2, 'test')"); + } + + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_execute_query_with_named_values() { + // Given + SimpleStatement statement = + new SimpleStatement( + "SELECT * FROM users WHERE id = :id and id2 = :id2", + ImmutableMap.of("id", 1, "id2", 2)); + + // When + Row row = session().execute(statement).one(); + + // Then + assertThat(row).isNotNull(); + assertThat(row.getString("name")).isEqualTo("test"); + } + + @Test(groups = "short", expectedExceptions = InvalidQueryException.class) + @CassandraVersion("2.1.0") + public void should_fail_if_query_with_named_values_but_missing_parameter() { + // Given a Statement missing named parameters. 
+ SimpleStatement statement = + new SimpleStatement( + "SELECT * FROM users WHERE id = :id and id2 = :id2", + ImmutableMap.of("id2", 2)); + + // When + session().execute(statement).one(); + + // Then - The driver does allow this because it doesn't know what parameters are required, but + // C* should + // throw an InvalidQueryException. + } + + @Test(groups = "short", expectedExceptions = InvalidQueryException.class) + @CassandraVersion("2.1.0") + public void should_fail_if_query_with_named_values_but_using_wrong_type() { + // Given a Statement using a named parameter with the wrong value for the type (id is of type + // int, using double) + SimpleStatement statement = + new SimpleStatement( + "SELECT * FROM users WHERE id = :id and id2 = :id2", + ImmutableMap.of("id", 2.7, "id2", 2)); + + // When + session().execute(statement).one(); + + // Then - The driver does allow this because it doesn't know the type information, but C* should + // throw an + // InvalidQueryException. + } + + public void useNamedValuesWithProtocol(ProtocolVersion version) { + Cluster vCluster = + createClusterBuilder() + .addContactPoints(getContactPoints()) + .withPort(ccm().getBinaryPort()) + .withProtocolVersion(version) + .build(); + try { + Session vSession = vCluster.connect(this.keyspace); + // Given - A simple statement with named parameters. + SimpleStatement statement = + new SimpleStatement( + "SELECT * FROM users WHERE id = :id", ImmutableMap.of("id", 1)); + + // When - Executing that statement against a Cluster instance using Protocol Version V2. + vSession.execute(statement).one(); + + // Then - Should throw an UnsupportedFeatureException + } finally { + vCluster.close(); } + } - public void useNamedValuesWithProtocol(ProtocolVersion version) { - Cluster vCluster = createClusterBuilder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withProtocolVersion(version).build(); - try { - Session vSession = vCluster.connect(this.keyspace); - // Given - A simple statement with named parameters. - SimpleStatement statement = new SimpleStatement("SELECT * FROM users WHERE id = :id", - ImmutableMap.of("id", 1)); - - // When - Executing that statement against a Cluster instance using Protocol Version V2. 
- vSession.execute(statement).one(); - - // Then - Should throw an UnsupportedFeatureException - } finally { - vCluster.close(); - } + @Test(groups = "short", expectedExceptions = UnsupportedFeatureException.class) + @CassandraVersion("2.0.0") + public void should_fail_if_query_with_named_values_if_protocol_is_V2() { + if (ccm().getCassandraVersion().getMajor() >= 3) { + throw new SkipException("Skipping since Cassandra 3.0+ does not support protocol v2"); } - - @Test(groups = "short", expectedExceptions = UnsupportedFeatureException.class) - @CassandraVersion("2.0.0") - public void should_fail_if_query_with_named_values_if_protocol_is_V2() { - if (ccm().getCassandraVersion().getMajor() >= 3) { - throw new SkipException("Skipping since Cassandra 3.0+ does not support protocol v2"); - } - useNamedValuesWithProtocol(ProtocolVersion.V2); - } - - @Test(groups = "short", expectedExceptions = UnsupportedFeatureException.class) - @CassandraVersion("2.0.0") - public void should_fail_if_query_with_named_values_if_protocol_is_V1() { - if (ccm().getCassandraVersion().getMajor() >= 3) { - throw new SkipException("Skipping since Cassandra 3.0+ does not support protocol v1"); - } - useNamedValuesWithProtocol(ProtocolVersion.V1); + useNamedValuesWithProtocol(ProtocolVersion.V2); + } + + @Test(groups = "short", expectedExceptions = UnsupportedFeatureException.class) + @CassandraVersion("2.0.0") + public void should_fail_if_query_with_named_values_if_protocol_is_V1() { + if (ccm().getCassandraVersion().getMajor() >= 3) { + throw new SkipException("Skipping since Cassandra 3.0+ does not support protocol v1"); } + useNamedValuesWithProtocol(ProtocolVersion.V1); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementTest.java index 1030586bcf3..8f827c89fb0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SimpleStatementTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,85 +17,89 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class SimpleStatementTest { - @Test(groups = "unit", expectedExceptions = {IllegalArgumentException.class}) - public void should_fail_if_too_many_variables() { - List args = Collections.nCopies(1 << 16, (Object) 1); - new SimpleStatement("mock query", args.toArray()); - } - - @Test(groups = "unit", expectedExceptions = {IllegalStateException.class}) - public void should_throw_ISE_if_getObject_called_on_statement_without_values() { - new SimpleStatement("doesn't matter").getObject(0); - } + @Test( + groups = "unit", + expectedExceptions = {IllegalArgumentException.class}) + public void should_fail_if_too_many_variables() { + List args = Collections.nCopies(1 << 16, (Object) 1); + new SimpleStatement("mock query", args.toArray()); + } - @Test(groups = "unit", expectedExceptions = {IndexOutOfBoundsException.class}) - public void should_throw_IOOBE_if_getObject_called_with_wrong_index() { - new SimpleStatement("doesn't matter", new Object()).getObject(1); - } + @Test( + groups = "unit", + expectedExceptions = {IllegalStateException.class}) + public void should_throw_ISE_if_getObject_called_on_statement_without_values() { + new SimpleStatement("doesn't matter").getObject(0); + } - @Test(groups = "unit") - public void should_return_object_at_ith_index() { - Object expected = new Object(); - Object actual = new SimpleStatement("doesn't matter", expected).getObject(0); - assertThat(actual).isSameAs(expected); - } + @Test( + groups = "unit", + expectedExceptions = {IndexOutOfBoundsException.class}) + public void should_throw_IOOBE_if_getObject_called_with_wrong_index() { + new SimpleStatement("doesn't matter", new Object()).getObject(1); + } - @Test(groups = "unit", expectedExceptions = {IllegalStateException.class}) - public void should_throw_ISE_if_getObject_called_on_statement_without_named_values() { - new SimpleStatement("doesn't matter").getObject("name"); - } + @Test(groups = "unit") + public void should_return_object_at_ith_index() { + Object expected = new Object(); + Object actual = new SimpleStatement("doesn't matter", expected).getObject(0); + assertThat(actual).isSameAs(expected); + } - @Test(groups = "unit") - public void should_return_null_if_getObject_called_on_statement_with_wrong_name() { - Map namedVales = new HashMap(); - namedVales.put("name", new Object()); - Object actual = new SimpleStatement("doesn't matter", namedVales).getObject("wrong name"); - assertThat(actual).isNull(); - } + @Test( + groups = "unit", + expectedExceptions = {IllegalStateException.class}) + public void should_throw_ISE_if_getObject_called_on_statement_without_named_values() { + new SimpleStatement("doesn't matter").getObject("name"); + } - @Test(groups = "unit") - public void should_return_object_with_name() { - Object expected = new Object(); - String valueName = "name"; - Map namedVales = new HashMap(); - namedVales.put(valueName, expected); - Object actual = new 
SimpleStatement("doesn't matter", namedVales).getObject(valueName); - assertThat(actual).isSameAs(expected); - } + @Test(groups = "unit") + public void should_return_null_if_getObject_called_on_statement_with_wrong_name() { + Map namedVales = new HashMap(); + namedVales.put("name", new Object()); + Object actual = new SimpleStatement("doesn't matter", namedVales).getObject("wrong name"); + assertThat(actual).isNull(); + } - @Test(groups = "unit", expectedExceptions = {IllegalStateException.class}) - public void should_throw_ISE_if_getValueNames_called_on_statement_without_named_values() { - new SimpleStatement("doesn't matter").getValueNames(); - } + @Test(groups = "unit") + public void should_return_object_with_name() { + Object expected = new Object(); + String valueName = "name"; + Map namedVales = new HashMap(); + namedVales.put(valueName, expected); + Object actual = new SimpleStatement("doesn't matter", namedVales).getObject(valueName); + assertThat(actual).isSameAs(expected); + } - @Test(groups = "unit") - public void should_return_named_value() { - Object expected = new Object(); - Map namedValues = new HashMap(); - namedValues.put("name", expected); - Object actual = new SimpleStatement("doesn't matter", namedValues).getObject("name"); - assertThat(actual).isEqualTo(expected); - } + @Test( + groups = "unit", + expectedExceptions = {IllegalStateException.class}) + public void should_throw_ISE_if_getValueNames_called_on_statement_without_named_values() { + new SimpleStatement("doesn't matter").getValueNames(); + } - @Test(groups = "unit") - public void should_return_number_of_values() { - assertThat( - new SimpleStatement("doesn't matter").valuesCount() - ).isEqualTo(0); - assertThat( - new SimpleStatement("doesn't matter", 1, 2).valuesCount() - ).isEqualTo(2); - } + @Test(groups = "unit") + public void should_return_named_value() { + Object expected = new Object(); + Map namedValues = new HashMap(); + namedValues.put("name", expected); + Object actual = new SimpleStatement("doesn't matter", namedValues).getObject("name"); + assertThat(actual).isEqualTo(expected); + } + @Test(groups = "unit") + public void should_return_number_of_values() { + assertThat(new SimpleStatement("doesn't matter").valuesCount()).isEqualTo(0); + assertThat(new SimpleStatement("doesn't matter", 1, 2).valuesCount()).isEqualTo(2); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SimpleStrategyTest.java b/driver-core/src/test/java/com/datastax/driver/core/SimpleStrategyTest.java index df24ce8e447..91cf49dc3ad 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SimpleStrategyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SimpleStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,234 +19,233 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.testng.annotations.Test; - import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.Set; +import org.testng.annotations.Test; public class SimpleStrategyTest extends AbstractReplicationStrategyTest { - private static ReplicationStrategy simpleStrategy(int replicationFactor) { - return ReplicationStrategy.create(ImmutableMap.builder()// - .put("class", "SimpleStrategy")// - .put("replication_factor", String.valueOf(replicationFactor))// - .build()); - } - - /* - * --------------------------------------------------------------------------- - * Ring, replication, etc... setup. These are reusable for the tests - * This data is based on a real ring topology. Some tests are using - * smaller and more specific topologies instead. - * --------------------------------------------------------------------------- - */ - - private static final Token TOKEN01 = token("-9000000000000000000"); - private static final Token TOKEN02 = token("-8000000000000000000"); - private static final Token TOKEN03 = token("-7000000000000000000"); - private static final Token TOKEN04 = token("-6000000000000000000"); - private static final Token TOKEN05 = token("-5000000000000000000"); - private static final Token TOKEN06 = token("-4000000000000000000"); - private static final Token TOKEN07 = token("-3000000000000000000"); - private static final Token TOKEN08 = token("-2000000000000000000"); - private static final Token TOKEN09 = token("-1000000000000000000"); - private static final Token TOKEN10 = token("0"); - private static final Token TOKEN11 = token("1000000000000000000"); - private static final Token TOKEN12 = token("2000000000000000000"); - private static final Token TOKEN13 = token("3000000000000000000"); - private static final Token TOKEN14 = token("4000000000000000000"); - private static final Token TOKEN15 = token("5000000000000000000"); - private static final Token TOKEN16 = token("6000000000000000000"); - private static final Token TOKEN17 = token("7000000000000000000"); - private static final Token TOKEN18 = token("8000000000000000000"); - private static final Token TOKEN19 = token("9000000000000000000"); - - private static final InetSocketAddress IP1 = socketAddress("127.0.0.101"); - private static final InetSocketAddress IP2 = socketAddress("127.0.0.102"); - private static final InetSocketAddress IP3 = socketAddress("127.0.0.103"); - private static final InetSocketAddress IP4 = socketAddress("127.0.0.104"); - private static final InetSocketAddress IP5 = socketAddress("127.0.0.105"); - private static final InetSocketAddress IP6 = socketAddress("127.0.0.106"); - - private static final ReplicationStrategy exampleStrategy = simpleStrategy(3); - - private static final ReplicationStrategy exampleStrategyTooManyReplicas = simpleStrategy(8); - - private static final List exampleRing = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN09) - .add(TOKEN10) - .add(TOKEN11) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - 
.add(TOKEN17) - .add(TOKEN18) + private static ReplicationStrategy simpleStrategy(int replicationFactor) { + return ReplicationStrategy.create( + ImmutableMap.builder() // + .put("class", "SimpleStrategy") // + .put("replication_factor", String.valueOf(replicationFactor)) // + .build()); + } + + /* + * --------------------------------------------------------------------------- + * Ring, replication, etc... setup. These are reusable for the tests + * This data is based on a real ring topology. Some tests are using + * smaller and more specific topologies instead. + * --------------------------------------------------------------------------- + */ + + private static final Token TOKEN01 = token("-9000000000000000000"); + private static final Token TOKEN02 = token("-8000000000000000000"); + private static final Token TOKEN03 = token("-7000000000000000000"); + private static final Token TOKEN04 = token("-6000000000000000000"); + private static final Token TOKEN05 = token("-5000000000000000000"); + private static final Token TOKEN06 = token("-4000000000000000000"); + private static final Token TOKEN07 = token("-3000000000000000000"); + private static final Token TOKEN08 = token("-2000000000000000000"); + private static final Token TOKEN09 = token("-1000000000000000000"); + private static final Token TOKEN10 = token("0"); + private static final Token TOKEN11 = token("1000000000000000000"); + private static final Token TOKEN12 = token("2000000000000000000"); + private static final Token TOKEN13 = token("3000000000000000000"); + private static final Token TOKEN14 = token("4000000000000000000"); + private static final Token TOKEN15 = token("5000000000000000000"); + private static final Token TOKEN16 = token("6000000000000000000"); + private static final Token TOKEN17 = token("7000000000000000000"); + private static final Token TOKEN18 = token("8000000000000000000"); + private static final Token TOKEN19 = token("9000000000000000000"); + + private static final InetSocketAddress IP1 = socketAddress("127.0.0.101"); + private static final InetSocketAddress IP2 = socketAddress("127.0.0.102"); + private static final InetSocketAddress IP3 = socketAddress("127.0.0.103"); + private static final InetSocketAddress IP4 = socketAddress("127.0.0.104"); + private static final InetSocketAddress IP5 = socketAddress("127.0.0.105"); + private static final InetSocketAddress IP6 = socketAddress("127.0.0.106"); + + private static final ReplicationStrategy exampleStrategy = simpleStrategy(3); + + private static final ReplicationStrategy exampleStrategyTooManyReplicas = simpleStrategy(8); + + private static final List exampleRing = + ImmutableList.builder() + .add(TOKEN01) + .add(TOKEN02) + .add(TOKEN03) + .add(TOKEN04) + .add(TOKEN05) + .add(TOKEN06) + .add(TOKEN07) + .add(TOKEN08) + .add(TOKEN09) + .add(TOKEN10) + .add(TOKEN11) + .add(TOKEN12) + .add(TOKEN13) + .add(TOKEN14) + .add(TOKEN15) + .add(TOKEN16) + .add(TOKEN17) + .add(TOKEN18) + .build(); + + private static final Map exampleTokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1)) + .put(TOKEN02, host(IP1)) + .put(TOKEN03, host(IP5)) + .put(TOKEN04, host(IP3)) + .put(TOKEN05, host(IP1)) + .put(TOKEN06, host(IP5)) + .put(TOKEN07, host(IP2)) + .put(TOKEN08, host(IP6)) + .put(TOKEN09, host(IP3)) + .put(TOKEN10, host(IP4)) + .put(TOKEN11, host(IP5)) + .put(TOKEN12, host(IP4)) + .put(TOKEN13, host(IP4)) + .put(TOKEN14, host(IP2)) + .put(TOKEN15, host(IP6)) + .put(TOKEN16, host(IP3)) + .put(TOKEN17, host(IP2)) + .put(TOKEN18, host(IP6)) + .build(); + + 
private static final String keyspace = "excalibur"; + + /* + * -------------- + * Tests + * -------------- + */ + + @Test(groups = "unit") + public void simpleStrategySimpleTopologyTest() { + List ring = + ImmutableList.builder().add(TOKEN01).add(TOKEN06).add(TOKEN14).add(TOKEN19).build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1)) + .put(TOKEN06, host(IP2)) + .put(TOKEN14, host(IP1)) + .put(TOKEN19, host(IP2)) .build(); - private static final Map exampleTokenToPrimary = ImmutableMap.builder() + ReplicationStrategy strategy = simpleStrategy(2); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN06, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN14, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); + } + + @Test(groups = "unit") + public void simpleStrategyConsecutiveRingSectionsTest() { + List ring = + ImmutableList.builder().add(TOKEN01).add(TOKEN06).add(TOKEN14).add(TOKEN19).build(); + + Map tokenToPrimary = + ImmutableMap.builder() + .put(TOKEN01, host(IP1)) + .put(TOKEN06, host(IP1)) + .put(TOKEN14, host(IP2)) + .put(TOKEN19, host(IP2)) + .build(); + + ReplicationStrategy strategy = simpleStrategy(2); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN06, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); + } + + @Test(groups = "unit") + public void simpleStrategyUnbalancedRingTest() { + List ring = + ImmutableList.builder().add(TOKEN01).add(TOKEN06).add(TOKEN14).add(TOKEN19).build(); + + Map tokenToPrimary = + ImmutableMap.builder() .put(TOKEN01, host(IP1)) - .put(TOKEN02, host(IP1)) - .put(TOKEN03, host(IP5)) - .put(TOKEN04, host(IP3)) - .put(TOKEN05, host(IP1)) - .put(TOKEN06, host(IP5)) - .put(TOKEN07, host(IP2)) - .put(TOKEN08, host(IP6)) - .put(TOKEN09, host(IP3)) - .put(TOKEN10, host(IP4)) - .put(TOKEN11, host(IP5)) - .put(TOKEN12, host(IP4)) - .put(TOKEN13, host(IP4)) + .put(TOKEN06, host(IP1)) .put(TOKEN14, host(IP2)) - .put(TOKEN15, host(IP6)) - .put(TOKEN16, host(IP3)) - .put(TOKEN17, host(IP2)) - .put(TOKEN18, host(IP6)) + .put(TOKEN19, host(IP1)) .build(); - private static final String keyspace = "excalibur"; - - /* - * -------------- - * Tests - * -------------- - */ - - @Test(groups = "unit") - public void simpleStrategySimpleTopologyTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN06) - .add(TOKEN14) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1)) - .put(TOKEN06, host(IP2)) - .put(TOKEN14, host(IP1)) - .put(TOKEN19, host(IP2)) - .build(); - - ReplicationStrategy strategy = simpleStrategy(2); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN06, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN14, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); - } - - @Test(groups = "unit") - public void simpleStrategyConsecutiveRingSectionsTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN06) - .add(TOKEN14) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1)) - 
.put(TOKEN06, host(IP1)) - .put(TOKEN14, host(IP2)) - .put(TOKEN19, host(IP2)) - .build(); - - ReplicationStrategy strategy = simpleStrategy(2); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN06, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN19, IP2, IP1); - } - - @Test(groups = "unit") - public void simpleStrategyUnbalancedRingTest() { - List ring = ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN06) - .add(TOKEN14) - .add(TOKEN19) - .build(); - - Map tokenToPrimary = ImmutableMap.builder() - .put(TOKEN01, host(IP1)) - .put(TOKEN06, host(IP1)) - .put(TOKEN14, host(IP2)) - .put(TOKEN19, host(IP1)) - .build(); - - ReplicationStrategy strategy = simpleStrategy(2); - - Map> replicaMap = strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN06, IP1, IP2); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN19, IP1, IP2); - } - - @Test(groups = "unit") - public void simpleStrategyExampleTopologyMapTest() { - Map> replicaMap = exampleStrategy.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3); - assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP1, IP5); - assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3); - assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5); - assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP2); - assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP2); - assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3); - assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2); - assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6); - assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1); - assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5); - } - - @Test(groups = "unit") - public void simpleStrategyExampleTopologyTooManyReplicasTest() { - Map> replicaMap = exampleStrategyTooManyReplicas.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); - - assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP1, IP5, IP2, IP6, IP4); - assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3, IP4, IP1); - assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP4, IP5, IP1); - assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5, IP2, IP1); - assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP2, IP6, IP1); - assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP2, IP6, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN11, 
IP5, IP4, IP2, IP6, IP3, IP1); - assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP2, IP6, IP3, IP1, IP5); - assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP2, IP6, IP3, IP1, IP5); - assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, IP1, IP5, IP4); - assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP1, IP5, IP4); - assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP1, IP5, IP4); - assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5, IP3, IP4); - assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP3, IP2, IP4); - } + ReplicationStrategy strategy = simpleStrategy(2); + + Map> replicaMap = + strategy.computeTokenToReplicaMap(keyspace, tokenToPrimary, ring); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN06, IP1, IP2); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN19, IP1, IP2); + } + + @Test(groups = "unit") + public void simpleStrategyExampleTopologyMapTest() { + Map> replicaMap = + exampleStrategy.computeTokenToReplicaMap(keyspace, exampleTokenToPrimary, exampleRing); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3); + assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP1, IP5); + assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3); + assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5); + assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP2); + assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP2); + assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3); + assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2); + assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6); + assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1); + assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5); + } + + @Test(groups = "unit") + public void simpleStrategyExampleTopologyTooManyReplicasTest() { + Map> replicaMap = + exampleStrategyTooManyReplicas.computeTokenToReplicaMap( + keyspace, exampleTokenToPrimary, exampleRing); + + assertReplicaPlacement(replicaMap, TOKEN01, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN02, IP1, IP5, IP3, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN03, IP5, IP3, IP1, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN04, IP3, IP1, IP5, IP2, IP6, IP4); + assertReplicaPlacement(replicaMap, TOKEN05, IP1, IP5, IP2, IP6, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN06, IP5, IP2, IP6, IP3, IP4, IP1); + assertReplicaPlacement(replicaMap, TOKEN07, IP2, IP6, IP3, IP4, IP5, IP1); + assertReplicaPlacement(replicaMap, TOKEN08, IP6, IP3, IP4, IP5, IP2, IP1); + assertReplicaPlacement(replicaMap, TOKEN09, IP3, IP4, IP5, IP2, IP6, IP1); + assertReplicaPlacement(replicaMap, TOKEN10, IP4, IP5, IP2, IP6, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN11, IP5, IP4, IP2, IP6, IP3, IP1); + assertReplicaPlacement(replicaMap, TOKEN12, IP4, IP2, IP6, IP3, IP1, IP5); + assertReplicaPlacement(replicaMap, TOKEN13, IP4, IP2, IP6, IP3, IP1, IP5); + assertReplicaPlacement(replicaMap, TOKEN14, IP2, IP6, IP3, 
IP1, IP5, IP4); + assertReplicaPlacement(replicaMap, TOKEN15, IP6, IP3, IP2, IP1, IP5, IP4); + assertReplicaPlacement(replicaMap, TOKEN16, IP3, IP2, IP6, IP1, IP5, IP4); + assertReplicaPlacement(replicaMap, TOKEN17, IP2, IP6, IP1, IP5, IP3, IP4); + assertReplicaPlacement(replicaMap, TOKEN18, IP6, IP1, IP5, IP3, IP2, IP4); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SingleConnectionPoolTest.java b/driver-core/src/test/java/com/datastax/driver/core/SingleConnectionPoolTest.java index 31d80abd662..21c5e79a324 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SingleConnectionPoolTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SingleConnectionPoolTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,47 +17,49 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; +import static org.testng.Assert.fail; +import com.datastax.driver.core.utils.CassandraVersion; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - -import static org.testng.Assert.fail; +import org.testng.annotations.Test; @CassandraVersion("2.1.0") public class SingleConnectionPoolTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_throttle_requests() { - // Throttle to a very low value. Even a single thread can generate a higher throughput. - final int maxRequests = 10; - cluster().getConfiguration().getPoolingOptions() - .setMaxRequestsPerConnection(HostDistance.LOCAL, maxRequests); + @Test(groups = "short") + public void should_throttle_requests() { + // Throttle to a very low value. Even a single thread can generate a higher throughput. 
+ final int maxRequests = 10; + cluster() + .getConfiguration() + .getPoolingOptions() + .setMaxRequestsPerConnection(HostDistance.LOCAL, maxRequests); - // Track in flight requests in a dedicated thread every second - final AtomicBoolean excessInflightQueriesSpotted = new AtomicBoolean(false); - final Host host = cluster().getMetadata().getHost(ccm().addressOfNode(1)); - ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); - final Runnable openConnectionsWatcher = new Runnable() { - @Override - public void run() { - int inFlight = session().getState().getInFlightQueries(host); - if (inFlight > maxRequests) - excessInflightQueriesSpotted.set(true); - } + // Track in flight requests in a dedicated thread every second + final AtomicBoolean excessInflightQueriesSpotted = new AtomicBoolean(false); + final Host host = cluster().getMetadata().getHost(ccm().addressOfNode(1)); + ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); + final Runnable openConnectionsWatcher = + new Runnable() { + @Override + public void run() { + int inFlight = session().getState().getInFlightQueries(host); + if (inFlight > maxRequests) excessInflightQueriesSpotted.set(true); + } }; - openConnectionsWatcherExecutor.scheduleAtFixedRate(openConnectionsWatcher, 200, 200, TimeUnit.MILLISECONDS); + openConnectionsWatcherExecutor.scheduleAtFixedRate( + openConnectionsWatcher, 200, 200, TimeUnit.MILLISECONDS); - // Generate the load - for (int i = 0; i < 10000; i++) - session().executeAsync("SELECT release_version FROM system.local"); + // Generate the load + for (int i = 0; i < 10000; i++) + session().executeAsync("SELECT release_version FROM system.local"); - openConnectionsWatcherExecutor.shutdownNow(); - if (excessInflightQueriesSpotted.get()) { - fail("Inflight queries exceeded the limit"); - } + openConnectionsWatcherExecutor.shutdownNow(); + if (excessInflightQueriesSpotted.get()) { + fail("Inflight queries exceeded the limit"); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SingleTokenIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/SingleTokenIntegrationTest.java index f11e7798545..8992a3a7fbd 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SingleTokenIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SingleTokenIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,43 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.Bytes; -import org.testng.annotations.Test; +import static com.datastax.driver.core.Assertions.assertThat; +import com.datastax.driver.core.utils.Bytes; import java.nio.ByteBuffer; import java.util.Set; - -import static com.datastax.driver.core.Assertions.assertThat; +import org.testng.annotations.Test; @CCMConfig( - // force the initial token to a non-min value to validate that the single range will always be ]minToken, minToken] - config = "initial_token:1", - clusterProvider = "createClusterBuilderNoDebouncing" -) + // force the initial token to a non-min value to validate that the single range will always be + // ]minToken, minToken] + config = "initial_token:1", + clusterProvider = "createClusterBuilderNoDebouncing") public class SingleTokenIntegrationTest extends CCMTestsSupport { - /** - * JAVA-684: Empty TokenRange returned in a one token cluster - */ - @Test(groups = "short") - public void should_return_single_non_empty_range_when_cluster_has_one_single_token() { - cluster().manager.controlConnection.refreshNodeListAndTokenMap(); - Metadata metadata = cluster().getMetadata(); - Set tokenRanges = metadata.getTokenRanges(); - assertThat(tokenRanges).hasSize(1); - TokenRange tokenRange = tokenRanges.iterator().next(); - assertThat(tokenRange) - .startsWith(Token.M3PToken.FACTORY.minToken()) - .endsWith(Token.M3PToken.FACTORY.minToken()) - .isNotEmpty() - .isNotWrappedAround(); - - Set hostsForRange = metadata.getReplicas(keyspace, tokenRange); - Host host1 = TestUtils.findHost(cluster(), 1); - assertThat(hostsForRange).containsOnly(host1); - - ByteBuffer randomPartitionKey = Bytes.fromHexString("0xCAFEBABE"); - Set hostsForKey = metadata.getReplicas(keyspace, randomPartitionKey); - assertThat(hostsForKey).containsOnly(host1); - - Set rangesForHost = metadata.getTokenRanges(keyspace, host1); - assertThat(rangesForHost).containsOnly(tokenRange); - } + /** JAVA-684: Empty TokenRange returned in a one token cluster */ + @Test(groups = "short") + public void should_return_single_non_empty_range_when_cluster_has_one_single_token() { + cluster().manager.controlConnection.refreshNodeListAndTokenMap(); + Metadata metadata = cluster().getMetadata(); + Set tokenRanges = metadata.getTokenRanges(); + assertThat(tokenRanges).hasSize(1); + TokenRange tokenRange = tokenRanges.iterator().next(); + assertThat(tokenRange) + .startsWith(Token.M3PToken.FACTORY.minToken()) + .endsWith(Token.M3PToken.FACTORY.minToken()) + .isNotEmpty() + .isNotWrappedAround(); + + Set hostsForRange = metadata.getReplicas(keyspace, tokenRange); + Host host1 = TestUtils.findHost(cluster(), 1); + assertThat(hostsForRange).containsOnly(host1); + + ByteBuffer randomPartitionKey = Bytes.fromHexString("0xCAFEBABE"); + Set hostsForKey = metadata.getReplicas(keyspace, randomPartitionKey); + assertThat(hostsForKey).containsOnly(host1); + + Set rangesForHost = metadata.getTokenRanges(keyspace, host1); + assertThat(rangesForHost).containsOnly(tokenRange); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SortingLoadBalancingPolicy.java b/driver-core/src/test/java/com/datastax/driver/core/SortingLoadBalancingPolicy.java index 
a0c45941ddb..cda97f77cc0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SortingLoadBalancingPolicy.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SortingLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,6 @@ import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.google.common.primitives.UnsignedBytes; - import java.util.Collection; import java.util.Comparator; import java.util.Iterator; @@ -25,57 +26,60 @@ import java.util.concurrent.ConcurrentSkipListSet; /** - * A load balancing policy that sorts hosts on the last byte of the address, - * so that the query plan is always [host1, host2, host3]. + * A load balancing policy that sorts hosts on the last byte of the address, so that the query plan + * is always [host1, host2, host3]. */ public class SortingLoadBalancingPolicy implements LoadBalancingPolicy { - final SortedSet hosts = new ConcurrentSkipListSet(new Comparator() { - @Override - public int compare(Host host1, Host host2) { - byte[] address1 = host1.getAddress().getAddress(); - byte[] address2 = host2.getAddress().getAddress(); - return UnsignedBytes.compare( - address1[address1.length - 1], - address2[address2.length - 1]); - } - }); + final SortedSet hosts = + new ConcurrentSkipListSet( + new Comparator() { + @Override + public int compare(Host host1, Host host2) { + byte[] address1 = host1.getEndPoint().resolve().getAddress().getAddress(); + byte[] address2 = host2.getEndPoint().resolve().getAddress().getAddress(); + return UnsignedBytes.compare( + address1[address1.length - 1], address2[address2.length - 1]); + } + }); - @Override - public void init(Cluster cluster, Collection hosts) { - this.hosts.addAll(hosts); - } + @Override + public void init(Cluster cluster, Collection hosts) { + this.hosts.addAll(hosts); + } - @Override - public HostDistance distance(Host host) { - return HostDistance.LOCAL; - } + @Override + public HostDistance distance(Host host) { + return HostDistance.LOCAL; + } - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - return hosts.iterator(); - } + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + return hosts.iterator(); + } - @Override - public void onAdd(Host host) { - onUp(host); - } + @Override + public void onAdd(Host host) { + onUp(host); + } - @Override - public void onUp(Host host) { - hosts.add(host); - } + @Override + public void onUp(Host host) { + hosts.add(host); + } - @Override - public void onDown(Host host) { - hosts.remove(host); - } + @Override + public void onDown(Host host) { + hosts.remove(host); + } - @Override - public void 
onRemove(Host host) { - onDown(host); - } + @Override + public void onRemove(Host host) { + onDown(host); + } - @Override - public void close() {/*nothing to do*/} + @Override + public void close() { + /*nothing to do*/ + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SpeculativeExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SpeculativeExecutionTest.java index 02f5f043292..75eae4bccac 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SpeculativeExecutionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SpeculativeExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +17,21 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.scassandra.http.client.PrimingRequest.then; + import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.policies.ConstantSpeculativeExecutionPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import java.util.List; +import java.util.Map; import org.mockito.Mockito; import org.scassandra.http.client.Consistency; import org.scassandra.http.client.PrimingRequest; @@ -29,296 +40,395 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.List; -import java.util.Map; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static org.mockito.Mockito.*; -import static org.scassandra.http.client.PrimingRequest.then; - public class SpeculativeExecutionTest { - ScassandraCluster scassandras; - - Cluster cluster = null; - SortingLoadBalancingPolicy loadBalancingPolicy; - Metrics.Errors errors; - Host host1, host2, host3; - Session session; - - @BeforeMethod(groups = "short") - public void beforeMethod() { - scassandras = ScassandraCluster.builder().withNodes(3).build(); - scassandras.init(); - - int speculativeExecutionDelay = 200; - - loadBalancingPolicy = new SortingLoadBalancingPolicy(); - cluster = Cluster.builder() - .addContactPoints(scassandras.address(2).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withLoadBalancingPolicy(loadBalancingPolicy) - .withSpeculativeExecutionPolicy(new 
ConstantSpeculativeExecutionPolicy(speculativeExecutionDelay, 1)) - .withQueryOptions(new QueryOptions().setDefaultIdempotence(true)) - .withRetryPolicy(new CustomRetryPolicy()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - session = cluster.connect(); - - host1 = TestUtils.findHost(cluster, 1); - host2 = TestUtils.findHost(cluster, 2); - host3 = TestUtils.findHost(cluster, 3); - - errors = cluster.getMetrics().getErrorMetrics(); - } - - @AfterMethod(groups = "short", alwaysRun = true) - public void afterMethod() { - if (cluster != null) - cluster.close(); - if (scassandras != null) - scassandras.stop(); - } - - @Test(groups = "short") - public void should_not_start_speculative_execution_if_first_execution_completes_successfully() { - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() + ScassandraCluster scassandras; + + Cluster cluster = null; + SortingLoadBalancingPolicy loadBalancingPolicy; + Metrics.Errors errors; + Host host1, host2, host3; + Session session; + + @BeforeMethod(groups = "short") + public void beforeMethod() { + scassandras = ScassandraCluster.builder().withNodes(3).build(); + scassandras.init(); + + int speculativeExecutionDelay = 200; + + loadBalancingPolicy = new SortingLoadBalancingPolicy(); + cluster = + Cluster.builder() + .addContactPoints(scassandras.address(2).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .withSpeculativeExecutionPolicy( + new ConstantSpeculativeExecutionPolicy(speculativeExecutionDelay, 1)) + .withQueryOptions(new QueryOptions().setDefaultIdempotence(true)) + .withRetryPolicy(new CustomRetryPolicy()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + session = cluster.connect(); + + host1 = TestUtils.findHost(cluster, 1); + host2 = TestUtils.findHost(cluster, 2); + host3 = TestUtils.findHost(cluster, 3); + + errors = cluster.getMetrics().getErrorMetrics(); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void afterMethod() { + if (cluster != null) cluster.close(); + if (scassandras != null) scassandras.stop(); + } + + @Test(groups = "short") + public void should_not_start_speculative_execution_if_first_execution_completes_successfully() { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() .withQuery("mock query") .withThen(then().withRows(row("result", "result1"))) - .build() - ); - - long execStartCount = errors.getSpeculativeExecutions().getCount(); - - ResultSet rs = session.execute("mock query"); - Row row = rs.one(); - - assertThat(row.getString("result")).isEqualTo("result1"); - assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount); - ExecutionInfo executionInfo = rs.getExecutionInfo(); - assertThat(executionInfo.getTriedHosts()).containsOnly(host1); - assertThat(executionInfo.getQueriedHost()).isEqualTo(host1); - assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - } - - @Test(groups = "short") - public void should_not_start_speculative_execution_if_first_execution_retries_but_is_still_fast_enough() { - // will retry once on this node: - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() + .build()); + + long execStartCount = errors.getSpeculativeExecutions().getCount(); + + ResultSet rs = session.execute("mock query"); + Row row = rs.one(); + + assertThat(row.getString("result")).isEqualTo("result1"); + 
assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount); + ExecutionInfo executionInfo = rs.getExecutionInfo(); + assertThat(executionInfo.getTriedHosts()).containsOnly(host1); + assertThat(executionInfo.getQueriedHost()).isEqualTo(host1); + assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + } + + @Test(groups = "short") + public void + should_not_start_speculative_execution_if_first_execution_retries_but_is_still_fast_enough() { + // will retry once on this node: + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() .withQuery("mock query") .withConsistency(Consistency.TWO) .withThen(then().withResult(Result.read_request_timeout)) - .build() - ); + .build()); - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() .withQuery("mock query") .withConsistency(Consistency.ONE) .withThen(then().withRows(row("result", "result1"))) - .build() - ); - - long execStartCount = errors.getSpeculativeExecutions().getCount(); - long retriesStartCount = errors.getRetriesOnUnavailable().getCount(); - - SimpleStatement statement = new SimpleStatement("mock query"); - statement.setConsistencyLevel(ConsistencyLevel.TWO); - ResultSet rs = session.execute(statement); - Row row = rs.one(); - - assertThat(row).isNotNull(); - assertThat(row.getString("result")).isEqualTo("result1"); - assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(retriesStartCount + 1); - ExecutionInfo executionInfo = rs.getExecutionInfo(); - assertThat(executionInfo.getTriedHosts()).containsOnly(host1); - assertThat(executionInfo.getQueriedHost()).isEqualTo(host1); - assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - } - - @Test(groups = "short") - public void should_start_speculative_execution_if_first_execution_takes_too_long() { - scassandras.node(1).primingClient().prime(PrimingRequest.queryBuilder() + .build()); + + long execStartCount = errors.getSpeculativeExecutions().getCount(); + long retriesStartCount = errors.getRetriesOnUnavailable().getCount(); + + SimpleStatement statement = new SimpleStatement("mock query"); + statement.setConsistencyLevel(ConsistencyLevel.TWO); + ResultSet rs = session.execute(statement); + Row row = rs.one(); + + assertThat(row).isNotNull(); + assertThat(row.getString("result")).isEqualTo("result1"); + assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(retriesStartCount + 1); + ExecutionInfo executionInfo = rs.getExecutionInfo(); + assertThat(executionInfo.getTriedHosts()).containsOnly(host1); + assertThat(executionInfo.getQueriedHost()).isEqualTo(host1); + assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + } + + @Test(groups = "short") + public void should_start_speculative_execution_if_first_execution_takes_too_long() { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() .withQuery("mock query") .withThen(then().withRows(row("result", "result1")).withFixedDelay(400L)) - .build() - ); + .build()); - 
scassandras.node(2).primingClient().prime(PrimingRequest.queryBuilder() + scassandras + .node(2) + .primingClient() + .prime( + PrimingRequest.queryBuilder() .withQuery("mock query") .withThen(then().withRows(row("result", "result2"))) - .build() - ); - long execStartCount = errors.getSpeculativeExecutions().getCount(); - - ResultSet rs = session.execute("mock query"); - Row row = rs.one(); - - assertThat(row.getString("result")).isEqualTo("result2"); - assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount + 1); - ExecutionInfo executionInfo = rs.getExecutionInfo(); - // triedHosts does not contain host1 because the request to it had not completed yet - assertThat(executionInfo.getTriedHosts()).containsOnly(host2); - assertThat(executionInfo.getQueriedHost()).isEqualTo(host2); - assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(1); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(1); + .build()); + long execStartCount = errors.getSpeculativeExecutions().getCount(); + + ResultSet rs = session.execute("mock query"); + Row row = rs.one(); + + assertThat(row.getString("result")).isEqualTo("result2"); + assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount + 1); + ExecutionInfo executionInfo = rs.getExecutionInfo(); + // triedHosts does not contain host1 because the request to it had not completed yet + assertThat(executionInfo.getTriedHosts()).containsOnly(host2); + assertThat(executionInfo.getQueriedHost()).isEqualTo(host2); + assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(1); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(1); + } + + @Test(groups = "short") + public void should_start_speculative_execution_on_multiple_hosts_with_zero_delay() { + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandras.address(2).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .withSpeculativeExecutionPolicy(new ConstantSpeculativeExecutionPolicy(0, 2)) + .withQueryOptions(new QueryOptions().setDefaultIdempotence(true)) + .withRetryPolicy(new CustomRetryPolicy()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + Session session = cluster.connect(); + host1 = TestUtils.findHost(cluster, 1); + host2 = TestUtils.findHost(cluster, 2); + host3 = TestUtils.findHost(cluster, 3); + errors = cluster.getMetrics().getErrorMetrics(); + + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result1")).withFixedDelay(1000L)) + .build()); + + scassandras + .node(2) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result2")).withFixedDelay(1000L)) + .build()); + + scassandras + .node(3) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result3"))) + .build()); + long execStartCount = errors.getSpeculativeExecutions().getCount(); + + ResultSet rs = session.execute("mock query"); + Row row = rs.one(); + + assertThat(row.getString("result")).isEqualTo("result3"); + assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount + 2); + ExecutionInfo executionInfo = rs.getExecutionInfo(); + // triedHosts does not contain host1 because the request to it had not completed yet + assertThat(executionInfo.getTriedHosts()).containsOnly(host3); + 
assertThat(executionInfo.getQueriedHost()).isEqualTo(host3); + assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(2); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(2); + } + + @Test(groups = "short") + public void should_wait_until_all_executions_have_finished() { + // Rely on read timeouts to trigger errors that cause an execution to move to the next node + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1000); + + scassandras + .node(1) + .primingClient() + // execution1 starts with host1, which will time out at t=1000 + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result1")).withFixedDelay(2000L)) + .build()); + // at t=1000, execution1 moves to host3, which eventually succeeds at t=1500 + scassandras + .node(3) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result3")).withFixedDelay(500L)) + .build()); + // meanwhile, execution2 starts at t=200, using host2 which times out at t=1200 + // at that time, the query plan is empty so execution2 fails + // The goal of this test is to check that execution2 does not fail the query, since execution1 + // is still running + scassandras + .node(2) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result2")).withFixedDelay(2000L)) + .build()); + long execStartCount = errors.getSpeculativeExecutions().getCount(); + + ResultSet rs = session.execute("mock query"); + Row row = rs.one(); + + assertThat(row.getString("result")).isEqualTo("result3"); + assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount + 1); + ExecutionInfo executionInfo = rs.getExecutionInfo(); + assertThat(executionInfo.getTriedHosts()).containsOnly(host1, host2, host3); + assertThat(executionInfo.getQueriedHost()).isEqualTo(host3); + assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(1); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + } + + /** + * Validates that when a Cluster is initialized that {@link + * SpeculativeExecutionPolicy#init(Cluster)} is called and that when a Cluster is closed {@link + * SpeculativeExecutionPolicy#close()} is called. + * + * @test_category queries:speculative_execution + * @expected_result init and close are called on cluster init and close. 
+ * @jira_ticket JAVA-796 + * @since 2.0.11, 2.1.7, 2.2.1 + */ + @Test(groups = "short") + public void should_init_and_close_policy_on_cluster() { + SpeculativeExecutionPolicy mockPolicy = mock(SpeculativeExecutionPolicy.class); + + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandras.address(2).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withSpeculativeExecutionPolicy(mockPolicy) + .build(); + + verify(mockPolicy, times(0)).init(cluster); + verify(mockPolicy, times(0)).close(); + + try { + cluster.init(); + verify(mockPolicy, times(1)).init(cluster); + } finally { + cluster.close(); + verify(mockPolicy, times(1)).close(); } - - @Test(groups = "short") - public void should_wait_until_all_executions_have_finished() { - // Rely on read timeouts to trigger errors that cause an execution to move to the next node - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1000); - - scassandras.node(1).primingClient() - // execution1 starts with host1, which will time out at t=1000 - .prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(row("result", "result1")).withFixedDelay(2000L)) - .build()); - // at t=1000, execution1 moves to host3, which eventually succeeds at t=1500 - scassandras.node(3).primingClient() - .prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(row("result", "result3")).withFixedDelay(500L)) - .build()); - // meanwhile, execution2 starts at t=200, using host2 which times out at t=1200 - // at that time, the query plan is empty so execution2 fails - // The goal of this test is to check that execution2 does not fail the query, since execution1 is still running - scassandras.node(2).primingClient() - .prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(row("result", "result2")).withFixedDelay(2000L)) - .build()); - long execStartCount = errors.getSpeculativeExecutions().getCount(); - - ResultSet rs = session.execute("mock query"); - Row row = rs.one(); - - assertThat(row.getString("result")).isEqualTo("result3"); - assertThat(errors.getSpeculativeExecutions().getCount()).isEqualTo(execStartCount + 1); - ExecutionInfo executionInfo = rs.getExecutionInfo(); - assertThat(executionInfo.getTriedHosts()).containsOnly(host1, host2, host3); - assertThat(executionInfo.getQueriedHost()).isEqualTo(host3); - assertThat(executionInfo.getSpeculativeExecutions()).isEqualTo(1); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + } + + /** + * Validates that if a query gets speculatively re-executed, the second execution uses the same + * default timestamp. + * + * @test_category tracing + * @jira_ticket JAVA-724 + * @expected_result timestamp generator invoked only once for a query that caused two executions. 
+ */ + @Test(groups = "short") + public void should_use_same_default_timestamp_for_all_executions() { + TimestampGenerator timestampGenerator = Mockito.spy(ServerSideTimestampGenerator.INSTANCE); + Cluster cluster = + Cluster.builder() + .addContactPoints(scassandras.address(2).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .withTimestampGenerator(timestampGenerator) + .withSpeculativeExecutionPolicy(new ConstantSpeculativeExecutionPolicy(1, 2)) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + scassandras + .node(1) + .primingClient() + // execution1 starts with host1, which will time out at t=1000 + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result1")).withFixedDelay(2000L)) + .build()); + + Session session = cluster.connect(); + Metrics.Errors errors = cluster.getMetrics().getErrorMetrics(); + + long execStartCount = errors.getSpeculativeExecutions().getCount(); + + SimpleStatement statement = new SimpleStatement("mock query"); + statement.setIdempotent(true); + session.execute(statement); + + // Should have only requested one timestamp. + if (errors.getSpeculativeExecutions().getCount() == execStartCount + 1) { + Mockito.verify(timestampGenerator, times(1)).next(); + } + } finally { + cluster.close(); } - - /** - * Validates that when a Cluster is initialized that {@link SpeculativeExecutionPolicy#init(Cluster)} is called and - * that when a Cluster is closed {@link SpeculativeExecutionPolicy#close()} is called. - * - * @test_category queries:speculative_execution - * @expected_result init and close are called on cluster init and close. - * @jira_ticket JAVA-796 - * @since 2.0.11, 2.1.7, 2.2.1 - */ - @Test(groups = "short") - public void should_init_and_close_policy_on_cluster() { - SpeculativeExecutionPolicy mockPolicy = mock(SpeculativeExecutionPolicy.class); - - Cluster cluster = Cluster.builder() - .addContactPoints(scassandras.address(2).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withSpeculativeExecutionPolicy(mockPolicy) - .build(); - - verify(mockPolicy, times(0)).init(cluster); - verify(mockPolicy, times(0)).close(); - - try { - cluster.init(); - verify(mockPolicy, times(1)).init(cluster); - } finally { - cluster.close(); - verify(mockPolicy, times(1)).close(); - } + } + + /** + * Custom retry policy that retries at ONE on read timeout. This deals with the fact that + * Scassandra only allows read timeouts with 0 replicas. + */ + static class CustomRetryPolicy implements RetryPolicy { + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + if (nbRetry != 0) return RetryDecision.rethrow(); + return RetryDecision.retry(ConsistencyLevel.ONE); } - /** - * Validates that if a query gets speculatively re-executed, the second execution uses the same default timestamp. - * - * @test_category tracing - * @jira_ticket JAVA-724 - * @expected_result timestamp generator invoked only once for a query that caused two executions. 
- */ - @Test(groups = "short") - public void should_use_same_default_timestamp_for_all_executions() { - TimestampGenerator timestampGenerator = Mockito.spy(ServerSideTimestampGenerator.INSTANCE); - Cluster cluster = Cluster.builder() - .addContactPoints(scassandras.address(2).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withLoadBalancingPolicy(loadBalancingPolicy) - .withTimestampGenerator(timestampGenerator) - .withSpeculativeExecutionPolicy(new ConstantSpeculativeExecutionPolicy(1, 2)) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - scassandras.node(1).primingClient() - // execution1 starts with host1, which will time out at t=1000 - .prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(row("result", "result1")).withFixedDelay(2000L)) - .build()); - - Session session = cluster.connect(); - Metrics.Errors errors = cluster.getMetrics().getErrorMetrics(); - - long execStartCount = errors.getSpeculativeExecutions().getCount(); - - SimpleStatement statement = new SimpleStatement("mock query"); - statement.setIdempotent(true); - session.execute(statement); - - // Should have only requested one timestamp. - if (errors.getSpeculativeExecutions().getCount() == execStartCount + 1) { - Mockito.verify(timestampGenerator, times(1)).next(); - } - } finally { - cluster.close(); - } + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return RetryDecision.rethrow(); } - /** - * Custom retry policy that retries at ONE on read timeout. - * This deals with the fact that Scassandra only allows read timeouts with 0 replicas. - */ - static class CustomRetryPolicy implements RetryPolicy { - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - return RetryDecision.retry(ConsistencyLevel.ONE); - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.rethrow(); - } - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return RetryDecision.rethrow(); - } - - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } - - @Override - public void init(Cluster cluster) { - } - - @Override - public void close() { - } + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return RetryDecision.rethrow(); } - private static List> row(String key, String value) { - return ImmutableList.>of(ImmutableMap.of(key, value)); + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return RetryDecision.tryNextHost(cl); } + + @Override + public void init(Cluster cluster) {} + + @Override + public void close() {} + } + + private static List> row(String key, String value) { + return ImmutableList.>of(ImmutableMap.of(key, value)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/StartupOptionTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/StartupOptionTest.java new file mode 100644 index 00000000000..b4a294d36de --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/StartupOptionTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + +@CassandraVersion("4.0.0") +public class StartupOptionTest extends CCMTestsSupport { + + /** + * Ensures that when connecting, the driver STARTUP message contains DRIVER_NAME and + * DRIVER_VERSION configuration in its option map. This should be reflected in the + * system_views.clients table. + */ + @Test(groups = "short") + public void should_send_driver_name_and_version() { + ResultSet result = + session().execute("select driver_name, driver_version from system_views.clients"); + + // Should be at least 2 connections (1 control connection, 1 pooled connection) + assertThat(result.getAvailableWithoutFetching()).isGreaterThanOrEqualTo(2); + + for (Row row : result) { + assertThat(row.getString("driver_version")).isEqualTo(Cluster.getDriverVersion()); + assertThat(row.getString("driver_name")).isEqualTo("Apache Cassandra Java Driver"); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/StateListenerBase.java b/driver-core/src/test/java/com/datastax/driver/core/StateListenerBase.java index c908d195026..24108ca0878 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StateListenerBase.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StateListenerBase.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,30 +18,21 @@ package com.datastax.driver.core; public class StateListenerBase implements Host.StateListener { - @Override - public void onAdd(Host host) { - } - - @Override - public void onUp(Host host) { - } - - @Override - public void onDown(Host host) { - } - - @Override - public void onRemove(Host host) { - } + @Override + public void onAdd(Host host) {} - @Override - public void onRegister(Cluster cluster) { + @Override + public void onUp(Host host) {} - } + @Override + public void onDown(Host host) {} - @Override - public void onUnregister(Cluster cluster) { + @Override + public void onRemove(Host host) {} - } + @Override + public void onRegister(Cluster cluster) {} + @Override + public void onUnregister(Cluster cluster) {} } diff --git a/driver-core/src/test/java/com/datastax/driver/core/StateListenerTest.java b/driver-core/src/test/java/com/datastax/driver/core/StateListenerTest.java index 462b8b61b30..bb05d3a4175 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StateListenerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StateListenerTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,103 +17,111 @@ */ package com.datastax.driver.core; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.Test; - -import java.util.concurrent.CountDownLatch; - import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.StateListenerTest.TestListener.Event.*; +import static com.datastax.driver.core.StateListenerTest.TestListener.Event.ADD; +import static com.datastax.driver.core.StateListenerTest.TestListener.Event.DOWN; +import static com.datastax.driver.core.StateListenerTest.TestListener.Event.REMOVE; +import static com.datastax.driver.core.StateListenerTest.TestListener.Event.UP; import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.assertThat; +import java.util.concurrent.CountDownLatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; + @CreateCCM(PER_METHOD) @CCMConfig(dirtiesContext = true, createSession = false) public class StateListenerTest extends CCMTestsSupport { - private static final Logger logger = LoggerFactory.getLogger(StateListenerTest.class); + private static final Logger logger = LoggerFactory.getLogger(StateListenerTest.class); + + @Test(groups = "long") + public void should_receive_events_when_node_states_change() throws InterruptedException { + TestListener listener = new TestListener(); + cluster().register(listener); + + listener.setExpectedEvent(ADD); + ccm().add(2); + ccm().start(2); + listener.waitForEvent(); + + listener.setExpectedEvent(DOWN); + ccm().forceStop(1); + listener.waitForEvent(); + + listener.setExpectedEvent(UP); + ccm().start(1); + listener.waitForEvent(); + + listener.setExpectedEvent(REMOVE); + ccm().decommission(2); + listener.waitForEvent(); + } + + static class TestListener implements Host.StateListener { + enum Event { + ADD, + UP, + SUSPECTED, + DOWN, + REMOVE + } - @Test(groups = "long") - public void should_receive_events_when_node_states_change() throws InterruptedException { - TestListener listener = new TestListener(); - cluster().register(listener); + volatile CountDownLatch latch; + volatile Event expectedEvent; + volatile Event actualEvent; - listener.setExpectedEvent(ADD); - ccm().add(2); - ccm().start(2); - listener.waitForEvent(); + void setExpectedEvent(Event expectedEvent) { + logger.debug("Set expected event {}", expectedEvent); + this.expectedEvent = expectedEvent; + latch = new CountDownLatch(1); + } - listener.setExpectedEvent(DOWN); - ccm().forceStop(1); - listener.waitForEvent(); + void waitForEvent() throws InterruptedException { + assertThat(latch.await(2, MINUTES)) + .as("Timed out waiting for event " + expectedEvent) + .isTrue(); + assertThat(actualEvent).isEqualTo(expectedEvent); + } - listener.setExpectedEvent(UP); - ccm().start(1); - listener.waitForEvent(); + private void reportActualEvent(Event event) { + if (latch.getCount() == 0) { + // TODO this actually happens because C* sends REMOVE/ADD/REMOVE on a remove + logger.error( + "Was not waiting for an event but got {} (this should eventually be fixed by JAVA-657)", + event); + return; + } + logger.debug("Got event {}", event); + actualEvent = event; + latch.countDown(); + } - 
listener.setExpectedEvent(REMOVE); - ccm().decommission(2); - listener.waitForEvent(); + @Override + public void onAdd(Host host) { + reportActualEvent(ADD); } - static class TestListener implements Host.StateListener { - enum Event {ADD, UP, SUSPECTED, DOWN, REMOVE} - - volatile CountDownLatch latch; - volatile Event expectedEvent; - volatile Event actualEvent; - - void setExpectedEvent(Event expectedEvent) { - logger.debug("Set expected event {}", expectedEvent); - this.expectedEvent = expectedEvent; - latch = new CountDownLatch(1); - } - - void waitForEvent() throws InterruptedException { - assertThat(latch.await(2, MINUTES)) - .as("Timed out waiting for event " + expectedEvent) - .isTrue(); - assertThat(actualEvent).isEqualTo(expectedEvent); - } - - private void reportActualEvent(Event event) { - if (latch.getCount() == 0) { - // TODO this actually happens because C* sends REMOVE/ADD/REMOVE on a remove - logger.error("Was not waiting for an event but got {} (this should eventually be fixed by JAVA-657)", event); - return; - } - logger.debug("Got event {}", event); - actualEvent = event; - latch.countDown(); - } - - @Override - public void onAdd(Host host) { - reportActualEvent(ADD); - } - - @Override - public void onUp(Host host) { - reportActualEvent(UP); - } - - @Override - public void onDown(Host host) { - reportActualEvent(DOWN); - } - - @Override - public void onRemove(Host host) { - reportActualEvent(REMOVE); - } - - @Override - public void onRegister(Cluster cluster) { - } - - @Override - public void onUnregister(Cluster cluster) { - } + @Override + public void onUp(Host host) { + reportActualEvent(UP); } + + @Override + public void onDown(Host host) { + reportActualEvent(DOWN); + } + + @Override + public void onRemove(Host host) { + reportActualEvent(REMOVE); + } + + @Override + public void onRegister(Cluster cluster) {} + + @Override + public void onUnregister(Cluster cluster) {} + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/StatementIdempotenceTest.java b/driver-core/src/test/java/com/datastax/driver/core/StatementIdempotenceTest.java index bea976cc049..20f0b99b3a6 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StatementIdempotenceTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StatementIdempotenceTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,27 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.querybuilder.QueryBuilder.add; +import static com.datastax.driver.core.querybuilder.QueryBuilder.append; +import static com.datastax.driver.core.querybuilder.QueryBuilder.contains; +import static com.datastax.driver.core.querybuilder.QueryBuilder.delete; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fcall; +import static com.datastax.driver.core.querybuilder.QueryBuilder.incr; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.lt; +import static com.datastax.driver.core.querybuilder.QueryBuilder.now; +import static com.datastax.driver.core.querybuilder.QueryBuilder.prepend; +import static com.datastax.driver.core.querybuilder.QueryBuilder.put; +import static com.datastax.driver.core.querybuilder.QueryBuilder.raw; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static com.datastax.driver.core.querybuilder.QueryBuilder.uuid; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import com.datastax.driver.core.querybuilder.BuiltStatement; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -23,167 +46,157 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - public class StatementIdempotenceTest { - private Cluster cluster; - - @BeforeMethod(groups = "unit") - public void setUpQueryBuilder() throws Exception { - CodecRegistry codecRegistry = new CodecRegistry(); - cluster = mock(Cluster.class); - Configuration configuration = mock(Configuration.class); - ProtocolOptions protocolOptions = mock(ProtocolOptions.class); - when(cluster.getConfiguration()).thenReturn(configuration); - when(configuration.getCodecRegistry()).thenReturn(codecRegistry); - when(configuration.getProtocolOptions()).thenReturn(protocolOptions); - when(protocolOptions.getProtocolVersion()).thenReturn(ProtocolVersion.NEWEST_SUPPORTED); - } - - @Test(groups = "unit") - public void should_default_to_false_when_not_set_on_statement_nor_query_options() { - QueryOptions queryOptions = new QueryOptions(); - SimpleStatement statement = new SimpleStatement("", cluster); - - assertThat(statement.isIdempotentWithDefault(queryOptions)).isFalse(); - } - - @Test(groups = "unit") - public void should_use_query_options_when_not_set_on_statement() { - QueryOptions queryOptions = new QueryOptions(); - SimpleStatement statement = new SimpleStatement("", cluster); - - for (boolean valueInOptions : new boolean[]{true, false}) { - queryOptions.setDefaultIdempotence(valueInOptions); - 
assertThat(statement.isIdempotentWithDefault(queryOptions)).isEqualTo(valueInOptions); - } - } - - @Test(groups = "unit") - public void should_use_statement_when_set_on_statement() { - QueryOptions queryOptions = new QueryOptions(); - SimpleStatement statement = new SimpleStatement("", cluster); - - for (boolean valueInOptions : new boolean[]{true, false}) - for (boolean valueInStatement : new boolean[]{true, false}) { - queryOptions.setDefaultIdempotence(valueInOptions); - statement.setIdempotent(valueInStatement); - assertThat(statement.isIdempotentWithDefault(queryOptions)).isEqualTo(valueInStatement); - } + private Cluster cluster; + + @BeforeMethod(groups = "unit") + public void setUpQueryBuilder() throws Exception { + CodecRegistry codecRegistry = new CodecRegistry(); + cluster = mock(Cluster.class); + Configuration configuration = mock(Configuration.class); + ProtocolOptions protocolOptions = mock(ProtocolOptions.class); + when(cluster.getConfiguration()).thenReturn(configuration); + when(configuration.getCodecRegistry()).thenReturn(codecRegistry); + when(configuration.getProtocolOptions()).thenReturn(protocolOptions); + when(protocolOptions.getProtocolVersion()).thenReturn(ProtocolVersion.NEWEST_SUPPORTED); + } + + @Test(groups = "unit") + public void should_default_to_false_when_not_set_on_statement_nor_query_options() { + QueryOptions queryOptions = new QueryOptions(); + SimpleStatement statement = new SimpleStatement("", cluster); + + assertThat(statement.isIdempotentWithDefault(queryOptions)).isFalse(); + } + + @Test(groups = "unit") + public void should_use_query_options_when_not_set_on_statement() { + QueryOptions queryOptions = new QueryOptions(); + SimpleStatement statement = new SimpleStatement("", cluster); + + for (boolean valueInOptions : new boolean[] {true, false}) { + queryOptions.setDefaultIdempotence(valueInOptions); + assertThat(statement.isIdempotentWithDefault(queryOptions)).isEqualTo(valueInOptions); } - - @Test(groups = "unit") - public void should_infer_for_built_statement() { - for (BuiltStatement statement : idempotentBuiltStatements()) - assertThat(statement.isIdempotent()) - .as(statement.getQueryString()) - .isTrue(); - - for (BuiltStatement statement : nonIdempotentBuiltStatements()) - assertThat(statement.isIdempotent()) - .as(statement.getQueryString()) - .isFalse(); - } - - @Test(groups = "unit") - public void should_override_inferred_value_when_manually_set_on_built_statement() { - for (boolean manualValue : new boolean[]{true, false}) { - for (BuiltStatement statement : idempotentBuiltStatements()) { - statement.setIdempotent(manualValue); - assertThat(statement.isIdempotent()).isEqualTo(manualValue); - } - - for (BuiltStatement statement : nonIdempotentBuiltStatements()) { - statement.setIdempotent(manualValue); - assertThat(statement.isIdempotent()).isEqualTo(manualValue); - } - } - } - - private ImmutableList idempotentBuiltStatements() { - return ImmutableList.of( - update("foo").with(set("v", 1)).where(eq("k", 1)), // set simple value - update("foo").with(add("s", 1)).where(eq("k", 1)), // add to set - update("foo").with(put("m", "a", 1)).where(eq("k", 1)), // put in map - - // select statements should be idempotent even with function calls - select().countAll().from("foo").where(eq("k", 1)), - select().ttl("v").from("foo").where(eq("k", 1)), - select().writeTime("v").from("foo").where(eq("k", 1)), - select().fcall("token", "k").from("foo").where(eq("k", 1)) - - ); - } - - private ImmutableList nonIdempotentBuiltStatements() { - return 
ImmutableList.of( - update("foo").with(append("l", 1)).where(eq("k", 1)), // append to list - update("foo").with(set("v", 1)).and(prepend("l", 1)).where(eq("k", 1)), // prepend to list - update("foo").with(incr("c")).where(eq("k", 1)), // counter update - - // function calls - - insertInto("foo").value("k", 1).value("v", fcall("token", "k")), - insertInto("foo").value("k", 1).value("v", now()), - insertInto("foo").value("k", 1).value("v", uuid()), - - insertInto("foo").value("k", 1).value("v", Sets.newHashSet(fcall("token", "k"))), - insertInto("foo").value("k", 1).value("v", Sets.newHashSet(now())), - insertInto("foo").value("k", 1).value("v", Sets.newHashSet(uuid())), - - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, fcall("token", "k")}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, now()}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, uuid()}), - - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of("foo", fcall("token", "k"))}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of("foo", now())}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of("foo", uuid())}), - - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of(fcall("token", "k"), "foo")}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of(now(), "foo")}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of(uuid(), "foo")}), - - update("foo").with(set("v", fcall("token", "k"))).where(eq("k", 1)), - update("foo").with(set("v", now())).where(eq("k", 1)), - update("foo").with(set("v", uuid())).where(eq("k", 1)), - - update("foo").with(set("v", Lists.newArrayList(fcall("token", "k")))).where(eq("k", 1)), - update("foo").with(set("v", Lists.newArrayList(now()))).where(eq("k", 1)), - update("foo").with(set("v", Lists.newArrayList(uuid()))).where(eq("k", 1)), - - delete().from("foo").where(lt("k", fcall("now"))), - delete().from("foo").where(lt("k", now())), - update("foo").where(eq("k", fcall("now"))), - delete().listElt("a", 1).from("test_coll"), - - // LWT - update("foo").where(eq("is", "charlie?")).ifExists(), - update("foo").where(eq("good", "drivers")).onlyIf(contains("developers", "datastax")), - update("foo").onlyIf().and(contains("developers", "datastax")).where(eq("good", "drivers")), - update("foo").onlyIf(contains("developers", "datastax")).with(set("v", 0)), - update("foo").with(set("v", 0)).onlyIf(contains("hello", "world")), - - insertInto("foo").value("k", 1).value("v", Sets.newHashSet(now())).ifNotExists(), - - delete().from("foo").where(eq("k", 2)).ifExists(), - delete().from("foo").onlyIf(eq("k", 2)), - - // raw() calls - - insertInto("foo").value("k", 1).value("v", raw("foo()")), - insertInto("foo").value("k", 1).value("v", Sets.newHashSet(raw("foo()"))), - - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, raw("foo()")}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of("foo", raw("foo()"))}), - insertInto("foo").values(new String[]{"k", "v"}, new Object[]{1, ImmutableMap.of(raw("foo()"), "foo")}), - - update("foo").with(set("v", raw("foo()"))).where(eq("k", 1)), - update("foo").with(set("v", Lists.newArrayList(raw("foo()")))).where(eq("k", 1)) - - ); + } + + @Test(groups = "unit") + public void should_use_statement_when_set_on_statement() { + QueryOptions queryOptions = new QueryOptions(); + SimpleStatement statement = 
new SimpleStatement("", cluster); + + for (boolean valueInOptions : new boolean[] {true, false}) + for (boolean valueInStatement : new boolean[] {true, false}) { + queryOptions.setDefaultIdempotence(valueInOptions); + statement.setIdempotent(valueInStatement); + assertThat(statement.isIdempotentWithDefault(queryOptions)).isEqualTo(valueInStatement); + } + } + + @Test(groups = "unit") + public void should_infer_for_built_statement() { + for (BuiltStatement statement : idempotentBuiltStatements()) + assertThat(statement.isIdempotent()).as(statement.getQueryString()).isTrue(); + + for (BuiltStatement statement : nonIdempotentBuiltStatements()) + assertThat(statement.isIdempotent()).as(statement.getQueryString()).isFalse(); + } + + @Test(groups = "unit") + public void should_override_inferred_value_when_manually_set_on_built_statement() { + for (boolean manualValue : new boolean[] {true, false}) { + for (BuiltStatement statement : idempotentBuiltStatements()) { + statement.setIdempotent(manualValue); + assertThat(statement.isIdempotent()).isEqualTo(manualValue); + } + + for (BuiltStatement statement : nonIdempotentBuiltStatements()) { + statement.setIdempotent(manualValue); + assertThat(statement.isIdempotent()).isEqualTo(manualValue); + } } + } + + private ImmutableList idempotentBuiltStatements() { + return ImmutableList.of( + update("foo").with(set("v", 1)).where(eq("k", 1)), // set simple value + update("foo").with(add("s", 1)).where(eq("k", 1)), // add to set + update("foo").with(put("m", "a", 1)).where(eq("k", 1)), // put in map + + // select statements should be idempotent even with function calls + select().countAll().from("foo").where(eq("k", 1)), + select().ttl("v").from("foo").where(eq("k", 1)), + select().writeTime("v").from("foo").where(eq("k", 1)), + select().fcall("token", "k").from("foo").where(eq("k", 1))); + } + + private ImmutableList nonIdempotentBuiltStatements() { + return ImmutableList.of( + update("foo").with(append("l", 1)).where(eq("k", 1)), // append to list + update("foo").with(set("v", 1)).and(prepend("l", 1)).where(eq("k", 1)), // prepend to list + update("foo").with(incr("c")).where(eq("k", 1)), // counter update + + // function calls + + insertInto("foo").value("k", 1).value("v", fcall("token", "k")), + insertInto("foo").value("k", 1).value("v", now()), + insertInto("foo").value("k", 1).value("v", uuid()), + insertInto("foo").value("k", 1).value("v", Sets.newHashSet(fcall("token", "k"))), + insertInto("foo").value("k", 1).value("v", Sets.newHashSet(now())), + insertInto("foo").value("k", 1).value("v", Sets.newHashSet(uuid())), + insertInto("foo").values(new String[] {"k", "v"}, new Object[] {1, fcall("token", "k")}), + insertInto("foo").values(new String[] {"k", "v"}, new Object[] {1, now()}), + insertInto("foo").values(new String[] {"k", "v"}, new Object[] {1, uuid()}), + insertInto("foo") + .values( + new String[] {"k", "v"}, + new Object[] {1, ImmutableMap.of("foo", fcall("token", "k"))}), + insertInto("foo") + .values(new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of("foo", now())}), + insertInto("foo") + .values(new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of("foo", uuid())}), + insertInto("foo") + .values( + new String[] {"k", "v"}, + new Object[] {1, ImmutableMap.of(fcall("token", "k"), "foo")}), + insertInto("foo") + .values(new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of(now(), "foo")}), + insertInto("foo") + .values(new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of(uuid(), "foo")}), + update("foo").with(set("v", 
fcall("token", "k"))).where(eq("k", 1)), + update("foo").with(set("v", now())).where(eq("k", 1)), + update("foo").with(set("v", uuid())).where(eq("k", 1)), + update("foo").with(set("v", Lists.newArrayList(fcall("token", "k")))).where(eq("k", 1)), + update("foo").with(set("v", Lists.newArrayList(now()))).where(eq("k", 1)), + update("foo").with(set("v", Lists.newArrayList(uuid()))).where(eq("k", 1)), + delete().from("foo").where(lt("k", fcall("now"))), + delete().from("foo").where(lt("k", now())), + update("foo").where(eq("k", fcall("now"))), + delete().listElt("a", 1).from("test_coll"), + + // LWT + update("foo").where(eq("is", "charlie?")).ifExists(), + update("foo").where(eq("good", "drivers")).onlyIf(contains("developers", "datastax")), + update("foo").onlyIf().and(contains("developers", "datastax")).where(eq("good", "drivers")), + update("foo").onlyIf(contains("developers", "datastax")).with(set("v", 0)), + update("foo").with(set("v", 0)).onlyIf(contains("hello", "world")), + insertInto("foo").value("k", 1).value("v", Sets.newHashSet(now())).ifNotExists(), + delete().from("foo").where(eq("k", 2)).ifExists(), + delete().from("foo").onlyIf(eq("k", 2)), + + // raw() calls + + insertInto("foo").value("k", 1).value("v", raw("foo()")), + insertInto("foo").value("k", 1).value("v", Sets.newHashSet(raw("foo()"))), + insertInto("foo").values(new String[] {"k", "v"}, new Object[] {1, raw("foo()")}), + insertInto("foo") + .values( + new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of("foo", raw("foo()"))}), + insertInto("foo") + .values( + new String[] {"k", "v"}, new Object[] {1, ImmutableMap.of(raw("foo()"), "foo")}), + update("foo").with(set("v", raw("foo()"))).where(eq("k", 1)), + update("foo").with(set("v", Lists.newArrayList(raw("foo()")))).where(eq("k", 1))); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/StatementSizeTest.java b/driver-core/src/test/java/com/datastax/driver/core/StatementSizeTest.java new file mode 100644 index 00000000000..d824d2365c9 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/StatementSizeTest.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; + +import com.datastax.driver.core.ColumnDefinitions.Definition; +import com.datastax.driver.core.utils.Bytes; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; +import java.nio.ByteBuffer; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class StatementSizeTest { + + private static final byte[] MOCK_PAGING_STATE = Bytes.getArray(Bytes.fromHexString("0xdeadbeef")); + private static final ByteBuffer MOCK_PAYLOAD_VALUE1 = Bytes.fromHexString("0xabcd"); + private static final ByteBuffer MOCK_PAYLOAD_VALUE2 = Bytes.fromHexString("0xef"); + private static final ImmutableMap MOCK_PAYLOAD = + ImmutableMap.of("key1", MOCK_PAYLOAD_VALUE1, "key2", MOCK_PAYLOAD_VALUE2); + private static final byte[] PREPARED_ID = Bytes.getArray(Bytes.fromHexString("0xaaaa")); + private static final byte[] RESULT_METADATA_ID = Bytes.getArray(Bytes.fromHexString("0xbbbb")); + + @Mock private PreparedStatement preparedStatement; + + @BeforeMethod(groups = {"unit"}) + public void setup() { + MockitoAnnotations.initMocks(this); + + PreparedId preparedId = + new PreparedId( + new PreparedId.PreparedMetadata(MD5Digest.wrap(PREPARED_ID), null), + new PreparedId.PreparedMetadata(MD5Digest.wrap(RESULT_METADATA_ID), null), + new int[0], + ProtocolVersion.V5); + Mockito.when(preparedStatement.getPreparedId()).thenReturn(preparedId); + + ColumnDefinitions columnDefinitions = + new ColumnDefinitions( + new Definition[] { + new Definition("ks", "table", "c1", DataType.cint()), + new Definition("ks", "table", "c2", DataType.text()) + }, + CodecRegistry.DEFAULT_INSTANCE); + Mockito.when(preparedStatement.getVariables()).thenReturn(columnDefinitions); + Mockito.when(preparedStatement.getIncomingPayload()).thenReturn(null); + Mockito.when(preparedStatement.getOutgoingPayload()).thenReturn(null); + Mockito.when(preparedStatement.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT_INSTANCE); + } + + @Test(groups = "unit") + public void should_measure_size_of_simple_statement() { + String queryString = "SELECT release_version FROM system.local WHERE key = ?"; + SimpleStatement statement = new SimpleStatement(queryString); + int expectedSize = + 9 // header + + (4 + queryString.getBytes(Charsets.UTF_8).length) // query string + + 2 // consistency level + + 2 // serial consistency level + + 4 // fetch size + + 8 // timestamp + + 4; // flags + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + + SimpleStatement statementWithAnonymousValue = + new SimpleStatement(statement.getQueryString(), "local"); + assertThat(v5SizeOf(statementWithAnonymousValue)) + .isEqualTo( + expectedSize + + 2 // size of number of values + + (4 + "local".getBytes(Charsets.UTF_8).length) // value + ); + + SimpleStatement statementWithNamedValue = + new SimpleStatement( + statement.getQueryString(), ImmutableMap.of("key", "local")); + assertThat(v5SizeOf(statementWithNamedValue)) + .isEqualTo( + expectedSize + + 2 // size of number of values + + (2 + "key".getBytes(Charsets.UTF_8).length) // key + + (4 + "local".getBytes(Charsets.UTF_8).length) // value + ); + + statement.setPagingStateUnsafe(MOCK_PAGING_STATE); + expectedSize += 4 + MOCK_PAGING_STATE.length; + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + + statement.setOutgoingPayload(MOCK_PAYLOAD); + expectedSize += + 
2 // size of number of keys in the map + // size of each key/value pair + + (2 + "key1".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE1.remaining()) + + (2 + "key2".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE2.remaining()); + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + } + + @Test(groups = "unit") + public void should_measure_size_of_bound_statement() { + BoundStatement statement = new BoundStatement(preparedStatement); + int expectedSize = + 9 // header size + + (2 + PREPARED_ID.length) + + (2 + RESULT_METADATA_ID.length) + + 2 // consistency level + + 2 // serial consistency level + + 4 // fetch size + + 8 // timestamp + + 4 // flags + + 2 // size of value list + + 2 * (4) // two null values (size = -1) + ; + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + + statement.setInt(0, 0); + expectedSize += 4; // serialized value (we already have its size from when it was null above) + statement.setString(1, "test"); + expectedSize += "test".getBytes(Charsets.UTF_8).length; + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + + statement.setPagingStateUnsafe(MOCK_PAGING_STATE); + expectedSize += 4 + MOCK_PAGING_STATE.length; + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + + statement.setOutgoingPayload(MOCK_PAYLOAD); + expectedSize += + 2 // size of number of keys in the map + // size of each key/value pair + + (2 + "key1".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE1.remaining()) + + (2 + "key2".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE2.remaining()); + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + } + + @Test(groups = "unit") + public void should_measure_size_of_batch_statement() { + String queryString = "SELECT release_version FROM system.local"; + SimpleStatement statement1 = new SimpleStatement(queryString); + + BoundStatement statement2 = + new BoundStatement(preparedStatement).setInt(0, 0).setString(1, "test"); + BoundStatement statement3 = + new BoundStatement(preparedStatement).setInt(0, 0).setString(1, "test2"); + + BatchStatement batchStatement = + new BatchStatement().add(statement1).add(statement2).add(statement3); + + int expectedSize = + 9 // header size + + 1 + + 2 // batch type + number of queries + // statements' type of id + id (query string/prepared id): + + 1 + + (4 + queryString.getBytes(Charsets.UTF_8).length) + + 1 + + (2 + PREPARED_ID.length) + + 1 + + (2 + PREPARED_ID.length) + // value lists + + 2 + + (2 + (4 + 4) + (4 + "test".getBytes(Charsets.UTF_8).length)) + + (2 + (4 + 4) + (4 + "test2".getBytes(Charsets.UTF_8).length)) + + 2 // consistency level + + 2 // serial consistency level + + 8 // client timestamp + + 4; // flags + assertThat(v5SizeOf(batchStatement)).isEqualTo(expectedSize); + + batchStatement.setOutgoingPayload(MOCK_PAYLOAD); + expectedSize += + 2 // size of number of keys in the map + // size of each key/value pair + + (2 + "key1".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE1.remaining()) + + (2 + "key2".getBytes(Charsets.UTF_8).length) + + (4 + MOCK_PAYLOAD_VALUE2.remaining()); + assertThat(v5SizeOf(batchStatement)).isEqualTo(expectedSize); + } + + @Test(groups = "unit") + public void should_measure_size_of_wrapped_statement() { + String queryString = "SELECT release_version FROM system.local WHERE key = ?"; + Statement statement = new StatementWrapper(new SimpleStatement(queryString)) {}; + int expectedSize = + 9 // header + + (4 + queryString.getBytes(Charsets.UTF_8).length) // query string + + 2 // 
consistency level + + 2 // serial consistency level + + 4 // fetch size + + 8 // timestamp + + 4; // flags + assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); + } + + private int v5SizeOf(Statement statement) { + return statement.requestSizeInBytes(ProtocolVersion.V5, CodecRegistry.DEFAULT_INSTANCE); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java b/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java index eb66da18c12..c19153c571f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,274 +17,290 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.DriverException; -import com.datastax.driver.core.policies.*; +import com.datastax.driver.core.policies.DelegatingLoadBalancingPolicy; +import com.datastax.driver.core.policies.DelegatingSpeculativeExecutionPolicy; +import com.datastax.driver.core.policies.NoSpeculativeExecutionPolicy; +import com.datastax.driver.core.policies.RetryPolicy; +import com.datastax.driver.core.policies.RoundRobinPolicy; import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import java.util.Iterator; import java.util.concurrent.atomic.AtomicInteger; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class StatementWrapperTest extends CCMTestsSupport { - private static final String INSERT_QUERY = "insert into test (k, v) values (?, ?)"; - private static final String SELECT_QUERY = "select * from test where k = ?"; + private static final String INSERT_QUERY = "insert into test (k, v) values (?, ?)"; + private static final String SELECT_QUERY = "select * from test where k = ?"; - private static final String INSERT_MULTIPAGE_QUERY = "insert into test_multipage (k, v) values (?, ?)"; - private static final String SELECT_MULTIPAGE_QUERY = "select * from test_multipage where k = ?"; + private static final String INSERT_MULTIPAGE_QUERY = + "insert into test_multipage (k, v) values (?, ?)"; + private static final String SELECT_MULTIPAGE_QUERY = "select * from test_multipage where k = ?"; - CustomLoadBalancingPolicy loadBalancingPolicy = new CustomLoadBalancingPolicy(); - CustomSpeculativeExecutionPolicy speculativeExecutionPolicy = new CustomSpeculativeExecutionPolicy(); - CustomRetryPolicy retryPolicy = new CustomRetryPolicy(); + CustomLoadBalancingPolicy loadBalancingPolicy = new 
CustomLoadBalancingPolicy(); + CustomSpeculativeExecutionPolicy speculativeExecutionPolicy = + new CustomSpeculativeExecutionPolicy(); + CustomRetryPolicy retryPolicy = new CustomRetryPolicy(); - @Override - public void onTestContextInitialized() { - execute("create table test (k text primary key, v int)"); - execute("create table test_multipage (k text, v int, primary key (k, v))"); - } + @Override + public void onTestContextInitialized() { + execute("create table test (k text primary key, v int)"); + execute("create table test_multipage (k text, v int, primary key (k, v))"); + } - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder() - .withLoadBalancingPolicy(loadBalancingPolicy) - .withSpeculativeExecutionPolicy(speculativeExecutionPolicy) - .withRetryPolicy(retryPolicy); - } + @Override + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder() + .withLoadBalancingPolicy(loadBalancingPolicy) + .withSpeculativeExecutionPolicy(speculativeExecutionPolicy) + .withRetryPolicy(retryPolicy); + } - @Test(groups = "short") - public void should_pass_wrapped_statement_to_load_balancing_policy() { - loadBalancingPolicy.customStatementsHandled.set(0); + @Test(groups = "short") + public void should_pass_wrapped_statement_to_load_balancing_policy() { + loadBalancingPolicy.customStatementsHandled.set(0); - SimpleStatement s = new SimpleStatement("select * from system.local"); - session().execute(s); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(0); + SimpleStatement s = new SimpleStatement("select * from system.local"); + session().execute(s); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(0); - session().execute(new CustomStatement(s)); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); - } + session().execute(new CustomStatement(s)); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); + } - @Test(groups = "short") - @CassandraVersion(value = "2.0.0") - public void should_reuse_wrapped_simple_statement_for_multipage_query() { - loadBalancingPolicy.customStatementsHandled.set(0); + @Test(groups = "short") + @CassandraVersion(value = "2.0.0") + public void should_reuse_wrapped_simple_statement_for_multipage_query() { + loadBalancingPolicy.customStatementsHandled.set(0); - for (int v = 1; v <= 100; v++) - session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_simple_multipage", v)); + for (int v = 1; v <= 100; v++) + session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_simple_multipage", v)); - SimpleStatement s = new SimpleStatement(SELECT_MULTIPAGE_QUERY, "key_simple_multipage"); - s.setFetchSize(1); + SimpleStatement s = new SimpleStatement(SELECT_MULTIPAGE_QUERY, "key_simple_multipage"); + s.setFetchSize(1); - CustomStatement customStatement = new CustomStatement(s); + CustomStatement customStatement = new CustomStatement(s); - ResultSet rs = session().execute(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); + ResultSet rs = session().execute(customStatement); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); - Iterator it = rs.iterator(); + Iterator it = rs.iterator(); - assertThat(it.hasNext()).isTrue(); - it.next(); - assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); + assertThat(it.hasNext()).isTrue(); + it.next(); + 
assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); - assertThat(it.hasNext()).isTrue(); - it.next(); - assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); + assertThat(it.hasNext()).isTrue(); + it.next(); + assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2); - } + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2); + } - @Test(groups = "short") - @CassandraVersion(value = "2.0.0") - public void should_reuse_wrapped_bound_statement_for_multipage_query() { - loadBalancingPolicy.customStatementsHandled.set(0); + @Test(groups = "short") + @CassandraVersion(value = "2.0.0") + public void should_reuse_wrapped_bound_statement_for_multipage_query() { + loadBalancingPolicy.customStatementsHandled.set(0); - for (int v = 1; v <= 100; v++) - session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_prepared_multipage", v)); + for (int v = 1; v <= 100; v++) + session().execute(new SimpleStatement(INSERT_MULTIPAGE_QUERY, "key_prepared_multipage", v)); - PreparedStatement ps = session().prepare(SELECT_MULTIPAGE_QUERY); - BoundStatement bs = ps.bind("key_prepared_multipage"); - bs.setFetchSize(1); + PreparedStatement ps = session().prepare(SELECT_MULTIPAGE_QUERY); + BoundStatement bs = ps.bind("key_prepared_multipage"); + bs.setFetchSize(1); - CustomStatement customStatement = new CustomStatement(bs); + CustomStatement customStatement = new CustomStatement(bs); - ResultSet rs = session().execute(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); + ResultSet rs = session().execute(customStatement); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); - Iterator it = rs.iterator(); + Iterator it = rs.iterator(); - assertThat(it.hasNext()).isTrue(); - it.next(); - assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); + assertThat(it.hasNext()).isTrue(); + it.next(); + assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(1); - assertThat(it.hasNext()).isTrue(); - it.next(); - assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); + assertThat(it.hasNext()).isTrue(); + it.next(); + assertThat(rs.getExecutionInfo().getStatement()).isEqualTo(customStatement); - assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2); - } + assertThat(loadBalancingPolicy.customStatementsHandled.get()).isEqualTo(2); + } - @Test(groups = "short") - public void should_pass_wrapped_statement_to_speculative_execution_policy() { - speculativeExecutionPolicy.customStatementsHandled.set(0); + @Test(groups = "short") + public void should_pass_wrapped_statement_to_speculative_execution_policy() { + speculativeExecutionPolicy.customStatementsHandled.set(0); - SimpleStatement s = new SimpleStatement("select * from system.local"); - session().execute(s); - assertThat(speculativeExecutionPolicy.customStatementsHandled.get()).isEqualTo(0); + SimpleStatement s = new SimpleStatement("select * from system.local"); + session().execute(s); + assertThat(speculativeExecutionPolicy.customStatementsHandled.get()).isEqualTo(0); - session().execute(new CustomStatement(s)); - 
assertThat(speculativeExecutionPolicy.customStatementsHandled.get()).isEqualTo(1); - } + session().execute(new CustomStatement(s)); + assertThat(speculativeExecutionPolicy.customStatementsHandled.get()).isEqualTo(1); + } - @Test(groups = "short") - public void should_pass_wrapped_statement_to_retry_policy() { - retryPolicy.customStatementsHandled.set(0); + @Test(groups = "short") + public void should_pass_wrapped_statement_to_retry_policy() { + retryPolicy.customStatementsHandled.set(0); - // Set CL TWO with only one node, so the statement will always cause UNAVAILABLE, - // which our custom policy ignores. - Statement s = new SimpleStatement("select * from system.local") - .setConsistencyLevel(ConsistencyLevel.TWO); + // Set CL TWO with only one node, so the statement will always cause UNAVAILABLE, + // which our custom policy ignores. + Statement s = + new SimpleStatement("select * from system.local").setConsistencyLevel(ConsistencyLevel.TWO); - session().execute(s); - assertThat(retryPolicy.customStatementsHandled.get()).isEqualTo(0); + session().execute(s); + assertThat(retryPolicy.customStatementsHandled.get()).isEqualTo(0); - session().execute(new CustomStatement(s)); - assertThat(retryPolicy.customStatementsHandled.get()).isEqualTo(1); - } + session().execute(new CustomStatement(s)); + assertThat(retryPolicy.customStatementsHandled.get()).isEqualTo(1); + } - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_execute_wrapped_simple_statement() { - session().execute(new CustomStatement(new SimpleStatement(INSERT_QUERY, "key_simple", 1))); + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_execute_wrapped_simple_statement() { + session().execute(new CustomStatement(new SimpleStatement(INSERT_QUERY, "key_simple", 1))); - ResultSet rs = session().execute(new CustomStatement(new SimpleStatement(SELECT_QUERY, "key_simple"))); - assertThat(rs.one().getInt("v")).isEqualTo(1); - } + ResultSet rs = + session().execute(new CustomStatement(new SimpleStatement(SELECT_QUERY, "key_simple"))); + assertThat(rs.one().getInt("v")).isEqualTo(1); + } + + @Test(groups = "short") + public void should_execute_wrapped_bound_statement() { + PreparedStatement preparedStatement = session().prepare(new SimpleStatement(INSERT_QUERY)); + session().execute(new CustomStatement(preparedStatement.bind("key_bound", 1))); + + preparedStatement = session().prepare(new SimpleStatement(SELECT_QUERY)); + ResultSet rs = session().execute(new CustomStatement(preparedStatement.bind("key_bound"))); + assertThat(rs.one().getInt("v")).isEqualTo(1); + } - @Test(groups = "short") - public void should_execute_wrapped_bound_statement() { - PreparedStatement preparedStatement = session().prepare(new SimpleStatement(INSERT_QUERY)); - session().execute(new CustomStatement(preparedStatement.bind("key_bound", 1))); + @CassandraVersion("2.0.0") + @Test(groups = "short") + public void should_execute_wrapped_batch_statement() { + BatchStatement batchStatement = new BatchStatement(); + batchStatement.add(new SimpleStatement(INSERT_QUERY, "key_batch", 1)); - preparedStatement = session().prepare(new SimpleStatement(SELECT_QUERY)); - ResultSet rs = session().execute(new CustomStatement(preparedStatement.bind("key_bound"))); - assertThat(rs.one().getInt("v")).isEqualTo(1); + session().execute(new CustomStatement(batchStatement)); + + ResultSet rs = session().execute(SELECT_QUERY, "key_batch"); + assertThat(rs.one().getInt("v")).isEqualTo(1); + } + + @CassandraVersion("2.0.0") + @Test(groups = 
"short") + public void should_add_wrapped_batch_statement_to_batch_statement() { + BatchStatement batchStatementForWrapping = new BatchStatement(); + batchStatementForWrapping.add(new SimpleStatement(INSERT_QUERY, "key1", 1)); + + BatchStatement batchStatement = new BatchStatement(); + batchStatement.add(new CustomStatement(new SimpleStatement(INSERT_QUERY, "key2", 2))); + batchStatement.add(new CustomStatement(batchStatementForWrapping)); + + session().execute(batchStatement); + + ResultSet rs = session().execute(SELECT_QUERY, "key1"); + assertThat(rs.one().getInt("v")).isEqualTo(1); + + rs = session().execute(SELECT_QUERY, "key2"); + assertThat(rs.one().getInt("v")).isEqualTo(2); + } + + /** A custom wrapper that's just used to mark statements. */ + static class CustomStatement extends StatementWrapper { + protected CustomStatement(Statement wrapped) { + super(wrapped); } + } - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_execute_wrapped_batch_statement() { - BatchStatement batchStatement = new BatchStatement(); - batchStatement.add(new SimpleStatement(INSERT_QUERY, "key_batch", 1)); + /** A load balancing policy that counts how many times it has seen the custom wrapper */ + static class CustomLoadBalancingPolicy extends DelegatingLoadBalancingPolicy { + final AtomicInteger customStatementsHandled = new AtomicInteger(); - session().execute(new CustomStatement(batchStatement)); + public CustomLoadBalancingPolicy() { + super(new RoundRobinPolicy()); + } - ResultSet rs = session().execute(SELECT_QUERY, "key_batch"); - assertThat(rs.one().getInt("v")).isEqualTo(1); + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + if (statement instanceof CustomStatement) customStatementsHandled.incrementAndGet(); + return super.newQueryPlan(loggedKeyspace, statement); } + } + + /** A speculative execution policy that counts how many times it has seen the custom wrapper */ + static class CustomSpeculativeExecutionPolicy extends DelegatingSpeculativeExecutionPolicy { + final AtomicInteger customStatementsHandled = new AtomicInteger(); - @CassandraVersion("2.0.0") - @Test(groups = "short") - public void should_add_wrapped_batch_statement_to_batch_statement() { - BatchStatement batchStatementForWrapping = new BatchStatement(); - batchStatementForWrapping.add(new SimpleStatement(INSERT_QUERY, "key1", 1)); + protected CustomSpeculativeExecutionPolicy() { + super(NoSpeculativeExecutionPolicy.INSTANCE); + } - BatchStatement batchStatement = new BatchStatement(); - batchStatement.add(new CustomStatement(new SimpleStatement(INSERT_QUERY, "key2", 2))); - batchStatement.add(new CustomStatement(batchStatementForWrapping)); + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + if (statement instanceof CustomStatement) customStatementsHandled.incrementAndGet(); + return super.newPlan(loggedKeyspace, statement); + } + } - session().execute(batchStatement); + /** + * A retry policy that counts how many times it has seen the custom wrapper for UNAVAILABLE + * errors. 
+ */ + static class CustomRetryPolicy implements RetryPolicy { + final AtomicInteger customStatementsHandled = new AtomicInteger(); - ResultSet rs = session().execute(SELECT_QUERY, "key1"); - assertThat(rs.one().getInt("v")).isEqualTo(1); + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + if (statement instanceof CustomStatement) customStatementsHandled.incrementAndGet(); + return RetryDecision.ignore(); + } - rs = session().execute(SELECT_QUERY, "key2"); - assertThat(rs.one().getInt("v")).isEqualTo(2); + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return RetryDecision.rethrow(); } - /** - * A custom wrapper that's just used to mark statements. - */ - static class CustomStatement extends StatementWrapper { - protected CustomStatement(Statement wrapped) { - super(wrapped); - } + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return RetryDecision.rethrow(); } - /** - * A load balancing policy that counts how many times it has seen the custom wrapper - */ - static class CustomLoadBalancingPolicy extends DelegatingLoadBalancingPolicy { - final AtomicInteger customStatementsHandled = new AtomicInteger(); - - public CustomLoadBalancingPolicy() { - super(new RoundRobinPolicy()); - } - - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - if (statement instanceof CustomStatement) - customStatementsHandled.incrementAndGet(); - return super.newQueryPlan(loggedKeyspace, statement); - } + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return RetryDecision.tryNextHost(cl); } - /** - * A speculative execution policy that counts how many times it has seen the custom wrapper - */ - static class CustomSpeculativeExecutionPolicy extends DelegatingSpeculativeExecutionPolicy { - final AtomicInteger customStatementsHandled = new AtomicInteger(); - - protected CustomSpeculativeExecutionPolicy() { - super(NoSpeculativeExecutionPolicy.INSTANCE); - } - - @Override - public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { - if (statement instanceof CustomStatement) - customStatementsHandled.incrementAndGet(); - return super.newPlan(loggedKeyspace, statement); - } + @Override + public void init(Cluster cluster) { + // nothing to do } - /** - * A retry policy that counts how many times it has seen the custom wrapper for UNAVAILABLE errors. 
- */ - static class CustomRetryPolicy implements RetryPolicy { - final AtomicInteger customStatementsHandled = new AtomicInteger(); - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - if (statement instanceof CustomStatement) - customStatementsHandled.incrementAndGet(); - return RetryDecision.ignore(); - } - - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryDecision.rethrow(); - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.rethrow(); - } - - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } - - @Override - public void init(Cluster cluster) { - // nothing to do - } - - @Override - public void close() { - // nothing to do - } + @Override + public void close() { + // nothing to do } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java index e89ca757157..278df134908 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,33 +17,32 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; + public class StreamIdGeneratorTest { - @Test(groups = "unit") - public void SimpleGenIdTest() throws Exception { + @Test(groups = "unit") + public void SimpleGenIdTest() throws Exception { - StreamIdGenerator generator = StreamIdGenerator.newInstance(ProtocolVersion.V2); + StreamIdGenerator generator = StreamIdGenerator.newInstance(ProtocolVersion.V2); - assertEquals(generator.next(), 0); - assertEquals(generator.next(), 64); - generator.release(0); - assertEquals(generator.next(), 0); - assertEquals(generator.next(), 65); - assertEquals(generator.next(), 1); - generator.release(64); - assertEquals(generator.next(), 64); - assertEquals(generator.next(), 2); + assertEquals(generator.next(), 0); + assertEquals(generator.next(), 64); + generator.release(0); + assertEquals(generator.next(), 0); + assertEquals(generator.next(), 65); + assertEquals(generator.next(), 1); + generator.release(64); + assertEquals(generator.next(), 64); + assertEquals(generator.next(), 2); - for (int i = 5; i < 128; i++) - generator.next(); + for (int i = 5; i < 128; i++) generator.next(); - generator.release(100); - assertEquals(generator.next(), 100); + generator.release(100); + assertEquals(generator.next(), 100); - assertEquals(generator.next(), -1); - } + assertEquals(generator.next(), -1); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataAssert.java b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataAssert.java index f7e7ff3d44c..18ac9dbcb26 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,80 +17,80 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; +import org.assertj.core.api.AbstractAssert; + public class TableMetadataAssert extends AbstractAssert { - protected TableMetadataAssert(TableMetadata actual) { - super(actual, TableMetadataAssert.class); - } - - public TableMetadataAssert hasName(String name) { - assertThat(actual.getName()).isEqualTo(name); - return this; - } - - public TableMetadataAssert isInKeyspace(String keyspaceName) { - assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName); - return this; - } - - public TableMetadataAssert hasColumn(String columnName) { - assertThat(actual.getColumn(columnName)).isNotNull(); - return this; - } - - public TableMetadataAssert hasColumn(String columnName, DataType dataType) { - ColumnMetadata column = actual.getColumn(columnName); - assertThat(column).isNotNull(); - assertThat(column.getType()).isEqualTo(dataType); - return this; - } - - public TableMetadataAssert hasNoColumn(String columnName) { - assertThat(actual.getColumn(columnName)).isNull(); - return this; - } - - public TableMetadataAssert hasComment(String comment) { - assertThat(actual.getOptions().getComment()).isEqualTo(comment); - return this; - } - - public TableMetadataAssert doesNotHaveComment(String comment) { - assertThat(actual.getOptions().getComment()).isNotEqualTo(comment); - return this; - } - - public TableMetadataAssert doesNotHaveColumn(String columnName) { - ColumnMetadata column = actual.getColumn(columnName); - assertThat(column).isNull(); - return this; - } - - public TableMetadataAssert isCompactStorage() { - assertThat(actual.getOptions().isCompactStorage()).isTrue(); - return this; - } - - public TableMetadataAssert isNotCompactStorage() { - assertThat(actual.getOptions().isCompactStorage()).isFalse(); - return this; - } - - public TableMetadataAssert hasNumberOfColumns(int expected) { - assertThat(actual.getColumns().size()).isEqualTo(expected); - return this; - } - - public TableMetadataAssert hasMaterializedView(MaterializedViewMetadata expected) { - assertThat(actual.getView(Metadata.quote(expected.getName()))).isNotNull().isEqualTo(expected); - return this; - } - - public TableMetadataAssert hasIndex(IndexMetadata index) { - assertThat(actual.getIndexes()).contains(index); - return this; - } + protected TableMetadataAssert(TableMetadata actual) { + super(actual, TableMetadataAssert.class); + } + + public TableMetadataAssert hasName(String name) { + assertThat(actual.getName()).isEqualTo(name); + return this; + } + + public TableMetadataAssert isInKeyspace(String keyspaceName) { + assertThat(actual.getKeyspace().getName()).isEqualTo(keyspaceName); + return this; + } + + public TableMetadataAssert hasColumn(String columnName) { + assertThat(actual.getColumn(columnName)).isNotNull(); + return this; + } + + public TableMetadataAssert hasColumn(String columnName, DataType dataType) { + ColumnMetadata column = actual.getColumn(columnName); + assertThat(column).isNotNull(); + assertThat(column.getType()).isEqualTo(dataType); + return this; + } + + public TableMetadataAssert hasNoColumn(String columnName) { + 
assertThat(actual.getColumn(columnName)).isNull(); + return this; + } + + public TableMetadataAssert hasComment(String comment) { + assertThat(actual.getOptions().getComment()).isEqualTo(comment); + return this; + } + + public TableMetadataAssert doesNotHaveComment(String comment) { + assertThat(actual.getOptions().getComment()).isNotEqualTo(comment); + return this; + } + + public TableMetadataAssert doesNotHaveColumn(String columnName) { + ColumnMetadata column = actual.getColumn(columnName); + assertThat(column).isNull(); + return this; + } + + public TableMetadataAssert isCompactStorage() { + assertThat(actual.getOptions().isCompactStorage()).isTrue(); + return this; + } + + public TableMetadataAssert isNotCompactStorage() { + assertThat(actual.getOptions().isCompactStorage()).isFalse(); + return this; + } + + public TableMetadataAssert hasNumberOfColumns(int expected) { + assertThat(actual.getColumns().size()).isEqualTo(expected); + return this; + } + + public TableMetadataAssert hasMaterializedView(MaterializedViewMetadata expected) { + assertThat(actual.getView(Metadata.quote(expected.getName()))).isNotNull().isEqualTo(expected); + return this; + } + + public TableMetadataAssert hasIndex(IndexMetadata index) { + assertThat(actual.getIndexes()).contains(index); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataCDCTest.java b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataCDCTest.java index 76faa1394d7..011f0f069fe 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataCDCTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataCDCTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,39 +17,42 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - @CCMConfig(config = "cdc_enabled:true") @CassandraVersion(value = "3.8", description = "Requires CASSANDRA-12041 added in 3.8") public class TableMetadataCDCTest extends CCMTestsSupport { - /** - * Ensures that if a table is configured with change data capture enabled that - * {@link TableOptionsMetadata#isCDC()} returns true for that table. - * - * @test_category metadata - * @jira_ticket JAVA-1287 - * @jira_ticket CASSANDRA-12041 - */ - @Test(groups = "short") - public void should_parse_cdc_from_table_options() { - // given - // create a simple table with cdc as true. 
- String cql = String.format("CREATE TABLE %s.cdc_table (\n" + /** + * Ensures that if a table is configured with change data capture enabled that {@link + * TableOptionsMetadata#isCDC()} returns true for that table. + * + * @test_category metadata + * @jira_ticket JAVA-1287 + * @jira_ticket CASSANDRA-12041 + */ + @Test(groups = "short") + public void should_parse_cdc_from_table_options() { + // given + // create a simple table with cdc as true. + String cql = + String.format( + "CREATE TABLE %s.cdc_table (\n" + " k text,\n" + " c int,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c)\n" - + ") WITH cdc=true;", keyspace); - session().execute(cql); + + ") WITH cdc=true;", + keyspace); + session().execute(cql); - // when retrieving the table's metadata. - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("cdc_table"); - // then the table's options should have cdc as true. - assertThat(table.getOptions().isCDC()).isEqualTo(true); - assertThat(table.asCQLQuery(true)).contains("cdc = true"); - } + // when retrieving the table's metadata. + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("cdc_table"); + // then the table's options should have cdc as true. + assertThat(table.getOptions().isCDC()).isEqualTo(true); + assertThat(table.asCQLQuery(true)).contains("cdc = true"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataTest.java index 6565c97a647..88717c72dd5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TableMetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TableMetadataTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,555 +17,872 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import com.google.common.collect.ImmutableMap; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; - import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.ClusteringOrder.ASC; import static com.datastax.driver.core.ClusteringOrder.DESC; -import static com.datastax.driver.core.DataType.*; +import static com.datastax.driver.core.DataType.cdouble; +import static com.datastax.driver.core.DataType.cfloat; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.counter; +import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.DataType.timeuuid; import static org.assertj.core.api.Assertions.entry; +import com.datastax.driver.core.utils.CassandraVersion; +import com.google.common.collect.ImmutableMap; +import java.nio.ByteBuffer; +import org.testng.annotations.Test; + @CCMConfig(clusterProvider = "createClusterBuilderNoDebouncing") public class TableMetadataTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_parse_table_without_clustering_columns() { - // given - String cql = String.format("CREATE TABLE %s.static (\n" + @Test(groups = "short") + public void should_parse_table_without_clustering_columns() { + // given + String cql = + String.format( + "CREATE TABLE %s.static (\n" + " k text,\n" + " i int,\n" + " m map<text, timeuuid>,\n" + " v int,\n" + " PRIMARY KEY (k)\n" - + ");", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("static"); - // then - assertThat(table).isNotNull().hasName("static").hasNumberOfColumns(4).isNotCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("i").isRegularColumn().hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("m").isRegularColumn().hasType(map(text(), timeuuid())); - assertThat(table.getColumns().get(3)).isNotNull().hasName("v").isRegularColumn().hasType(cint()); - } - - @Test(groups = "short") - public void should_parse_table_with_clustering_columns() { - // given - String cql = String.format("CREATE TABLE %s.sparse (\n" + + ");", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("static"); + // then + assertThat(table).isNotNull().hasName("static").hasNumberOfColumns(4).isNotCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("i") + .isRegularColumn() + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("m") + .isRegularColumn() + .hasType(map(text(), timeuuid())); + assertThat(table.getColumns().get(3)) + .isNotNull() + .hasName("v") + .isRegularColumn() + .hasType(cint()); + } + + @Test(groups = "short") + public void 
should_parse_table_with_clustering_columns() { + // given + String cql = + String.format( + "CREATE TABLE %s.sparse (\n" + " k text,\n" + " c1 int,\n" + " c2 float,\n" + " l list<text>,\n" + " v int,\n" + " PRIMARY KEY (k, c1, c2)\n" - + ") WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("sparse"); - // then - assertThat(table).isNotNull().hasName("sparse").hasNumberOfColumns(5).isNotCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c1").isClusteringColumn().hasClusteringOrder(ASC).hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("c2").isClusteringColumn().hasClusteringOrder(DESC).hasType(cfloat()); - assertThat(table.getColumns().get(3)).isNotNull().hasName("l").isRegularColumn().hasType(list(text())); - assertThat(table.getColumns().get(4)).isNotNull().hasName("v").isRegularColumn().hasType(cint()); - } - - @Test(groups = "short") - public void should_parse_counter_table() { - // given - String cql = String.format("CREATE TABLE %s.counters (\n" + + ") WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("sparse"); + // then + assertThat(table).isNotNull().hasName("sparse").hasNumberOfColumns(5).isNotCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c1") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("c2") + .isClusteringColumn() + .hasClusteringOrder(DESC) + .hasType(cfloat()); + assertThat(table.getColumns().get(3)) + .isNotNull() + .hasName("l") + .isRegularColumn() + .hasType(list(text())); + assertThat(table.getColumns().get(4)) + .isNotNull() + .hasName("v") + .isRegularColumn() + .hasType(cint()); + } + + @Test(groups = "short") + public void should_parse_counter_table() { + // given + String cql = + String.format( + "CREATE TABLE %s.counters (\n" + " k text,\n" + " c counter,\n" + " PRIMARY KEY (k)\n" - + ");", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("counters"); - // then - assertThat(table).isNotNull().hasName("counters").hasNumberOfColumns(2).isNotCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c").isRegularColumn().hasType(counter()); - } - - @Test(groups = "short") - public void should_parse_compact_static_table() { - // given - String cql = String.format("CREATE TABLE %s.compact_static (\n" + + ");", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("counters"); + // then + assertThat(table).isNotNull().hasName("counters").hasNumberOfColumns(2).isNotCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c") + .isRegularColumn() + .hasType(counter()); + } + + @Test(groups = "short") + public void should_parse_compact_static_table() { 
TestUtils.compactStorageSupportCheck(ccm()); + // given + String cql = + String.format( + "CREATE TABLE %s.compact_static (\n" + " k text,\n" + " i int,\n" + " t timeuuid,\n" + " v int,\n" + " PRIMARY KEY (k)\n" - + ") WITH COMPACT STORAGE;", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("compact_static"); - // then - assertThat(table).isNotNull().hasName("compact_static").hasNumberOfColumns(4).isCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("i").isRegularColumn().hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("t").isRegularColumn().hasType(timeuuid()); - assertThat(table.getColumns().get(3)).isNotNull().hasName("v").isRegularColumn().hasType(cint()); - } - - @Test(groups = "short") - public void should_parse_dense_table() { - // given - String cql = String.format("CREATE TABLE %s.dense (\n" + + ") WITH COMPACT STORAGE;", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("compact_static"); + // then + assertThat(table) + .isNotNull() + .hasName("compact_static") + .hasNumberOfColumns(4) + .isCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("i") + .isRegularColumn() + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("t") + .isRegularColumn() + .hasType(timeuuid()); + assertThat(table.getColumns().get(3)) + .isNotNull() + .hasName("v") + .isRegularColumn() + .hasType(cint()); + } + + @Test(groups = "short") + public void should_parse_dense_table() { + TestUtils.compactStorageSupportCheck(ccm()); + // given + String cql = + String.format( + "CREATE TABLE %s.dense (\n" + " k int,\n" + " c int,\n" + " PRIMARY KEY (k, c)\n" - + " ) WITH COMPACT STORAGE;", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("dense"); - // then - assertThat(table).isNotNull().hasName("dense").hasNumberOfColumns(2).isCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(cint()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c").isClusteringColumn().hasType(cint()); - } - - @Test(groups = "short") - public void should_parse_compact_dynamic_table() { - // given - String cql = String.format("CREATE TABLE %s.compact_dynamic (\n" + + " ) WITH COMPACT STORAGE;", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("dense"); + // then + assertThat(table).isNotNull().hasName("dense").hasNumberOfColumns(2).isCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(cint()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c") + .isClusteringColumn() + .hasType(cint()); + } + + @Test(groups = "short") + public void should_parse_compact_dynamic_table() { + TestUtils.compactStorageSupportCheck(ccm()); + // given + String cql = + String.format( + "CREATE TABLE %s.compact_dynamic (\n" + " k text,\n" + " c int,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c)\n" - + ") WITH COMPACT STORAGE;", keyspace); - // when - session().execute(cql); - TableMetadata table = 
cluster().getMetadata().getKeyspace(keyspace).getTable("compact_dynamic"); - // then - assertThat(table).isNotNull().hasName("compact_dynamic").hasNumberOfColumns(3).isCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c").isClusteringColumn().hasClusteringOrder(ASC).hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("v").isRegularColumn().hasType(timeuuid()); - } - - @Test(groups = "short") - public void should_parse_compact_table_with_multiple_clustering_columns() { - // given - String cql = String.format("CREATE TABLE %s.compact_composite (\n" + + ") WITH COMPACT STORAGE;", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("compact_dynamic"); + // then + assertThat(table) + .isNotNull() + .hasName("compact_dynamic") + .hasNumberOfColumns(3) + .isCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("v") + .isRegularColumn() + .hasType(timeuuid()); + } + + @Test(groups = "short") + public void should_parse_compact_table_with_multiple_clustering_columns() { + TestUtils.compactStorageSupportCheck(ccm()); + // given + String cql = + String.format( + "CREATE TABLE %s.compact_composite (\n" + " k text,\n" + " c1 int,\n" + " c2 float,\n" + " c3 double,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c1, c2, c3)\n" - + ") WITH COMPACT STORAGE;", keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("compact_composite"); - // then - assertThat(table).isNotNull().hasName("compact_composite").hasNumberOfColumns(5).isCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c1").isClusteringColumn().hasClusteringOrder(ASC).hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("c2").isClusteringColumn().hasClusteringOrder(ASC).hasType(cfloat()); - assertThat(table.getColumns().get(3)).isNotNull().hasName("c3").isClusteringColumn().hasClusteringOrder(ASC).hasType(cdouble()); - assertThat(table.getColumns().get(4)).isNotNull().hasName("v").isRegularColumn().hasType(timeuuid()); - } - - @Test(groups = "short") - public void should_parse_table_options() { - VersionNumber version = TestUtils.findHost(cluster(), 1).getCassandraVersion(); - - // given - String cql; - - // Cassandra 3.0 + - if (version.getMajor() > 2) { - cql = String.format("CREATE TABLE %s.with_options (\n" - + " k text,\n" - + " c1 int,\n" - + " c2 int,\n" - + " i int,\n" - + " PRIMARY KEY (k, c1, c2)\n" - + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" - + " AND read_repair_chance = 0.5\n" - + " AND dclocal_read_repair_chance = 0.6\n" - + " AND speculative_retry = '99.9PERCENTILE'\n" - // replicate_on_write not supported anymore in 3.0 - + " AND gc_grace_seconds = 42\n" - + " AND bloom_filter_fp_chance = 0.01\n" - // older caching formats not supported anymore in 3.0 - + " AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 10 }\n" - + " AND comment = 'My awesome table'\n" - + " AND compaction = { 'class' : 
'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" - + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 }\n" - + " AND crc_check_chance = 0.5;", // available from C* 3.0 - keyspace); - - // older versions - } else { - cql = String.format("CREATE TABLE %s.with_options (\n" - + " k text,\n" - + " c1 int,\n" - + " c2 int,\n" - + " i int,\n" - + " PRIMARY KEY (k, c1, c2)\n" - + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" - + " AND read_repair_chance = 0.5\n" - + " AND dclocal_read_repair_chance = 0.6\n" - + " AND replicate_on_write = true\n" - + " AND gc_grace_seconds = 42\n" - + " AND bloom_filter_fp_chance = 0.01\n" - + " AND caching = 'ALL'\n" - + " AND comment = 'My awesome table'\n" - + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" - + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 };", - keyspace); - } - - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("with_options"); - - // then - assertThat(table).isNotNull().hasName("with_options").hasNumberOfColumns(4).isNotCompactStorage(); - assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); - assertThat(table.getColumns().get(1)).isNotNull().hasName("c1").isClusteringColumn().hasClusteringOrder(DESC).hasType(cint()); - assertThat(table.getColumns().get(2)).isNotNull().hasName("c2").isClusteringColumn().hasClusteringOrder(ASC).hasType(cint()); - assertThat(table.getColumns().get(3)).isNotNull().hasName("i").isRegularColumn().hasType(cint()); - assertThat(table); - - // Cassandra 3.8 + - if (version.getMajor() > 3 || (version.getMajor() == 3 && version.getMinor() >= 8)) { - - assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); - assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); - assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); - assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); - assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); - assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); - assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "10")); - assertThat(table.getOptions().getCompaction()).contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); - assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); - assertThat(table.getOptions().getCompression()).contains(entry("class", "org.apache.cassandra.io.compress.SnappyCompressor")); // sstable_compression becomes class - assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_in_kb", "128")); // note the "in" prefix - assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); - assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.9PERCENTILE"); - assertThat(table.getOptions().getIndexInterval()).isNull(); - assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); - assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); - assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default - assertThat(table.getOptions().getCrcCheckChance()).isEqualTo(0.5); - 
assertThat(table.getOptions().getExtensions()).isEmpty(); // default - assertThat(table.asCQLQuery()) - .contains("read_repair_chance = 0.5") - .contains("dclocal_read_repair_chance = 0.6") - .contains("gc_grace_seconds = 42") - .contains("bloom_filter_fp_chance = 0.01") - .contains("comment = 'My awesome table'") - .contains("'keys' : 'ALL'") - .contains("'rows_per_partition' : 10") - .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") - .contains("'sstable_size_in_mb' : 15") - .contains("'class' : 'org.apache.cassandra.io.compress.SnappyCompressor'") // sstable_compression becomes class - .contains("'chunk_length_in_kb' : 128") // note the "in" prefix - .contains("default_time_to_live = 0") - .contains("speculative_retry = '99.9PERCENTILE'") - .contains("min_index_interval = 128") - .contains("max_index_interval = 2048") - .contains("crc_check_chance = 0.5") - .contains("cdc = false") - .doesNotContain(" index_interval") - .doesNotContain("replicate_on_write"); - // Cassandra 3.0 + - } else if (version.getMajor() > 2) { - - assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); - assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); - assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); - assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); - assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); - assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); - assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "10")); - assertThat(table.getOptions().getCompaction()).contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); - assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); - assertThat(table.getOptions().getCompression()).contains(entry("class", "org.apache.cassandra.io.compress.SnappyCompressor")); // sstable_compression becomes class - assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_in_kb", "128")); // note the "in" prefix - assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); - assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.9PERCENTILE"); - assertThat(table.getOptions().getIndexInterval()).isNull(); - assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); - assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); - assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default - assertThat(table.getOptions().getCrcCheckChance()).isEqualTo(0.5); - assertThat(table.getOptions().getExtensions()).isEmpty(); // default - assertThat(table.asCQLQuery()) - .contains("read_repair_chance = 0.5") - .contains("dclocal_read_repair_chance = 0.6") - .contains("gc_grace_seconds = 42") - .contains("bloom_filter_fp_chance = 0.01") - .contains("comment = 'My awesome table'") - .contains("'keys' : 'ALL'") - .contains("'rows_per_partition' : 10") - .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") - .contains("'sstable_size_in_mb' : 15") - .contains("'class' : 'org.apache.cassandra.io.compress.SnappyCompressor'") // sstable_compression becomes class - .contains("'chunk_length_in_kb' : 128") // note the "in" prefix - .contains("default_time_to_live = 0") - .contains("speculative_retry = '99.9PERCENTILE'") - .contains("min_index_interval = 128") - .contains("max_index_interval = 2048") - .contains("crc_check_chance = 0.5") 
- .doesNotContain(" index_interval") - .doesNotContain("replicate_on_write") - .doesNotContain("cdc"); // 3.8+ - - // Cassandra 2.1 and 2.2 - } else if (version.getMajor() == 2 && version.getMinor() > 0) { - - // With 2.1 we have different options, the caching option changes and replicate_on_write disappears - assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); - assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); - assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); - assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); - assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); - assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); - assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "ALL")); - assertThat(table.getOptions().getCompaction()).contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); - assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); - assertThat(table.getOptions().getCompression()).contains(entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); - assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); - assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); - assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.0PERCENTILE"); - assertThat(table.getOptions().getIndexInterval()).isNull(); - assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); - assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); - assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default - assertThat(table.getOptions().getExtensions()).isEmpty(); - assertThat(table.asCQLQuery()) - .contains("read_repair_chance = 0.5") - .contains("dclocal_read_repair_chance = 0.6") - .contains("gc_grace_seconds = 42") - .contains("bloom_filter_fp_chance = 0.01") - .contains("comment = 'My awesome table'") - .contains("'keys' : 'ALL'") - .contains("'rows_per_partition' : 'ALL'") - .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") - .contains("'sstable_size_in_mb' : 15") - .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") - .contains("'chunk_length_kb' : 128") - .contains("default_time_to_live = 0") - .contains("speculative_retry = '99.0PERCENTILE'") - .contains("min_index_interval = 128") - .contains("max_index_interval = 2048") - .doesNotContain(" index_interval") - .doesNotContain("replicate_on_write") - .doesNotContain("cdc"); - - // Cassandra 2.0 - } else if (version.getMajor() == 2 && version.getMinor() == 0) { - - assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); - assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); - assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); - assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); - assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); - assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); - assertThat(table.getOptions().getCaching()).doesNotContain(entry("rows_per_partition", "ALL")); // 2.1 + - assertThat(table.getOptions().getCompaction()).contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); - 
assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); - assertThat(table.getOptions().getCompression()).contains(entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); - assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); - assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); - assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.0PERCENTILE"); // default - assertThat(table.getOptions().getIndexInterval()).isEqualTo(128); - assertThat(table.getOptions().getMinIndexInterval()).isNull(); - assertThat(table.getOptions().getMaxIndexInterval()).isNull(); - assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // explicitly set - assertThat(table.getOptions().getExtensions()).isEmpty(); - assertThat(table.asCQLQuery()) - .contains("read_repair_chance = 0.5") - .contains("dclocal_read_repair_chance = 0.6") - .contains("gc_grace_seconds = 42") - .contains("bloom_filter_fp_chance = 0.01") - .contains("comment = 'My awesome table'") - .contains("caching = 'ALL'") - .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") - .contains("'sstable_size_in_mb' : 15") - .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") - .contains("'chunk_length_kb' : 128") - .contains("replicate_on_write = true") - .contains("index_interval = 128") - .contains("speculative_retry = '99.0PERCENTILE'") - .contains("default_time_to_live = 0") - .doesNotContain("min_index_interval") // 2.1 + - .doesNotContain("max_index_interval") // 2.1 + - .doesNotContain("cdc"); - - // Cassandra 1.2 - } else { - - assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); - assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); - assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); - assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); - assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); - assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); - assertThat(table.getOptions().getCaching()).doesNotContain(entry("rows_per_partition", "ALL")); // 2.1 + - assertThat(table.getOptions().getCompaction()).contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); - assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); - assertThat(table.getOptions().getCompression()).contains(entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); - assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); - assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); // default - assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("NONE"); // default - assertThat(table.getOptions().getIndexInterval()).isNull(); - assertThat(table.getOptions().getMinIndexInterval()).isNull(); - assertThat(table.getOptions().getMaxIndexInterval()).isNull(); - assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // explicitly set - assertThat(table.getOptions().getExtensions()).isEmpty(); - assertThat(table.asCQLQuery()) - .contains("read_repair_chance = 0.5") - .contains("dclocal_read_repair_chance = 0.6") - .contains("gc_grace_seconds = 42") - .contains("bloom_filter_fp_chance = 0.01") - .contains("comment = 'My awesome table'") - .contains("caching = 'ALL'") - .contains("'class' : 
'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") - .contains("'sstable_size_in_mb' : 15") - .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") - .contains("'chunk_length_kb' : 128") - .contains("replicate_on_write = true") - .doesNotContain("index_interval") // 2.0 - .doesNotContain("min_index_interval") // 2.1 + - .doesNotContain("max_index_interval") // 2.1 + - .doesNotContain("speculative_retry") // 2.0 + - .doesNotContain("default_time_to_live") // 2.0 + - .doesNotContain("cdc"); - - } - + + ") WITH COMPACT STORAGE;", + keyspace); + // when + session().execute(cql); + TableMetadata table = + cluster().getMetadata().getKeyspace(keyspace).getTable("compact_composite"); + // then + assertThat(table) + .isNotNull() + .hasName("compact_composite") + .hasNumberOfColumns(5) + .isCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c1") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("c2") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cfloat()); + assertThat(table.getColumns().get(3)) + .isNotNull() + .hasName("c3") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cdouble()); + assertThat(table.getColumns().get(4)) + .isNotNull() + .hasName("v") + .isRegularColumn() + .hasType(timeuuid()); + } + + @Test(groups = "short") + public void should_parse_table_options() { + VersionNumber version = ccm().getCassandraVersion(); + VersionNumber dseVersion = ccm().getDSEVersion(); + boolean isRealCassandra4 = + version.getMajor() > 3 + && (dseVersion == null || dseVersion.compareTo(VersionNumber.parse("6.8")) >= 0); + + // given + String cql; + + // Cassandra 4.0 + + if (isRealCassandra4) { + cql = + String.format( + "CREATE TABLE %s.with_options (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 int,\n" + + " i int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" + + " AND additional_write_policy = '99p'\n" + + " AND read_repair = 'BLOCKING'\n" + + " AND speculative_retry = '99.9p'\n" + + " AND gc_grace_seconds = 42\n" + + " AND bloom_filter_fp_chance = 0.01\n" + + " AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 10 }\n" + + " AND comment = 'My awesome table'\n" + + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" + + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 }\n" + + " AND crc_check_chance = 0.5\n" // available from C* 3.0 + + " AND memtable_flush_period_in_ms = 1000;", + keyspace); + + // Cassandra 3.0 + + } else if (version.compareTo(VersionNumber.parse("3.0")) >= 0) { + cql = + String.format( + "CREATE TABLE %s.with_options (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 int,\n" + + " i int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" + + " AND read_repair_chance = 0.5\n" + + " AND dclocal_read_repair_chance = 0.6\n" + + " AND speculative_retry = '99.9PERCENTILE'\n" + // replicate_on_write not supported anymore in 3.0 + + " AND gc_grace_seconds = 42\n" + + " AND bloom_filter_fp_chance = 0.01\n" + // older caching formats not supported anymore in 3.0 + + " AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 10 }\n" + + " AND comment = 'My awesome table'\n" 
+ + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" + + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 }\n" + + " AND crc_check_chance = 0.5\n" // available from C* 3.0 + + " AND memtable_flush_period_in_ms = 1000;", + keyspace); + + // Cassandra 2.0 + + } else if (version.getMajor() > 1) { + cql = + String.format( + "CREATE TABLE %s.with_options (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 int,\n" + + " i int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" + + " AND read_repair_chance = 0.5\n" + + " AND dclocal_read_repair_chance = 0.6\n" + + " AND replicate_on_write = true\n" + + " AND gc_grace_seconds = 42\n" + + " AND bloom_filter_fp_chance = 0.01\n" + + " AND caching = 'ALL'\n" + + " AND comment = 'My awesome table'\n" + + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" + + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 }\n" + + " AND memtable_flush_period_in_ms = 1000;", + keyspace); + // older versions + } else { + cql = + String.format( + "CREATE TABLE %s.with_options (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 int,\n" + + " i int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" + + " AND read_repair_chance = 0.5\n" + + " AND dclocal_read_repair_chance = 0.6\n" + + " AND replicate_on_write = true\n" + + " AND gc_grace_seconds = 42\n" + + " AND bloom_filter_fp_chance = 0.01\n" + + " AND caching = 'ALL'\n" + + " AND comment = 'My awesome table'\n" + + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" + + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 };", + keyspace); } - /** - * Validates that metadata is appropriately parsed when using the new compression options format introduced - * in 3.0.0-alpha1. Since compression options are parsed as a String map, this should behave exactly as - * the old style compression options used in {@link #should_parse_table_options}. 
- * - * @jira_ticket CASSANDRA-9424 - */ - @Test(groups = "short") - @CassandraVersion("3.0") - public void should_parse_new_compression_options() { - // given - String cql = String.format("CREATE TABLE %s.new_compression_options (\n" - + " k text,\n" - + " c1 int,\n" - + " c2 int,\n" - + " i int,\n" - + " PRIMARY KEY (k, c1, c2)\n" - + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" - + " AND compression = { 'class' : 'DeflateCompressor', 'chunk_length_in_kb' : 128 };", - keyspace); - - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("new_compression_options"); - - // then - assertThat(table.getOptions().getCompression()) - .contains(entry("class", "org.apache.cassandra.io.compress.DeflateCompressor")) - .contains(entry("chunk_length_in_kb", "128")); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("with_options"); + + // then + assertThat(table) + .isNotNull() + .hasName("with_options") + .hasNumberOfColumns(4) + .isNotCompactStorage(); + assertThat(table.getColumns().get(0)).isNotNull().hasName("k").isPartitionKey().hasType(text()); + assertThat(table.getColumns().get(1)) + .isNotNull() + .hasName("c1") + .isClusteringColumn() + .hasClusteringOrder(DESC) + .hasType(cint()); + assertThat(table.getColumns().get(2)) + .isNotNull() + .hasName("c2") + .isClusteringColumn() + .hasClusteringOrder(ASC) + .hasType(cint()); + assertThat(table.getColumns().get(3)) + .isNotNull() + .hasName("i") + .isRegularColumn() + .hasType(cint()); + assertThat(table); + + // Cassandra 4.0 + + if (isRealCassandra4) { + + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "10")); + assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry( + "class", + "org.apache.cassandra.io.compress.SnappyCompressor")); // sstable_compression + // becomes class + assertThat(table.getOptions().getCompression()) + .contains(entry("chunk_length_in_kb", "128")); // note the "in" prefix + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); + assertThat(table.getOptions().getSpeculativeRetry()) + .isEqualTo(dseVersion == null ? 
"99.9p" : "99.9PERCENTILE"); + assertThat(table.getOptions().getIndexInterval()).isNull(); + assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); + assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default + assertThat(table.getOptions().getCrcCheckChance()).isEqualTo(0.5); + assertThat(table.getOptions().getExtensions()).isEmpty(); // default + assertThat(table.getOptions().getMemtableFlushPeriodInMs()).isEqualTo(1000); + assertThat(table.asCQLQuery()) + .contains("additional_write_policy = '99p'") + .contains("read_repair = 'BLOCKING'") + .contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("'keys' : 'ALL'") + .contains("'rows_per_partition' : 10") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains( + "'class' : 'org.apache.cassandra.io.compress.SnappyCompressor'") // sstable_compression becomes class + .contains("'chunk_length_in_kb' : 128") // note the "in" prefix + .contains("default_time_to_live = 0") + .contains( + dseVersion == null + ? "speculative_retry = '99.9p'" + : "speculative_retry = '99.9PERCENTILE'") + .contains("min_index_interval = 128") + .contains("max_index_interval = 2048") + .contains("crc_check_chance = 0.5") + .contains("cdc = false") + .contains("memtable_flush_period_in_ms = 1000") + .doesNotContain(" index_interval") + .doesNotContain("replicate_on_write"); + // Cassandra 3.8 + + } else if (version.compareTo(VersionNumber.parse("3.8")) >= 0) { + + assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); + assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "10")); + assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry( + "class", + "org.apache.cassandra.io.compress.SnappyCompressor")); // sstable_compression + // becomes class + assertThat(table.getOptions().getCompression()) + .contains(entry("chunk_length_in_kb", "128")); // note the "in" prefix + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); + assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.9PERCENTILE"); + assertThat(table.getOptions().getIndexInterval()).isNull(); + assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); + assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default + assertThat(table.getOptions().getCrcCheckChance()).isEqualTo(0.5); + assertThat(table.getOptions().getExtensions()).isEmpty(); // default + assertThat(table.getOptions().getMemtableFlushPeriodInMs()).isEqualTo(1000); + assertThat(table.asCQLQuery()) + .contains("read_repair_chance = 0.5") + .contains("dclocal_read_repair_chance = 0.6") + 
.contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("'keys' : 'ALL'") + .contains("'rows_per_partition' : 10") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains( + "'class' : 'org.apache.cassandra.io.compress.SnappyCompressor'") // sstable_compression becomes class + .contains("'chunk_length_in_kb' : 128") // note the "in" prefix + .contains("default_time_to_live = 0") + .contains("speculative_retry = '99.9PERCENTILE'") + .contains("min_index_interval = 128") + .contains("max_index_interval = 2048") + .contains("crc_check_chance = 0.5") + .contains("cdc = false") + .contains("memtable_flush_period_in_ms = 1000") + .doesNotContain(" index_interval") + .doesNotContain("replicate_on_write"); + // Cassandra 3.0 + + } else if (version.compareTo(VersionNumber.parse("3.0")) >= 0) { + + assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); + assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "10")); + assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry( + "class", + "org.apache.cassandra.io.compress.SnappyCompressor")); // sstable_compression + // becomes class + assertThat(table.getOptions().getCompression()) + .contains(entry("chunk_length_in_kb", "128")); // note the "in" prefix + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); + assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.9PERCENTILE"); + assertThat(table.getOptions().getIndexInterval()).isNull(); + assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); + assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default + assertThat(table.getOptions().getCrcCheckChance()).isEqualTo(0.5); + assertThat(table.getOptions().getExtensions()).isEmpty(); // default + assertThat(table.getOptions().getMemtableFlushPeriodInMs()).isEqualTo(1000); + assertThat(table.asCQLQuery()) + .contains("read_repair_chance = 0.5") + .contains("dclocal_read_repair_chance = 0.6") + .contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("'keys' : 'ALL'") + .contains("'rows_per_partition' : 10") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains( + "'class' : 'org.apache.cassandra.io.compress.SnappyCompressor'") // sstable_compression becomes class + .contains("'chunk_length_in_kb' : 128") // note the "in" prefix + .contains("default_time_to_live = 0") + .contains("speculative_retry = '99.9PERCENTILE'") + .contains("min_index_interval = 128") + .contains("max_index_interval = 2048") + .contains("crc_check_chance = 0.5") + 
.contains("memtable_flush_period_in_ms = 1000") + .doesNotContain(" index_interval") + .doesNotContain("replicate_on_write") + .doesNotContain("cdc"); // 3.8+ + + // Cassandra 2.1 and 2.2 + } else if (version.getMajor() == 2 && version.getMinor() > 0) { + + // With 2.1 we have different options, the caching option changes and replicate_on_write + // disappears + assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); + assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()).contains(entry("rows_per_partition", "ALL")); + assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); + assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); + assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.0PERCENTILE"); + assertThat(table.getOptions().getIndexInterval()).isNull(); + assertThat(table.getOptions().getMinIndexInterval()).isEqualTo(128); + assertThat(table.getOptions().getMaxIndexInterval()).isEqualTo(2048); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // default + assertThat(table.getOptions().getExtensions()).isEmpty(); + assertThat(table.getOptions().getMemtableFlushPeriodInMs()).isEqualTo(1000); + assertThat(table.asCQLQuery()) + .contains("read_repair_chance = 0.5") + .contains("dclocal_read_repair_chance = 0.6") + .contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("'keys' : 'ALL'") + .contains("'rows_per_partition' : 'ALL'") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") + .contains("'chunk_length_kb' : 128") + .contains("default_time_to_live = 0") + .contains("speculative_retry = '99.0PERCENTILE'") + .contains("min_index_interval = 128") + .contains("max_index_interval = 2048") + .contains("memtable_flush_period_in_ms = 1000") + .doesNotContain(" index_interval") + .doesNotContain("replicate_on_write") + .doesNotContain("cdc"); + + // Cassandra 2.0 + } else if (version.getMajor() == 2 && version.getMinor() == 0) { + + assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); + assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()) + .doesNotContain(entry("rows_per_partition", "ALL")); // 2.1 + + 
assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); + assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); + assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("99.0PERCENTILE"); // default + assertThat(table.getOptions().getIndexInterval()).isEqualTo(128); + assertThat(table.getOptions().getMinIndexInterval()).isNull(); + assertThat(table.getOptions().getMaxIndexInterval()).isNull(); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // explicitly set + assertThat(table.getOptions().getExtensions()).isEmpty(); + assertThat(table.getOptions().getMemtableFlushPeriodInMs()).isEqualTo(1000); + assertThat(table.asCQLQuery()) + .contains("read_repair_chance = 0.5") + .contains("dclocal_read_repair_chance = 0.6") + .contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("caching = 'ALL'") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") + .contains("'chunk_length_kb' : 128") + .contains("replicate_on_write = true") + .contains("index_interval = 128") + .contains("speculative_retry = '99.0PERCENTILE'") + .contains("default_time_to_live = 0") + .contains("memtable_flush_period_in_ms = 1000") + .doesNotContain("min_index_interval") // 2.1 + + .doesNotContain("max_index_interval") // 2.1 + + .doesNotContain("cdc"); + + // Cassandra 1.2 + } else { + + assertThat(table.getOptions().getReadRepairChance()).isEqualTo(0.5); + assertThat(table.getOptions().getLocalReadRepairChance()).isEqualTo(0.6); + assertThat(table.getOptions().getGcGraceInSeconds()).isEqualTo(42); + assertThat(table.getOptions().getBloomFilterFalsePositiveChance()).isEqualTo(0.01); + assertThat(table.getOptions().getComment()).isEqualTo("My awesome table"); + assertThat(table.getOptions().getCaching()).contains(entry("keys", "ALL")); + assertThat(table.getOptions().getCaching()) + .doesNotContain(entry("rows_per_partition", "ALL")); // 2.1 + + assertThat(table.getOptions().getCompaction()) + .contains(entry("class", "org.apache.cassandra.db.compaction.LeveledCompactionStrategy")); + assertThat(table.getOptions().getCompaction()).contains(entry("sstable_size_in_mb", "15")); + assertThat(table.getOptions().getCompression()) + .contains( + entry("sstable_compression", "org.apache.cassandra.io.compress.SnappyCompressor")); + assertThat(table.getOptions().getCompression()).contains(entry("chunk_length_kb", "128")); + assertThat(table.getOptions().getDefaultTimeToLive()).isEqualTo(0); // default + assertThat(table.getOptions().getSpeculativeRetry()).isEqualTo("NONE"); // default + assertThat(table.getOptions().getIndexInterval()).isNull(); + assertThat(table.getOptions().getMinIndexInterval()).isNull(); + assertThat(table.getOptions().getMaxIndexInterval()).isNull(); + assertThat(table.getOptions().getReplicateOnWrite()).isTrue(); // explicitly set + assertThat(table.getOptions().getExtensions()).isEmpty(); + assertThat(table.asCQLQuery()) 
+ .contains("read_repair_chance = 0.5") + .contains("dclocal_read_repair_chance = 0.6") + .contains("gc_grace_seconds = 42") + .contains("bloom_filter_fp_chance = 0.01") + .contains("comment = 'My awesome table'") + .contains("caching = 'ALL'") + .contains("'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'") + .contains("'sstable_size_in_mb' : 15") + .contains("'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor'") + .contains("'chunk_length_kb' : 128") + .contains("replicate_on_write = true") + .doesNotContain("index_interval") // 2.0 + .doesNotContain("min_index_interval") // 2.1 + + .doesNotContain("max_index_interval") // 2.1 + + .doesNotContain("speculative_retry") // 2.0 + + .doesNotContain("default_time_to_live") // 2.0 + + .doesNotContain("cdc") + .doesNotContain("memtable_flush_period_in_ms"); // 2.0 + } - @Test(groups = "short") - public void should_escape_single_quote_table_comment() { - // given - String cql = String.format("CREATE TABLE %s.single_quote (\n" - + " c1 int PRIMARY KEY\n" - + ") WITH comment = 'comment with single quote '' should work'", - keyspace); - // when - session().execute(cql); - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("single_quote"); - // then - assertThat(table.getOptions().getComment()).isEqualTo("comment with single quote ' should work"); - assertThat(table.asCQLQuery()).contains("comment = 'comment with single quote '' should work'"); - } - - @Test(groups = "short") - public void should_not_mix_indexes_from_different_tables() { - String[] statements = { - "CREATE TABLE test_ab (a int PRIMARY KEY, b int);", - "CREATE INDEX test_b on test_ab (b);", - "CREATE TABLE test_cd (c int PRIMARY KEY, d int);", - "CREATE INDEX test_d on test_cd (d);", - }; - for (String statement : statements) - session().execute(statement); - - TableMetadata table_ab = cluster().getMetadata().getKeyspace(keyspace).getTable("test_ab"); - TableMetadata table_cd = cluster().getMetadata().getKeyspace(keyspace).getTable("test_cd"); - - assertThat(table_ab.getIndexes().size()).isEqualTo(1); - assertThat(table_ab.getIndexes()).extracting("name").containsOnly("test_b"); - assertThat(table_cd.getIndexes().size()).isEqualTo(1); - assertThat(table_cd.getIndexes()).extracting("name").containsOnly("test_d"); - } - - /** - * Validates that the 'extensions' option is properly parsed when set on a table. - * This value is currently not modifiable via CQL so we fake out a table containing - * populated extensions by updating the extensions column in system_schema.tables - * and forcing a schema refresh on it. - * - * @jira_ticket JAVA-938, CASSANDRA-9426 - * @test_category metadata - */ - @Test(groups = "short") - @CassandraVersion("3.0") - public void should_parse_extensions_from_table_options() throws Exception { - // given - // create a simple table and retrieve it's metadata from system_schema.tables. 
- String cql = String.format("CREATE TABLE %s.table_with_extensions (\n" + // Also check that the generated CQL is valid and creates an identical table + session().execute("DROP TABLE " + table.getName()); + session().execute(table.asCQLQuery()); + TableMetadata actual = + cluster() + .getMetadata() + .getKeyspace(table.getKeyspace().getName()) + .getTable(table.getName()); + + assertThat(actual.getOptions()).isEqualTo(table.getOptions()); + assertThat(actual.asCQLQuery(true)).isEqualTo(table.asCQLQuery(true)); + } + + /** + * Validates that metadata is appropriately parsed when using the new compression options format + * introduced in 3.0.0-alpha1. Since compression options are parsed as a String map, this should + * behave exactly as the old style compression options used in {@link + * #should_parse_table_options}. + * + * @jira_ticket CASSANDRA-9424 + */ + @Test(groups = "short") + @CassandraVersion("3.0") + public void should_parse_new_compression_options() { + // given + String cql = + String.format( + "CREATE TABLE %s.new_compression_options (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 int,\n" + + " i int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ") WITH CLUSTERING ORDER BY (c1 DESC, c2 ASC)\n" + + " AND compression = { 'class' : 'DeflateCompressor', 'chunk_length_in_kb' : 128 };", + keyspace); + + // when + session().execute(cql); + TableMetadata table = + cluster().getMetadata().getKeyspace(keyspace).getTable("new_compression_options"); + + // then + assertThat(table.getOptions().getCompression()) + .contains(entry("class", "org.apache.cassandra.io.compress.DeflateCompressor")) + .contains(entry("chunk_length_in_kb", "128")); + } + + @Test(groups = "short") + public void should_escape_single_quote_table_comment() { + // given + String cql = + String.format( + "CREATE TABLE %s.single_quote (\n" + + " c1 int PRIMARY KEY\n" + + ") WITH comment = 'comment with single quote '' should work'", + keyspace); + // when + session().execute(cql); + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("single_quote"); + // then + assertThat(table.getOptions().getComment()) + .isEqualTo("comment with single quote ' should work"); + assertThat(table.asCQLQuery()).contains("comment = 'comment with single quote '' should work'"); + } + + @Test(groups = "short") + public void should_not_mix_indexes_from_different_tables() { + String[] statements = { + "CREATE TABLE test_ab (a int PRIMARY KEY, b int);", + "CREATE INDEX test_b on test_ab (b);", + "CREATE TABLE test_cd (c int PRIMARY KEY, d int);", + "CREATE INDEX test_d on test_cd (d);", + }; + for (String statement : statements) session().execute(statement); + + TableMetadata table_ab = cluster().getMetadata().getKeyspace(keyspace).getTable("test_ab"); + TableMetadata table_cd = cluster().getMetadata().getKeyspace(keyspace).getTable("test_cd"); + + assertThat(table_ab.getIndexes().size()).isEqualTo(1); + assertThat(table_ab.getIndexes()).extracting("name").containsOnly("test_b"); + assertThat(table_cd.getIndexes().size()).isEqualTo(1); + assertThat(table_cd.getIndexes()).extracting("name").containsOnly("test_d"); + } + + /** + * Validates that the 'extensions' option is properly parsed when set on a table. This value is + * currently not modifiable via CQL so we fake out a table containing populated extensions by + * updating the extensions column in system_schema.tables and forcing a schema refresh on it. 
+ * + * @jira_ticket JAVA-938, CASSANDRA-9426 + * @test_category metadata + */ + @Test(groups = "short") + @CassandraVersion("3.0") + public void should_parse_extensions_from_table_options() throws Exception { + // given + // create a simple table and retrieve it's metadata from system_schema.tables. + String cql = + String.format( + "CREATE TABLE %s.table_with_extensions (\n" + " k text,\n" + " c int,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c)\n" - + ");", keyspace); - session().execute(cql); - - // Manually change column value in system_schema.tables and force a schema refresh on that table. - ImmutableMap extensions = ImmutableMap.of("Hello", ByteBuffer.wrap("World".getBytes("UTF-8"))); - session().execute("update system_schema.tables set extensions=? where keyspace_name=? and table_name=?", extensions, keyspace, "table_with_extensions"); - cluster().manager.controlConnection.refreshSchema(SchemaElement.TABLE, keyspace, "table_with_extensions", null); - - // when retrieving the table's metadata. - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable("table_with_extensions"); - // then the table's options should contain populated extensions. - assertThat(table.getOptions().getExtensions()).isEqualTo(extensions); - } - - /** - * Validates that a table with case-sensitive column names and column names - * consisting of (quoted) reserved keywords is correctly parsed - * and that the generated CQL is valid. - * - * @jira_ticket JAVA-1064 - * @test_category metadata - */ - @Test(groups = "short") - public void should_parse_table_with_case_sensitive_column_names_and_reserved_keywords() throws Exception { - // given - String c1 = Metadata.quote("quotes go \"\" here \"\" "); - String c2 = Metadata.quote("\\x00\\x25"); - String c3 = Metadata.quote("columnfamily"); - String c4 = Metadata.quote("select"); - String c5 = Metadata.quote("who''s there'? "); - String c6 = Metadata.quote("faux )"); - String c7 = Metadata.quote("COMPACT STORAGE"); - // single partition key - String cql1 = String.format("CREATE TABLE %s.\"MyTable1\" (" + + ");", + keyspace); + session().execute(cql); + + // Manually change column value in system_schema.tables and force a schema refresh on that + // table. + ImmutableMap extensions = + ImmutableMap.of("Hello", ByteBuffer.wrap("World".getBytes("UTF-8"))); + session() + .execute( + "update system_schema.tables set extensions=? where keyspace_name=? and table_name=?", + extensions, + keyspace, + "table_with_extensions"); + cluster() + .manager + .controlConnection + .refreshSchema(SchemaElement.TABLE, keyspace, "table_with_extensions", null); + + // when retrieving the table's metadata. + TableMetadata table = + cluster().getMetadata().getKeyspace(keyspace).getTable("table_with_extensions"); + // then the table's options should contain populated extensions. + assertThat(table.getOptions().getExtensions()).isEqualTo(extensions); + } + + /** + * Validates that a table with case-sensitive column names and column names consisting of (quoted) + * reserved keywords is correctly parsed and that the generated CQL is valid. 
+ * + * @jira_ticket JAVA-1064 + * @test_category metadata + */ + @Test(groups = "short") + public void should_parse_table_with_case_sensitive_column_names_and_reserved_keywords() + throws Exception { + // given + String c1 = Metadata.quote("quotes go \"\" here \"\" "); + String c2 = Metadata.quote("\\x00\\x25"); + String c3 = Metadata.quote("columnfamily"); + String c4 = Metadata.quote("select"); + String c5 = Metadata.quote("who''s there'? "); + String c6 = Metadata.quote("faux )"); + String c7 = Metadata.quote("COMPACT STORAGE"); + // single partition key + String cql1 = + String.format( + "CREATE TABLE %s.\"MyTable1\" (" + "%s text, " + "%s text, " + "%s text, " @@ -572,9 +891,12 @@ public void should_parse_table_with_case_sensitive_column_names_and_reserved_key + "%s text, " + "%s text, " + "PRIMARY KEY (%s, %s, %s, %s, %s, %s)" - + ")", keyspace, c1, c2, c3, c4, c5, c6, c7, c1, c2, c3, c4, c5, c6); - // composite partition key - String cql2 = String.format("CREATE TABLE %s.\"MyTable2\" (" + + ")", + keyspace, c1, c2, c3, c4, c5, c6, c7, c1, c2, c3, c4, c5, c6); + // composite partition key + String cql2 = + String.format( + "CREATE TABLE %s.\"MyTable2\" (" + "%s text, " + "%s text, " + "%s text, " @@ -583,36 +905,35 @@ public void should_parse_table_with_case_sensitive_column_names_and_reserved_key + "%s text, " + "%s text, " + "PRIMARY KEY ((%s, %s), %s, %s, %s, %s)" - + ")", keyspace, c1, c2, c3, c4, c5, c6, c7, c1, c2, c3, c4, c5, c6); - // when - execute(cql1, cql2); - TableMetadata table1 = cluster().getMetadata().getKeyspace(keyspace).getTable("\"MyTable1\""); - TableMetadata table2 = cluster().getMetadata().getKeyspace(keyspace).getTable("\"MyTable2\""); - // then - assertThat(table1) - .hasColumn(c1) - .hasColumn(c2) - .hasColumn(c3) - .hasColumn(c4) - .hasColumn(c5) - .hasColumn(c6) - .hasColumn(c7); - assertThat(table1.asCQLQuery()).startsWith(cql1); - assertThat(table2) - .hasColumn(c1) - .hasColumn(c2) - .hasColumn(c3) - .hasColumn(c4) - .hasColumn(c5) - .hasColumn(c6) - .hasColumn(c7); - assertThat(table2.asCQLQuery()).startsWith(cql2); - execute( - "DROP TABLE \"MyTable1\"", - "DROP TABLE \"MyTable2\"", - table1.asCQLQuery(), - table2.asCQLQuery() - ); - } - + + ")", + keyspace, c1, c2, c3, c4, c5, c6, c7, c1, c2, c3, c4, c5, c6); + // when + execute(cql1, cql2); + TableMetadata table1 = cluster().getMetadata().getKeyspace(keyspace).getTable("\"MyTable1\""); + TableMetadata table2 = cluster().getMetadata().getKeyspace(keyspace).getTable("\"MyTable2\""); + // then + assertThat(table1) + .hasColumn(c1) + .hasColumn(c2) + .hasColumn(c3) + .hasColumn(c4) + .hasColumn(c5) + .hasColumn(c6) + .hasColumn(c7); + assertThat(table1.asCQLQuery()).startsWith(cql1); + assertThat(table2) + .hasColumn(c1) + .hasColumn(c2) + .hasColumn(c3) + .hasColumn(c4) + .hasColumn(c5) + .hasColumn(c6) + .hasColumn(c7); + assertThat(table2.asCQLQuery()).startsWith(cql2); + execute( + "DROP TABLE \"MyTable1\"", + "DROP TABLE \"MyTable2\"", + table1.asCQLQuery(), + table2.asCQLQuery()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestListener.java b/driver-core/src/test/java/com/datastax/driver/core/TestListener.java index fc379ff13b8..3a05de58a9b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestListener.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestListener.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,129 +19,153 @@ import com.datastax.driver.core.utils.CassandraVersion; import com.datastax.driver.core.utils.DseVersion; -import org.testng.*; -import org.testng.internal.ConstructorOrMethod; - import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; import java.util.concurrent.TimeUnit; +import org.testng.IInvokedMethod; +import org.testng.IInvokedMethodListener; +import org.testng.ITestNGMethod; +import org.testng.ITestResult; +import org.testng.SkipException; +import org.testng.TestListenerAdapter; +import org.testng.internal.ConstructorOrMethod; public class TestListener extends TestListenerAdapter implements IInvokedMethodListener { - private long start_time = System.nanoTime(); - private int test_index = 0; - - @Override - public void onTestFailure(ITestResult tr) { - long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); - long testTime = tr.getEndMillis() - tr.getStartMillis(); - tr.getThrowable().printStackTrace(); - System.out.println("FAILED : " + tr.getName()); - System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); - System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); - System.out.println(); - } - - @Override - public void onTestSkipped(ITestResult tr) { - long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); - long testTime = tr.getEndMillis() - tr.getStartMillis(); - System.out.println("SKIPPED: " + tr.getName()); - System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); - System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); - System.out.println(); + private long start_time = System.nanoTime(); + private int test_index = 0; + + @Override + public void onTestFailure(ITestResult tr) { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); + long testTime = tr.getEndMillis() - tr.getStartMillis(); + tr.getThrowable().printStackTrace(); + System.out.println("FAILED : " + tr.getName()); + System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); + System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); + System.out.println(); + } + + @Override + public void onTestSkipped(ITestResult tr) { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); + long testTime = tr.getEndMillis() - tr.getStartMillis(); + System.out.println("SKIPPED: " + tr.getName()); + System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); + System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); + System.out.println(); + } + + @Override + public void onTestSuccess(ITestResult tr) { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); + long testTime = tr.getEndMillis() - tr.getStartMillis(); + 
System.out.println("SUCCESS: " + tr.getName()); + System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); + System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); + System.out.println(); + } + + @Override + public void onTestStart(ITestResult tr) { + System.out.println(); + System.out.println("-----------------------------------------------"); + System.out.println( + "Starting " + + tr.getTestClass().getName() + + '.' + + tr.getName() + + " [Test #" + + ++test_index + + "]..."); + } + + static String formatIntoHHMMSS(long secondsTotal) { + long hours = secondsTotal / 3600, + remainder = secondsTotal % 3600, + minutes = remainder / 60, + seconds = remainder % 60; + + return ((hours < 10 ? "0" : "") + + hours + + ':' + + (minutes < 10 ? "0" : "") + + minutes + + ':' + + (seconds < 10 ? "0" : "") + + seconds); + } + + @Override + public void beforeInvocation(IInvokedMethod testMethod, ITestResult testResult) { + // Check to see if the class or method is annotated with 'CassandraVersion', if so ensure the + // version we are testing with meets the requirement, if not a SkipException is thrown + // and this test is skipped. + ITestNGMethod testNgMethod = testResult.getMethod(); + ConstructorOrMethod constructorOrMethod = testNgMethod.getConstructorOrMethod(); + + Class clazz = testNgMethod.getInstance().getClass(); + if (clazz != null) { + do { + if (scanAnnotatedElement(clazz)) break; + } while (!(clazz = clazz.getSuperclass()).equals(Object.class)); } - - @Override - public void onTestSuccess(ITestResult tr) { - long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((System.nanoTime() - start_time)); - long testTime = tr.getEndMillis() - tr.getStartMillis(); - System.out.println("SUCCESS: " + tr.getName()); - System.out.println("Test : " + formatIntoHHMMSS(testTime / 1000)); - System.out.println("Elapsed: " + formatIntoHHMMSS(elapsedTime)); - System.out.println(); + Method method = constructorOrMethod.getMethod(); + if (method != null) { + scanAnnotatedElement(method); } + } - @Override - public void onTestStart(ITestResult tr) { - System.out.println(); - System.out.println("-----------------------------------------------"); - System.out.println("Starting " + tr.getTestClass().getName() + '.' + tr.getName() + " [Test #" + ++test_index + "]..."); + private boolean scanAnnotatedElement(AnnotatedElement element) { + if (element.isAnnotationPresent(CassandraVersion.class)) { + CassandraVersion cassandraVersion = element.getAnnotation(CassandraVersion.class); + cassandraVersionCheck(cassandraVersion); + return true; } - - static String formatIntoHHMMSS(long secondsTotal) { - long hours = secondsTotal / 3600, - remainder = secondsTotal % 3600, - minutes = remainder / 60, - seconds = remainder % 60; - - return ((hours < 10 ? "0" : "") + hours - + ':' + (minutes < 10 ? "0" : "") + minutes - + ':' + (seconds < 10 ? "0" : "") + seconds); + if (element.isAnnotationPresent(DseVersion.class)) { + DseVersion dseVersion = element.getAnnotation(DseVersion.class); + dseVersionCheck(dseVersion); + return true; } - - @Override - public void beforeInvocation(IInvokedMethod testMethod, ITestResult testResult) { - // Check to see if the class or method is annotated with 'CassandraVersion', if so ensure the - // version we are testing with meets the requirement, if not a SkipException is thrown - // and this test is skipped. 
- ITestNGMethod testNgMethod = testResult.getMethod(); - ConstructorOrMethod constructorOrMethod = testNgMethod.getConstructorOrMethod(); - - Class clazz = testNgMethod.getInstance().getClass(); - if (clazz != null) { - do { - if (scanAnnotatedElement(clazz)) - break; - } while (!(clazz = clazz.getSuperclass()).equals(Object.class)); - } - Method method = constructorOrMethod.getMethod(); - if (method != null) { - scanAnnotatedElement(method); - } + return false; + } + + @Override + public void afterInvocation(IInvokedMethod testMethod, ITestResult testResult) { + // Do nothing + } + + private static void cassandraVersionCheck(CassandraVersion version) { + versionCheck( + CCMBridge.getGlobalCassandraVersion(), + VersionNumber.parse(version.value()), + version.description()); + } + + private static void dseVersionCheck(DseVersion version) { + VersionNumber dseVersion = CCMBridge.getGlobalDSEVersion(); + if (dseVersion != null) { + versionCheck( + CCMBridge.getGlobalDSEVersion(), + VersionNumber.parse(version.value()), + version.description()); + } else { + throw new SkipException( + "Skipping test because not configured for DataStax Enterprise cluster."); } - - private boolean scanAnnotatedElement(AnnotatedElement element) { - if (element.isAnnotationPresent(CassandraVersion.class)) { - CassandraVersion cassandraVersion = element.getAnnotation(CassandraVersion.class); - cassandraVersionCheck(cassandraVersion); - return true; - } - if (element.isAnnotationPresent(DseVersion.class)) { - DseVersion dseVersion = element.getAnnotation(DseVersion.class); - dseVersionCheck(dseVersion); - return true; - } - return false; - } - - @Override - public void afterInvocation(IInvokedMethod testMethod, ITestResult testResult) { - // Do nothing - } - - private static void cassandraVersionCheck(CassandraVersion version) { - versionCheck(CCMBridge.getGlobalCassandraVersion(), VersionNumber.parse(version.value()), version.description()); - } - - private static void dseVersionCheck(DseVersion version) { - VersionNumber dseVersion = CCMBridge.getGlobalDSEVersion(); - if (dseVersion != null) { - versionCheck(CCMBridge.getGlobalDSEVersion(), VersionNumber.parse(version.value()), version.description()); - } else { - throw new SkipException("Skipping test because not configured for DataStax Enterprise cluster."); - } - } - - private static void versionCheck(VersionNumber current, VersionNumber required, String skipString) { - if (current == null) { - throw new SkipException("Skipping test because provided version is null"); - } else { - if (current.compareTo(required) < 0) { - throw new SkipException( - String.format("Version >= %s required, but found %s. Justification: %s", - required, current, skipString)); - } - } + } + + private static void versionCheck( + VersionNumber current, VersionNumber required, String skipString) { + if (current == null) { + throw new SkipException("Skipping test because provided version is null"); + } else { + if (current.compareTo(required) < 0) { + throw new SkipException( + String.format( + "Version >= %s required, but found %s. Justification: %s", + required, current, skipString)); + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index 5ae5818e08b..6e1ff3a9588 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,11 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.ConditionChecker.check; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.SECONDS; + +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.policies.RoundRobinPolicy; import com.datastax.driver.core.policies.WhiteListPolicy; import com.google.common.base.Predicate; @@ -24,912 +31,1001 @@ import com.google.common.util.concurrent.Uninterruptibles; import com.sun.management.OperatingSystemMXBean; import io.netty.channel.EventLoopGroup; -import org.apache.log4j.Level; -import org.scassandra.Scassandra; -import org.scassandra.ScassandraFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.lang.management.ManagementFactory; import java.math.BigDecimal; import java.math.BigInteger; -import java.net.*; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.UnknownHostException; import java.nio.ByteBuffer; -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.log4j.Level; +import org.scassandra.Scassandra; +import org.scassandra.ScassandraFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.SkipException; -import static com.datastax.driver.core.ConditionChecker.check; -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.concurrent.TimeUnit.SECONDS; - -/** - * A number of static fields/methods handy for tests. - */ +/** A number of static fields/methods handy for tests. 
*/ public abstract class TestUtils { - public static final String IP_PREFIX; + public static final String IP_PREFIX; - static { - String ip_prefix = System.getProperty("ipprefix"); - if (ip_prefix == null || ip_prefix.isEmpty()) { - ip_prefix = "127.0.1."; - } - IP_PREFIX = ip_prefix; + static { + String ip_prefix = System.getProperty("ipprefix"); + if (ip_prefix == null || ip_prefix.isEmpty()) { + ip_prefix = "127.0.1."; } - - private static final Logger logger = LoggerFactory.getLogger(TestUtils.class); - - public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }"; - public static final String CREATE_KEYSPACE_GENERIC_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : '%s', %s }"; - - public static final String SELECT_ALL_FORMAT = "SELECT * FROM %s"; - - public static final int TEST_BASE_NODE_WAIT = SystemProperties.getInt("com.datastax.driver.TEST_BASE_NODE_WAIT", 60); - - public static void setValue(SettableByIndexData data, int i, DataType type, Object value) { - switch (type.getName()) { - case ASCII: - data.setString(i, (String) value); - break; - case BIGINT: - data.setLong(i, (Long) value); - break; - case BLOB: - data.setBytes(i, (ByteBuffer) value); - break; - case BOOLEAN: - data.setBool(i, (Boolean) value); - break; - case COUNTER: - // Just a no-op, we shouldn't handle counters the same way than other types - break; - case DECIMAL: - data.setDecimal(i, (BigDecimal) value); - break; - case DOUBLE: - data.setDouble(i, (Double) value); - break; - case FLOAT: - data.setFloat(i, (Float) value); - break; - case INET: - data.setInet(i, (InetAddress) value); - break; - case TINYINT: - data.setByte(i, (Byte) value); - break; - case SMALLINT: - data.setShort(i, (Short) value); - break; - case INT: - data.setInt(i, (Integer) value); - break; - case TEXT: - data.setString(i, (String) value); - break; - case TIMESTAMP: - data.setTimestamp(i, (Date) value); - break; - case DATE: - data.setDate(i, (LocalDate) value); - break; - case TIME: - data.setTime(i, (Long) value); - break; - case UUID: - data.setUUID(i, (UUID) value); - break; - case VARCHAR: - data.setString(i, (String) value); - break; - case VARINT: - data.setVarint(i, (BigInteger) value); - break; - case TIMEUUID: - data.setUUID(i, (UUID) value); - break; - case LIST: - data.setList(i, (List) value); - break; - case SET: - data.setSet(i, (Set) value); - break; - case MAP: - data.setMap(i, (Map) value); - break; - default: - throw new RuntimeException("Missing handling of " + type); - } - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public static void setValue(SettableByNameData data, String name, DataType type, Object value) { - switch (type.getName()) { - case ASCII: - data.setString(name, (String) value); - break; - case BIGINT: - data.setLong(name, (Long) value); - break; - case BLOB: - data.setBytes(name, (ByteBuffer) value); - break; - case BOOLEAN: - data.setBool(name, (Boolean) value); - break; - case COUNTER: - // Just a no-op, we shouldn't handle counters the same way than other types - break; - case DECIMAL: - data.setDecimal(name, (BigDecimal) value); - break; - case DOUBLE: - data.setDouble(name, (Double) value); - break; - case FLOAT: - data.setFloat(name, (Float) value); - break; - case INET: - data.setInet(name, (InetAddress) value); - break; - case TINYINT: - data.setByte(name, (Byte) value); - break; - case SMALLINT: - data.setShort(name, (Short) value); - break; - case INT: - data.setInt(name, 
(Integer) value); - break; - case TEXT: - data.setString(name, (String) value); - break; - case TIMESTAMP: - data.setTimestamp(name, (Date) value); - break; - case DATE: - data.setDate(name, (LocalDate) value); - break; - case TIME: - data.setTime(name, (Long) value); - break; - case UUID: - data.setUUID(name, (UUID) value); - break; - case VARCHAR: - data.setString(name, (String) value); - break; - case VARINT: - data.setVarint(name, (BigInteger) value); - break; - case TIMEUUID: - data.setUUID(name, (UUID) value); - break; - case LIST: - data.setList(name, (List) value); - break; - case SET: - data.setSet(name, (Set) value); - break; - case MAP: - data.setMap(name, (Map) value); - break; - default: - throw new RuntimeException("Missing handling of " + type); - } - } - - public static Object getValue(GettableByIndexData data, int i, DataType type, CodecRegistry codecRegistry) { - switch (type.getName()) { - case ASCII: - return data.getString(i); - case BIGINT: - return data.getLong(i); - case BLOB: - return data.getBytes(i); - case BOOLEAN: - return data.getBool(i); - case COUNTER: - return data.getLong(i); - case DECIMAL: - return data.getDecimal(i); - case DOUBLE: - return data.getDouble(i); - case FLOAT: - return data.getFloat(i); - case INET: - return data.getInet(i); - case TINYINT: - return data.getByte(i); - case SMALLINT: - return data.getShort(i); - case INT: - return data.getInt(i); - case TEXT: - return data.getString(i); - case TIMESTAMP: - return data.getTimestamp(i); - case DATE: - return data.getDate(i); - case TIME: - return data.getTime(i); - case UUID: - return data.getUUID(i); - case VARCHAR: - return data.getString(i); - case VARINT: - return data.getVarint(i); - case TIMEUUID: - return data.getUUID(i); - case LIST: - Class listEltClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - return data.getList(i, listEltClass); - case SET: - Class setEltClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - return data.getSet(i, setEltClass); - case MAP: - Class keyClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - Class valueClass = codecRegistry.codecFor(type.getTypeArguments().get(1)).getJavaType().getRawType(); - return data.getMap(i, keyClass, valueClass); - } + IP_PREFIX = ip_prefix; + } + + private static final Logger logger = LoggerFactory.getLogger(TestUtils.class); + + public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = + "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }"; + public static final String CREATE_KEYSPACE_GENERIC_FORMAT = + "CREATE KEYSPACE %s WITH replication = { 'class' : '%s', %s }"; + + public static final String SELECT_ALL_FORMAT = "SELECT * FROM %s"; + + public static final int TEST_BASE_NODE_WAIT = + SystemProperties.getInt("com.datastax.driver.TEST_BASE_NODE_WAIT", 60); + + public static void setValue(SettableByIndexData data, int i, DataType type, Object value) { + switch (type.getName()) { + case ASCII: + data.setString(i, (String) value); + break; + case BIGINT: + data.setLong(i, (Long) value); + break; + case BLOB: + data.setBytes(i, (ByteBuffer) value); + break; + case BOOLEAN: + data.setBool(i, (Boolean) value); + break; + case COUNTER: + // Just a no-op, we shouldn't handle counters the same way than other types + break; + case DECIMAL: + data.setDecimal(i, (BigDecimal) value); + break; + case DOUBLE: + data.setDouble(i, (Double) value); + break; + case FLOAT: + 
data.setFloat(i, (Float) value); + break; + case INET: + data.setInet(i, (InetAddress) value); + break; + case TINYINT: + data.setByte(i, (Byte) value); + break; + case SMALLINT: + data.setShort(i, (Short) value); + break; + case INT: + data.setInt(i, (Integer) value); + break; + case TEXT: + data.setString(i, (String) value); + break; + case TIMESTAMP: + data.setTimestamp(i, (Date) value); + break; + case DATE: + data.setDate(i, (LocalDate) value); + break; + case TIME: + data.setTime(i, (Long) value); + break; + case UUID: + data.setUUID(i, (UUID) value); + break; + case VARCHAR: + data.setString(i, (String) value); + break; + case VARINT: + data.setVarint(i, (BigInteger) value); + break; + case TIMEUUID: + data.setUUID(i, (UUID) value); + break; + case LIST: + data.setList(i, (List) value); + break; + case SET: + data.setSet(i, (Set) value); + break; + case MAP: + data.setMap(i, (Map) value); + break; + default: throw new RuntimeException("Missing handling of " + type); } - - public static Object getValue(GettableByNameData data, String name, DataType type, CodecRegistry codecRegistry) { - switch (type.getName()) { - case ASCII: - return data.getString(name); - case BIGINT: - return data.getLong(name); - case BLOB: - return data.getBytes(name); - case BOOLEAN: - return data.getBool(name); - case COUNTER: - return data.getLong(name); - case DECIMAL: - return data.getDecimal(name); - case DOUBLE: - return data.getDouble(name); - case FLOAT: - return data.getFloat(name); - case INET: - return data.getInet(name); - case TINYINT: - return data.getByte(name); - case SMALLINT: - return data.getShort(name); - case INT: - return data.getInt(name); - case TEXT: - return data.getString(name); - case TIMESTAMP: - return data.getTimestamp(name); - case DATE: - return data.getDate(name); - case TIME: - return data.getTime(name); - case UUID: - return data.getUUID(name); - case VARCHAR: - return data.getString(name); - case VARINT: - return data.getVarint(name); - case TIMEUUID: - return data.getUUID(name); - case LIST: - Class listEltClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - return data.getList(name, listEltClass); - case SET: - Class setEltClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - return data.getSet(name, setEltClass); - case MAP: - Class keyClass = codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); - Class valueClass = codecRegistry.codecFor(type.getTypeArguments().get(1)).getJavaType().getRawType(); - return data.getMap(name, keyClass, valueClass); - } + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public static void setValue( + SettableByNameData data, String name, DataType type, Object value) { + switch (type.getName()) { + case ASCII: + data.setString(name, (String) value); + break; + case BIGINT: + data.setLong(name, (Long) value); + break; + case BLOB: + data.setBytes(name, (ByteBuffer) value); + break; + case BOOLEAN: + data.setBool(name, (Boolean) value); + break; + case COUNTER: + // Just a no-op, we shouldn't handle counters the same way than other types + break; + case DECIMAL: + data.setDecimal(name, (BigDecimal) value); + break; + case DOUBLE: + data.setDouble(name, (Double) value); + break; + case FLOAT: + data.setFloat(name, (Float) value); + break; + case INET: + data.setInet(name, (InetAddress) value); + break; + case TINYINT: + data.setByte(name, (Byte) value); + break; + case SMALLINT: + data.setShort(name, (Short) value); + break; + case 
INT: + data.setInt(name, (Integer) value); + break; + case TEXT: + data.setString(name, (String) value); + break; + case TIMESTAMP: + data.setTimestamp(name, (Date) value); + break; + case DATE: + data.setDate(name, (LocalDate) value); + break; + case TIME: + data.setTime(name, (Long) value); + break; + case UUID: + data.setUUID(name, (UUID) value); + break; + case VARCHAR: + data.setString(name, (String) value); + break; + case VARINT: + data.setVarint(name, (BigInteger) value); + break; + case TIMEUUID: + data.setUUID(name, (UUID) value); + break; + case LIST: + data.setList(name, (List) value); + break; + case SET: + data.setSet(name, (Set) value); + break; + case MAP: + data.setMap(name, (Map) value); + break; + default: throw new RuntimeException("Missing handling of " + type); } - - // Always return the "same" value for each type - @SuppressWarnings("serial") - public static Object getFixedValue(final DataType type) { - try { - switch (type.getName()) { - case ASCII: - return "An ascii string"; - case BIGINT: - return 42L; - case BLOB: - return ByteBuffer.wrap(new byte[]{(byte) 4, (byte) 12, (byte) 1}); - case BOOLEAN: - return true; - case COUNTER: - throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); - case DURATION: - return Duration.from("1h20m3s"); - case DECIMAL: - return new BigDecimal("3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679"); - case DOUBLE: - return 3.142519; - case FLOAT: - return 3.142519f; - case INET: - return InetAddress.getByAddress(new byte[]{(byte) 127, (byte) 0, (byte) 0, (byte) 1}); - case TINYINT: - return (byte) 25; - case SMALLINT: - return (short) 26; - case INT: - return 24; - case TEXT: - return "A text string"; - case TIMESTAMP: - return new Date(1352288289L); - case DATE: - return LocalDate.fromDaysSinceEpoch(0); - case TIME: - return 54012123450000L; - case UUID: - return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B"); - case VARCHAR: - return "A varchar string"; - case VARINT: - return new BigInteger("123456789012345678901234567890"); - case TIMEUUID: - return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); - case LIST: - return new ArrayList() {{ - add(getFixedValue(type.getTypeArguments().get(0))); - }}; - case SET: - return new HashSet() {{ - add(getFixedValue(type.getTypeArguments().get(0))); - }}; - case MAP: - return new HashMap() {{ - put(getFixedValue(type.getTypeArguments().get(0)), getFixedValue(type.getTypeArguments().get(1))); - }}; - } - } catch (Exception e) { - throw new RuntimeException(e); - } - throw new RuntimeException("Missing handling of " + type); + } + + public static Object getValue( + GettableByIndexData data, int i, DataType type, CodecRegistry codecRegistry) { + switch (type.getName()) { + case ASCII: + return data.getString(i); + case BIGINT: + return data.getLong(i); + case BLOB: + return data.getBytes(i); + case BOOLEAN: + return data.getBool(i); + case COUNTER: + return data.getLong(i); + case DECIMAL: + return data.getDecimal(i); + case DOUBLE: + return data.getDouble(i); + case FLOAT: + return data.getFloat(i); + case INET: + return data.getInet(i); + case TINYINT: + return data.getByte(i); + case SMALLINT: + return data.getShort(i); + case INT: + return data.getInt(i); + case TEXT: + return data.getString(i); + case TIMESTAMP: + return data.getTimestamp(i); + case DATE: + return data.getDate(i); + case TIME: + return data.getTime(i); + case UUID: + return data.getUUID(i); + case VARCHAR: + return 
data.getString(i); + case VARINT: + return data.getVarint(i); + case TIMEUUID: + return data.getUUID(i); + case LIST: + Class listEltClass = + codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + return data.getList(i, listEltClass); + case SET: + Class setEltClass = + codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + return data.getSet(i, setEltClass); + case MAP: + Class keyClass = + codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + Class valueClass = + codecRegistry.codecFor(type.getTypeArguments().get(1)).getJavaType().getRawType(); + return data.getMap(i, keyClass, valueClass); } - - // Always return the "same" value for each type - @SuppressWarnings("serial") - public static Object getFixedValue2(final DataType type) { - try { - switch (type.getName()) { - case ASCII: - return "A different ascii string"; - case BIGINT: - return Long.MAX_VALUE; - case BLOB: - ByteBuffer bb = ByteBuffer.allocate(64); - bb.putInt(0xCAFE); - bb.putShort((short) 3); - bb.putShort((short) 45); - return bb; - case BOOLEAN: - return false; - case COUNTER: - throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); - case DECIMAL: - return new BigDecimal("12.3E+7"); - case DOUBLE: - return Double.POSITIVE_INFINITY; - case FLOAT: - return Float.POSITIVE_INFINITY; - case INET: - return InetAddress.getByName("123.123.123.123"); - case TINYINT: - return Byte.MAX_VALUE; - case SMALLINT: - return Short.MAX_VALUE; - case INT: - return Integer.MAX_VALUE; - case TEXT: - return "résumé"; - case TIMESTAMP: - return new Date(872835240000L); - case DATE: - return LocalDate.fromDaysSinceEpoch(0); - case TIME: - return 54012123450000L; - case UUID: - return UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00"); - case VARCHAR: - return "A different varchar résumé"; - case VARINT: - return new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000"); - case TIMEUUID: - return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); - case LIST: - return new ArrayList() {{ - add(getFixedValue2(type.getTypeArguments().get(0))); - }}; - case SET: - return new HashSet() {{ - add(getFixedValue2(type.getTypeArguments().get(0))); - }}; - case MAP: - return new HashMap() {{ - put(getFixedValue2(type.getTypeArguments().get(0)), getFixedValue2(type.getTypeArguments().get(1))); - }}; + throw new RuntimeException("Missing handling of " + type); + } + + public static Object getValue( + GettableByNameData data, String name, DataType type, CodecRegistry codecRegistry) { + switch (type.getName()) { + case ASCII: + return data.getString(name); + case BIGINT: + return data.getLong(name); + case BLOB: + return data.getBytes(name); + case BOOLEAN: + return data.getBool(name); + case COUNTER: + return data.getLong(name); + case DECIMAL: + return data.getDecimal(name); + case DOUBLE: + return data.getDouble(name); + case FLOAT: + return data.getFloat(name); + case INET: + return data.getInet(name); + case TINYINT: + return data.getByte(name); + case SMALLINT: + return data.getShort(name); + case INT: + return data.getInt(name); + case TEXT: + return data.getString(name); + case TIMESTAMP: + return data.getTimestamp(name); + case DATE: + return data.getDate(name); + case TIME: + return data.getTime(name); + case UUID: + return data.getUUID(name); + case VARCHAR: + return data.getString(name); + case VARINT: + return data.getVarint(name); + case TIMEUUID: + return data.getUUID(name); + case LIST: + Class listEltClass = + 
codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + return data.getList(name, listEltClass); + case SET: + Class setEltClass = + codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + return data.getSet(name, setEltClass); + case MAP: + Class keyClass = + codecRegistry.codecFor(type.getTypeArguments().get(0)).getJavaType().getRawType(); + Class valueClass = + codecRegistry.codecFor(type.getTypeArguments().get(1)).getJavaType().getRawType(); + return data.getMap(name, keyClass, valueClass); + } + throw new RuntimeException("Missing handling of " + type); + } + + // Always return the "same" value for each type + @SuppressWarnings("serial") + public static Object getFixedValue(final DataType type) { + try { + switch (type.getName()) { + case ASCII: + return "An ascii string"; + case BIGINT: + return 42L; + case BLOB: + return ByteBuffer.wrap(new byte[] {(byte) 4, (byte) 12, (byte) 1}); + case BOOLEAN: + return true; + case COUNTER: + throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); + case DURATION: + return Duration.from("1h20m3s"); + case DECIMAL: + return new BigDecimal( + "3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679"); + case DOUBLE: + return 3.142519; + case FLOAT: + return 3.142519f; + case INET: + return InetAddress.getByAddress(new byte[] {(byte) 127, (byte) 0, (byte) 0, (byte) 1}); + case TINYINT: + return (byte) 25; + case SMALLINT: + return (short) 26; + case INT: + return 24; + case TEXT: + return "A text string"; + case TIMESTAMP: + return new Date(1352288289L); + case DATE: + return LocalDate.fromDaysSinceEpoch(0); + case TIME: + return 54012123450000L; + case UUID: + return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B"); + case VARCHAR: + return "A varchar string"; + case VARINT: + return new BigInteger("123456789012345678901234567890"); + case TIMEUUID: + return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); + case LIST: + return new ArrayList() { + { + add(getFixedValue(type.getTypeArguments().get(0))); } - } catch (Exception e) { - throw new RuntimeException(e); - } - throw new RuntimeException("Missing handling of " + type); + }; + case SET: + return new HashSet() { + { + add(getFixedValue(type.getTypeArguments().get(0))); + } + }; + case MAP: + return new HashMap() { + { + put( + getFixedValue(type.getTypeArguments().get(0)), + getFixedValue(type.getTypeArguments().get(1))); + } + }; + } + } catch (Exception e) { + throw new RuntimeException(e); } - - /** - * Returns a set of all primitive types supported by the given protocolVersion. - *
<p/>
    - * Primitive types are defined as the types that don't have type arguments - * (that is excluding lists, sets, and maps, tuples and udts). - *
<p/>
    - * - * @param protocolVersion protocol version to get types for. - * @return returns a set of all the primitive types for the given protocolVersion. - */ - static Set allPrimitiveTypes(final ProtocolVersion protocolVersion) { - return Sets.filter(DataType.allPrimitiveTypes(), new Predicate() { - - @Override - public boolean apply(DataType dataType) { - return protocolVersion.compareTo(dataType.getName().minProtocolVersion) >= 0; + throw new RuntimeException("Missing handling of " + type); + } + + // Always return the "same" value for each type + @SuppressWarnings("serial") + public static Object getFixedValue2(final DataType type) { + try { + switch (type.getName()) { + case ASCII: + return "A different ascii string"; + case BIGINT: + return Long.MAX_VALUE; + case BLOB: + ByteBuffer bb = ByteBuffer.allocate(64); + bb.putInt(0xCAFE); + bb.putShort((short) 3); + bb.putShort((short) 45); + return bb; + case BOOLEAN: + return false; + case COUNTER: + throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); + case DECIMAL: + return new BigDecimal("12.3E+7"); + case DOUBLE: + return Double.POSITIVE_INFINITY; + case FLOAT: + return Float.POSITIVE_INFINITY; + case INET: + return InetAddress.getByName("123.123.123.123"); + case TINYINT: + return Byte.MAX_VALUE; + case SMALLINT: + return Short.MAX_VALUE; + case INT: + return Integer.MAX_VALUE; + case TEXT: + return "résumé"; + case TIMESTAMP: + return new Date(872835240000L); + case DATE: + return LocalDate.fromDaysSinceEpoch(0); + case TIME: + return 54012123450000L; + case UUID: + return UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00"); + case VARCHAR: + return "A different varchar résumé"; + case VARINT: + return new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000"); + case TIMEUUID: + return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); + case LIST: + return new ArrayList() { + { + add(getFixedValue2(type.getTypeArguments().get(0))); + } + }; + case SET: + return new HashSet() { + { + add(getFixedValue2(type.getTypeArguments().get(0))); } + }; + case MAP: + return new HashMap() { + { + put( + getFixedValue2(type.getTypeArguments().get(0)), + getFixedValue2(type.getTypeArguments().get(1))); + } + }; + } + } catch (Exception e) { + throw new RuntimeException(e); + } + throw new RuntimeException("Missing handling of " + type); + } + + /** + * Returns a set of all primitive types supported by the given protocolVersion. + * + *
<p>
    Primitive types are defined as the types that don't have type arguments (that is excluding + * lists, sets, and maps, tuples and udts). + * + * @param protocolVersion protocol version to get types for. + * @return returns a set of all the primitive types for the given protocolVersion. + */ + static Set allPrimitiveTypes(final ProtocolVersion protocolVersion) { + return Sets.filter( + DataType.allPrimitiveTypes(), + new Predicate() { + + @Override + public boolean apply(DataType dataType) { + return protocolVersion.compareTo(dataType.getName().minProtocolVersion) >= 0; + } }); + } + + // Wait for a node to be up and running + // This is used because there is some delay between when a node has been + // added through ccm and when it's actually available for querying + public static void waitForUp(String node, Cluster cluster) { + waitFor(node, cluster, TEST_BASE_NODE_WAIT, false); + } + + public static void waitForUp(String node, Cluster cluster, int timeoutSeconds) { + waitFor(node, cluster, timeoutSeconds, false); + } + + public static void waitForDown(String node, Cluster cluster) { + waitFor(node, cluster, TEST_BASE_NODE_WAIT * 3, true); + } + + public static void waitForDown(String node, Cluster cluster, int timeoutSeconds) { + waitFor(node, cluster, timeoutSeconds, true); + } + + private static void waitFor( + String node, Cluster cluster, int timeoutSeconds, boolean waitForDown) { + if (waitForDown) logger.debug("Waiting for node to leave: {}", node); + else logger.debug("Waiting for upcoming node: {}", node); + // In the case where the we've killed the last node in the cluster, if we haven't + // tried doing an actual query, the driver won't realize that last node is dead until + // keep alive kicks in, but that's a fairly long time. So we cheat and trigger a force + // the detection by forcing a request. 
+ if (waitForDown) + Futures.getUnchecked(cluster.manager.submitSchemaRefresh(null, null, null, null)); + if (waitForDown) { + check() + .every(1, SECONDS) + .before(timeoutSeconds, SECONDS) + .that(new HostIsDown(cluster, node)) + .becomesTrue(); + } else { + check() + .every(1, SECONDS) + .before(timeoutSeconds, SECONDS) + .that(new HostIsUp(cluster, node)) + .becomesTrue(); } + } - // Wait for a node to be up and running - // This is used because there is some delay between when a node has been - // added through ccm and when it's actually available for querying - public static void waitForUp(String node, Cluster cluster) { - waitFor(node, cluster, TEST_BASE_NODE_WAIT, false); - } + private static class HostIsDown implements Callable { - public static void waitForUp(String node, Cluster cluster, int timeoutSeconds) { - waitFor(node, cluster, timeoutSeconds, false); - } + private final Cluster cluster; - public static void waitForDown(String node, Cluster cluster) { - waitFor(node, cluster, TEST_BASE_NODE_WAIT * 3, true); - } + private final String ip; - public static void waitForDown(String node, Cluster cluster, int timeoutSeconds) { - waitFor(node, cluster, timeoutSeconds, true); + public HostIsDown(Cluster cluster, String ip) { + this.cluster = cluster; + this.ip = ip; } - private static void waitFor(String node, Cluster cluster, int timeoutSeconds, boolean waitForDown) { - if (waitForDown) - logger.debug("Waiting for node to leave: {}", node); - else - logger.debug("Waiting for upcoming node: {}", node); - // In the case where the we've killed the last node in the cluster, if we haven't - // tried doing an actual query, the driver won't realize that last node is dead until - // keep alive kicks in, but that's a fairly long time. So we cheat and trigger a force - // the detection by forcing a request. - if (waitForDown) - Futures.getUnchecked(cluster.manager.submitSchemaRefresh(null, null, null, null)); - if (waitForDown) { - check() - .every(1, SECONDS) - .before(timeoutSeconds, SECONDS) - .that(new HostIsDown(cluster, node)) - .becomesTrue(); - } else { - check() - .every(1, SECONDS) - .before(timeoutSeconds, SECONDS) - .that(new HostIsUp(cluster, node)) - .becomesTrue(); - } + @Override + public Boolean call() throws Exception { + final Host host = findHost(cluster, ip); + return host == null || !host.isUp(); } + } - private static class HostIsDown implements Callable { + private static class HostIsUp implements Callable { - private final Cluster cluster; + private final Cluster cluster; - private final String ip; + private final String ip; - public HostIsDown(Cluster cluster, String ip) { - this.cluster = cluster; - this.ip = ip; - } - - @Override - public Boolean call() throws Exception { - final Host host = findHost(cluster, ip); - return host == null || !host.isUp(); - } + public HostIsUp(Cluster cluster, String ip) { + this.cluster = cluster; + this.ip = ip; } - private static class HostIsUp implements Callable { - - private final Cluster cluster; - - private final String ip; - - public HostIsUp(Cluster cluster, String ip) { - this.cluster = cluster; - this.ip = ip; - } - - @Override - public Boolean call() throws Exception { - final Host host = findHost(cluster, ip); - return host != null && host.isUp(); - } - } - - /** - * Returns the IP of the {@code nth} host in the cluster (counting from 1, i.e., - * {@code ipOfNode(1)} returns the IP of the first node. - *
<p/>
    - * In multi-DC setups, nodes are numbered in ascending order of their datacenter number. - * E.g. with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. - * - * @return the IP of the {@code nth} host in the cluster. - */ - public static String ipOfNode(int n) { - return IP_PREFIX + n; - } - - /** - * Returns the address of the {@code nth} host in the cluster (counting from 1, i.e., - * {@code ipOfNode(1)} returns the address of the first node. - *
<p/>
    - * In multi-DC setups, nodes are numbered in ascending order of their datacenter number. - * E.g. with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. - * - * @return the IP of the {@code nth} host in the cluster. - */ - public static InetAddress addressOfNode(int i) { - try { - return InetAddress.getByName(IP_PREFIX + i); - } catch (UnknownHostException e) { - // should never happen - throw Throwables.propagate(e); - } - } - - public static Host findOrWaitForHost(Cluster cluster, int node, long duration, TimeUnit unit) { - return findOrWaitForHost(cluster, ipOfNode(node), duration, unit); - } - - public static Host findOrWaitForHost(final Cluster cluster, final String address, long duration, TimeUnit unit) { - Host host = findHost(cluster, address); - if (host == null) { - final CountDownLatch addSignal = new CountDownLatch(1); - Host.StateListener addListener = new StateListenerBase() { - @Override - public void onAdd(Host host) { - if (host.getAddress().getHostAddress().equals(address)) { - // for a new node, because of this we also listen for add events. - addSignal.countDown(); - } - } - }; - cluster.register(addListener); - try { - // Wait until an add event occurs or we timeout. - if (addSignal.await(duration, unit)) { - host = findHost(cluster, address); - } - } catch (InterruptedException e) { - return null; - } finally { - cluster.unregister(addListener); - } - } - return host; - } - - public static Host findHost(Cluster cluster, int hostNumber) { - return findHost(cluster, ipOfNode(hostNumber)); + @Override + public Boolean call() throws Exception { + final Host host = findHost(cluster, ip); + return host != null && host.isUp(); } - - public static Host findHost(Cluster cluster, String address) { - // Note: we can't rely on ProtocolOptions.getPort() to build an InetSocketAddress and call metadata.getHost, - // because the port doesn't have the correct value if addContactPointsWithPorts was used to create the Cluster - // (JAVA-860 will solve that) - for (Host host : cluster.getMetadata().allHosts()) { - if (host.getAddress().getHostAddress().equals(address)) - return host; - } - return null; + } + + /** + * Returns the IP of the {@code nth} host in the cluster (counting from 1, i.e., {@code + * ipOfNode(1)} returns the IP of the first node. + * + *
<p>
    In multi-DC setups, nodes are numbered in ascending order of their datacenter number. E.g. + * with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. + * + * @return the IP of the {@code nth} host in the cluster. + */ + public static String ipOfNode(int n) { + return IP_PREFIX + n; + } + + /** + * Returns the address of the {@code nth} host in the cluster (counting from 1, i.e., {@code + * ipOfNode(1)} returns the address of the first node. + * + *
<p>
    In multi-DC setups, nodes are numbered in ascending order of their datacenter number. E.g. + * with 2 DCs and 3 nodes in each DC, the first node in DC 2 is number 4. + * + * @return the IP of the {@code nth} host in the cluster. + */ + public static InetAddress addressOfNode(int i) { + try { + return InetAddress.getByName(IP_PREFIX + i); + } catch (UnknownHostException e) { + // should never happen + throw Throwables.propagate(e); } - - public static Host findOrWaitForControlConnection(final Cluster cluster, long duration, TimeUnit unit) { - ControlConnection controlConnection = cluster.manager.controlConnection; - long durationNs = TimeUnit.NANOSECONDS.convert(duration, unit); - long start = System.nanoTime(); - while (System.nanoTime() - start < durationNs) { - if (controlConnection.isOpen()) { - return controlConnection.connectedHost(); + } + + public static Host findOrWaitForHost(Cluster cluster, int node, long duration, TimeUnit unit) { + return findOrWaitForHost(cluster, ipOfNode(node), duration, unit); + } + + public static Host findOrWaitForHost( + final Cluster cluster, final String address, long duration, TimeUnit unit) { + Host host = findHost(cluster, address); + if (host == null) { + final CountDownLatch addSignal = new CountDownLatch(1); + Host.StateListener addListener = + new StateListenerBase() { + @Override + public void onAdd(Host host) { + if (host.getEndPoint().resolve().getAddress().getHostAddress().equals(address)) { + // for a new node, because of this we also listen for add events. + addSignal.countDown(); + } } - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + }; + cluster.register(addListener); + try { + // Wait until an add event occurs or we timeout. + if (addSignal.await(duration, unit)) { + host = findHost(cluster, address); } + } catch (InterruptedException e) { return null; + } finally { + cluster.unregister(addListener); + } } - - public static HostConnectionPool poolOf(Session session, int hostNumber) { - SessionManager sessionManager = (SessionManager) session; - return sessionManager.pools.get(findHost(session.getCluster(), hostNumber)); - } - - public static int numberOfLocalCoreConnections(Cluster cluster) { - Configuration configuration = cluster.getConfiguration(); - return configuration.getPoolingOptions().getCoreConnectionsPerHost(HostDistance.LOCAL); + return host; + } + + public static Host findHost(Cluster cluster, int hostNumber) { + return findHost(cluster, ipOfNode(hostNumber)); + } + + public static Host findHost(Cluster cluster, String address) { + // Note: we can't rely on ProtocolOptions.getPort() to build an InetSocketAddress and call + // metadata.getHost, + // because the port doesn't have the correct value if addContactPointsWithPorts was used to + // create the Cluster + // (JAVA-860 will solve that) + for (Host host : cluster.getMetadata().allHosts()) { + if (host.getEndPoint().resolve().getAddress().getHostAddress().equals(address)) return host; } - - /** - * @return A Scassandra instance with an arbitrarily chosen binary port from 8042-8142 and admin port from - * 8052-8152. 
- */ - public static Scassandra createScassandraServer() { - int binaryPort = findAvailablePort(); - int adminPort = findAvailablePort(); - return ScassandraFactory.createServer(ipOfNode(1), binaryPort, ipOfNode(1), adminPort); + return null; + } + + public static Host findOrWaitForControlConnection( + final Cluster cluster, long duration, TimeUnit unit) { + ControlConnection controlConnection = cluster.manager.controlConnection; + long durationNs = TimeUnit.NANOSECONDS.convert(duration, unit); + long start = System.nanoTime(); + while (System.nanoTime() - start < durationNs) { + if (controlConnection.isOpen()) { + return controlConnection.connectedHost(); + } + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); } - - private static final ConcurrentMap IDENTIFIERS = new ConcurrentHashMap(); - - /** - * Generates a unique CQL identifier with the given prefix. - * - * @param prefix The prefix for the identifier - * @return a unique CQL identifier. - */ - public static String generateIdentifier(String prefix) { - AtomicInteger seq = new AtomicInteger(0); - AtomicInteger previous = IDENTIFIERS.putIfAbsent(prefix, seq); - if (previous != null) - seq = previous; - return prefix + seq.incrementAndGet(); - } - - /** - * Finds an available port in the ephemeral range. - * This is loosely inspired by Apache MINA's AvailablePortFinder. - * - * @return A local port that is currently unused. - */ - public synchronized static int findAvailablePort() throws RuntimeException { - ServerSocket ss = null; + return null; + } + + public static HostConnectionPool poolOf(Session session, int hostNumber) { + SessionManager sessionManager = (SessionManager) session; + return sessionManager.pools.get(findHost(session.getCluster(), hostNumber)); + } + + public static int numberOfLocalCoreConnections(Cluster cluster) { + Configuration configuration = cluster.getConfiguration(); + return configuration.getPoolingOptions().getCoreConnectionsPerHost(HostDistance.LOCAL); + } + + /** + * @return A Scassandra instance with an arbitrarily chosen binary port from 8042-8142 and admin + * port from 8052-8152. + */ + public static Scassandra createScassandraServer() { + int binaryPort = findAvailablePort(); + int adminPort = findAvailablePort(); + return ScassandraFactory.createServer(ipOfNode(1), binaryPort, ipOfNode(1), adminPort); + } + + private static final ConcurrentMap IDENTIFIERS = + new ConcurrentHashMap(); + + /** + * Generates a unique CQL identifier with the given prefix. + * + * @param prefix The prefix for the identifier + * @return a unique CQL identifier. + */ + public static String generateIdentifier(String prefix) { + AtomicInteger seq = new AtomicInteger(0); + AtomicInteger previous = IDENTIFIERS.putIfAbsent(prefix, seq); + if (previous != null) seq = previous; + return prefix + seq.incrementAndGet(); + } + + /** + * Finds an available port in the ephemeral range. This is loosely inspired by Apache MINA's + * AvailablePortFinder. + * + * @return A local port that is currently unused. 
+ */ + public static synchronized int findAvailablePort() throws RuntimeException { + ServerSocket ss = null; + try { + // let the system pick an ephemeral port + ss = new ServerSocket(0); + ss.setReuseAddress(true); + return ss.getLocalPort(); + } catch (IOException e) { + throw Throwables.propagate(e); + } finally { + if (ss != null) { try { - // let the system pick an ephemeral port - ss = new ServerSocket(0); - ss.setReuseAddress(true); - return ss.getLocalPort(); + ss.close(); } catch (IOException e) { - throw Throwables.propagate(e); - } finally { - if (ss != null) { - try { - ss.close(); - } catch (IOException e) { - Throwables.propagate(e); - } - } + Throwables.propagate(e); } + } } + } - private static final Predicate PORT_IS_UP = new Predicate() { + private static final Predicate PORT_IS_UP = + new Predicate() { @Override public boolean apply(InetSocketAddress address) { - return pingPort(address.getAddress(), address.getPort()); + return pingPort(address.getAddress(), address.getPort()); } - - }; - - public static void waitUntilPortIsUp(InetSocketAddress address) { - check().before(5, MINUTES).that(address, PORT_IS_UP).becomesTrue(); - } - - public static void waitUntilPortIsDown(InetSocketAddress address) { - check().before(5, MINUTES).that(address, PORT_IS_UP).becomesFalse(); - } - - public static boolean pingPort(InetAddress address, int port) { - logger.debug("Trying {}:{}...", address, port); - boolean connectionSuccessful = false; - Socket socket = null; + }; + + public static void waitUntilPortIsUp(InetSocketAddress address) { + check().before(5, MINUTES).that(address, PORT_IS_UP).becomesTrue(); + } + + public static void waitUntilPortIsDown(InetSocketAddress address) { + check().before(5, MINUTES).that(address, PORT_IS_UP).becomesFalse(); + } + + public static boolean pingPort(InetAddress address, int port) { + logger.debug("Trying {}:{}...", address, port); + boolean connectionSuccessful = false; + Socket socket = null; + try { + socket = new Socket(address, port); + connectionSuccessful = true; + logger.debug("Successfully connected"); + } catch (IOException e) { + logger.debug("Connection failed"); + } finally { + if (socket != null) try { - socket = new Socket(address, port); - connectionSuccessful = true; - logger.debug("Successfully connected"); + socket.close(); } catch (IOException e) { - logger.debug("Connection failed"); - } finally { - if (socket != null) - try { - socket.close(); - } catch (IOException e) { - logger.warn("Error closing socket to " + address, e); - } + logger.warn("Error closing socket to " + address, e); } - return connectionSuccessful; } - - /** - * @return a {@link Cluster} instance that connects only to the control host of the given cluster. - */ - public static Cluster buildControlCluster(Cluster cluster, CCMAccess ccm) { - Host controlHost = cluster.manager.controlConnection.connectedHost(); - List singleAddress = Collections.singletonList(controlHost.getSocketAddress()); - return Cluster.builder() - .addContactPoints(controlHost.getSocketAddress().getAddress()) - .withPort(ccm.getBinaryPort()) - .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), singleAddress)) - .build(); - } - - /** - * @return a {@link QueryOptions} that disables debouncing by setting intervals to 0ms. 
- */ - public static QueryOptions nonDebouncingQueryOptions() { - return new QueryOptions().setRefreshNodeIntervalMillis(0) - .setRefreshNodeListIntervalMillis(0) - .setRefreshSchemaIntervalMillis(0); - } - - /** - * A custom {@link NettyOptions} that shuts down the {@link EventLoopGroup} after - * no quiet time. This is useful for tests that consistently close clusters as - * otherwise there is a 2 second delay (from JAVA-914). - */ - public static NettyOptions nonQuietClusterCloseOptions = new NettyOptions() { + return connectionSuccessful; + } + + /** + * @return a {@link Cluster} instance that connects only to the control host of the given cluster. + */ + public static Cluster buildControlCluster(Cluster cluster, CCMAccess ccm) { + Host controlHost = cluster.manager.controlConnection.connectedHost(); + List singleAddress = + Collections.singletonList(controlHost.getEndPoint().resolve()); + return configureClusterBuilder(Cluster.builder(), ccm) + .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), singleAddress)) + .build(); + } + + /** + * Configures the builder with one contact point and port matching the given CCM cluster. + * Therefore it's not required to call {@link Cluster.Builder#addContactPoints}, it will be done + * automatically. + * + * @return The cluster builder (for method chaining). + */ + public static Builder configureClusterBuilder(Builder builder, CCMAccess ccm) { + // add only one contact point to force node1 to become the control host; some tests rely on + // that. + return configureClusterBuilder(builder, ccm, ccm.getContactPoints().get(0)); + } + + /** + * Configures the builder with binary port matching the given CCM cluster and with the given + * contact points. Therefore it's not required to call {@link Cluster.Builder#addContactPoints}, + * it will be done automatically. + * + * @return The cluster builder (for method chaining). + */ + public static Builder configureClusterBuilder( + Builder builder, CCMAccess ccm, InetAddress... contactPoints) { + builder + // use a different codec registry for each cluster instance + .withCodecRegistry(new CodecRegistry()) + .addContactPoints(contactPoints) + .withPort(ccm.getBinaryPort()); + return builder; + } + + /** @return a {@link QueryOptions} that disables debouncing by setting intervals to 0ms. */ + public static QueryOptions nonDebouncingQueryOptions() { + return new QueryOptions() + .setRefreshNodeIntervalMillis(0) + .setRefreshNodeListIntervalMillis(0) + .setRefreshSchemaIntervalMillis(0); + } + + /** + * A custom {@link NettyOptions} that shuts down the {@link EventLoopGroup} after no quiet time. + * This is useful for tests that consistently close clusters as otherwise there is a 2 second + * delay (from JAVA-914). + */ + public static NettyOptions nonQuietClusterCloseOptions = + new NettyOptions() { @Override public void onClusterClose(EventLoopGroup eventLoopGroup) { - eventLoopGroup.shutdownGracefully(0, 15, SECONDS).syncUninterruptibly(); - } - }; - - /** - * Executes a task and catches any exception. - * - * @param task The task to execute. - * @param logException Whether to log the exception, if any. - */ - public static void executeNoFail(Runnable task, boolean logException) { - try { - task.run(); - } catch (Exception e) { - if (logException) - logger.error(e.getMessage(), e); - } - } - - /** - * Executes a task and catches any exception. - * - * @param task The task to execute. - * @param logException Whether to log the exception, if any. 
- */ - public static void executeNoFail(Callable task, boolean logException) { - try { - task.call(); - } catch (Exception e) { - if (logException) - logger.error(e.getMessage(), e); + eventLoopGroup.shutdownGracefully(0, 15, SECONDS).syncUninterruptibly(); } + }; + + /** + * Executes a task and catches any exception. + * + * @param task The task to execute. + * @param logException Whether to log the exception, if any. + */ + public static void executeNoFail(Runnable task, boolean logException) { + try { + task.run(); + } catch (Exception e) { + if (logException) logger.error(e.getMessage(), e); } - - /** - * Returns the system's free memory in megabytes. - *

    - * This includes the free physical memory + the free swap memory. - */ - public static long getFreeMemoryMB() { - OperatingSystemMXBean bean = (OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); - return (bean.getFreePhysicalMemorySize() + bean.getFreeSwapSpaceSize()) / 1024 / 1024; + } + + /** + * Executes a task and catches any exception. + * + * @param task The task to execute. + * @param logException Whether to log the exception, if any. + */ + public static void executeNoFail(Callable task, boolean logException) { + try { + task.call(); + } catch (Exception e) { + if (logException) logger.error(e.getMessage(), e); + } catch (AssertionError e) { + if (logException) logger.error(e.getMessage(), e); } - - /** - * Helper for generating a DynamicCompositeType {@link ByteBuffer} from the given parameters. - *

    - * Any of params given as an Integer will be considered with a field name of 'i', any as String will - * be considered with a field name of 's'. - * - * @param params params to serialize. - * @return bytes representing a DynamicCompositeType. - */ - public static ByteBuffer serializeForDynamicCompositeType(Object... params) { - List l = new ArrayList(); - int size = 0; - for (Object p : params) { - if (p instanceof Integer) { - ByteBuffer elt = ByteBuffer.allocate(2 + 2 + 4 + 1); - elt.putShort((short) (0x8000 | 'i')); - elt.putShort((short) 4); - elt.putInt((Integer) p); - elt.put((byte) 0); - elt.flip(); - size += elt.remaining(); - l.add(elt); - } else if (p instanceof String) { - ByteBuffer bytes = ByteBuffer.wrap(((String) p).getBytes()); - ByteBuffer elt = ByteBuffer.allocate(2 + 2 + bytes.remaining() + 1); - elt.putShort((short) (0x8000 | 's')); - elt.putShort((short) bytes.remaining()); - elt.put(bytes); - elt.put((byte) 0); - elt.flip(); - size += elt.remaining(); - l.add(elt); - } else { - throw new RuntimeException(); - } - } - ByteBuffer res = ByteBuffer.allocate(size); - for (ByteBuffer bb : l) - res.put(bb); - res.flip(); - return res; + } + + /** + * Returns the system's free memory in megabytes. + * + *

    This includes the free physical memory + the free swap memory. + */ + public static long getFreeMemoryMB() { + OperatingSystemMXBean bean = + (OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); + return (bean.getFreePhysicalMemorySize() + bean.getFreeSwapSpaceSize()) / 1024 / 1024; + } + + /** + * Helper for generating a DynamicCompositeType {@link ByteBuffer} from the given parameters. + * + *

    Any of params given as an Integer will be considered with a field name of 'i', any as String + * will be considered with a field name of 's'. + * + * @param params params to serialize. + * @return bytes representing a DynamicCompositeType. + */ + public static ByteBuffer serializeForDynamicCompositeType(Object... params) { + List l = new ArrayList(); + int size = 0; + for (Object p : params) { + if (p instanceof Integer) { + ByteBuffer elt = ByteBuffer.allocate(2 + 2 + 4 + 1); + elt.putShort((short) (0x8000 | 'i')); + elt.putShort((short) 4); + elt.putInt((Integer) p); + elt.put((byte) 0); + elt.flip(); + size += elt.remaining(); + l.add(elt); + } else if (p instanceof String) { + ByteBuffer bytes = ByteBuffer.wrap(((String) p).getBytes()); + ByteBuffer elt = ByteBuffer.allocate(2 + 2 + bytes.remaining() + 1); + elt.putShort((short) (0x8000 | 's')); + elt.putShort((short) bytes.remaining()); + elt.put(bytes); + elt.put((byte) 0); + elt.flip(); + size += elt.remaining(); + l.add(elt); + } else { + throw new RuntimeException(); + } } - - /** - * Helper for generating a Composite {@link ByteBuffer} from the given parameters. - *

    - * Expects Integer and String types for parameters. - * - * @param params params to serialize. - * @return bytes representing a CompositeType - */ - public static ByteBuffer serializeForCompositeType(Object... params) { - - List l = new ArrayList(); - int size = 0; - for (Object p : params) { - if (p instanceof Integer) { - ByteBuffer elt = ByteBuffer.allocate(2 + 4 + 1); - elt.putShort((short) 4); - elt.putInt((Integer) p); - elt.put((byte) 0); - elt.flip(); - size += elt.remaining(); - l.add(elt); - } else if (p instanceof String) { - ByteBuffer bytes = ByteBuffer.wrap(((String) p).getBytes()); - ByteBuffer elt = ByteBuffer.allocate(2 + bytes.remaining() + 1); - elt.putShort((short) bytes.remaining()); - elt.put(bytes); - elt.put((byte) 0); - elt.flip(); - size += elt.remaining(); - l.add(elt); - } else { - throw new RuntimeException(); - } - } - ByteBuffer res = ByteBuffer.allocate(size); - for (ByteBuffer bb : l) - res.put(bb); - res.flip(); - return res; + ByteBuffer res = ByteBuffer.allocate(size); + for (ByteBuffer bb : l) res.put(bb); + res.flip(); + return res; + } + + /** + * Helper for generating a Composite {@link ByteBuffer} from the given parameters. + * + *

    Expects Integer and String types for parameters. + * + * @param params params to serialize. + * @return bytes representing a CompositeType + */ + public static ByteBuffer serializeForCompositeType(Object... params) { + + List l = new ArrayList(); + int size = 0; + for (Object p : params) { + if (p instanceof Integer) { + ByteBuffer elt = ByteBuffer.allocate(2 + 4 + 1); + elt.putShort((short) 4); + elt.putInt((Integer) p); + elt.put((byte) 0); + elt.flip(); + size += elt.remaining(); + l.add(elt); + } else if (p instanceof String) { + ByteBuffer bytes = ByteBuffer.wrap(((String) p).getBytes()); + ByteBuffer elt = ByteBuffer.allocate(2 + bytes.remaining() + 1); + elt.putShort((short) bytes.remaining()); + elt.put(bytes); + elt.put((byte) 0); + elt.flip(); + size += elt.remaining(); + l.add(elt); + } else { + throw new RuntimeException(); + } } - - public static Level setLogLevel(Class logger, Level newLevel) { - return setLogLevel(logger.getName(), newLevel); + ByteBuffer res = ByteBuffer.allocate(size); + for (ByteBuffer bb : l) res.put(bb); + res.flip(); + return res; + } + + public static Level setLogLevel(Class logger, Level newLevel) { + return setLogLevel(logger.getName(), newLevel); + } + + public static Level setLogLevel(Logger logger, Level newLevel) { + return setLogLevel(logger.getName(), newLevel); + } + + public static Level setLogLevel(String logger, Level newLevel) { + org.apache.log4j.Logger log4jLogger = org.apache.log4j.Logger.getLogger(logger); + Level oldLevel = log4jLogger.getLevel(); + log4jLogger.setLevel(newLevel); + return oldLevel; + } + + /** + * Throws a {@link SkipException} if the input {@link CCMAccess} does not support compact storage + * (C* 4.0+ or DSE 6.0+). + * + * @param ccm cluster to check against + */ + public static void compactStorageSupportCheck(CCMAccess ccm) { + if (ccm.getCassandraVersion().nextStable().compareTo(VersionNumber.parse("4.0")) >= 0) { + throw new SkipException( + "Compact tables are not allowed in Cassandra starting with 4.0 version"); } - - public static Level setLogLevel(Logger logger, Level newLevel) { - return setLogLevel(logger.getName(), newLevel); + if (ccm.getDSEVersion() != null + && ccm.getDSEVersion().compareTo(VersionNumber.parse("6.0")) >= 0) { + throw new SkipException("Compact tables are not allowed in DSE starting with 6.0 version"); } - - public static Level setLogLevel(String logger, Level newLevel) { - org.apache.log4j.Logger log4jLogger = org.apache.log4j.Logger.getLogger(logger); - Level oldLevel = log4jLogger.getLevel(); - log4jLogger.setLevel(newLevel); - return oldLevel; - } - + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGeneratorTest.java b/driver-core/src/test/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGeneratorTest.java index 4bb081e86c7..6fdf9378b49 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGeneratorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ThreadLocalMonotonicTimestampGeneratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,106 +17,114 @@ */ package com.datastax.driver.core; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.*; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.Uninterruptibles; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.testng.annotations.Test; public class ThreadLocalMonotonicTimestampGeneratorTest { - @Test(groups = "unit") - public void should_generate_incrementing_timestamps_for_each_thread() throws InterruptedException { - // Create a generator with a fixed millisecond value - final long fixedTime = 1; - final ThreadLocalMonotonicTimestampGenerator generator = new ThreadLocalMonotonicTimestampGenerator(); - generator.clock = new MockClocks.FixedTimeClock(fixedTime); + @Test(groups = "unit") + public void should_generate_incrementing_timestamps_for_each_thread() + throws InterruptedException { + // Create a generator with a fixed millisecond value + final long fixedTime = 1; + final ThreadLocalMonotonicTimestampGenerator generator = + new ThreadLocalMonotonicTimestampGenerator(); + generator.clock = new MockClocks.FixedTimeClock(fixedTime); - // Generate 1000 timestamps on each thread - int testThreadsCount = 2; - ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(testThreadsCount)); + // Generate 1000 timestamps on each thread + int testThreadsCount = 2; + ListeningExecutorService executor = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(testThreadsCount)); - List> futures = Lists.newArrayListWithExpectedSize(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - futures.add(executor.submit( - new Runnable() { - @Override - public void run() { - // Ensure that each thread gets the 1000 microseconds for the mocked millisecond value, - // in order - for (int i = 0; i < 1000; i++) - assertEquals(generator.next(), fixedTime + i); - } - })); - } - executor.shutdown(); - executor.awaitTermination(1, TimeUnit.SECONDS); + List> futures = Lists.newArrayListWithExpectedSize(testThreadsCount); + for (int i = 0; i < testThreadsCount; i++) { + futures.add( + executor.submit( + new Runnable() { + @Override + public void run() { + // Ensure that each thread gets the 1000 microseconds for the mocked 
millisecond + // value, + // in order + for (int i = 0; i < 1000; i++) assertEquals(generator.next(), fixedTime + i); + } + })); + } + executor.shutdown(); + executor.awaitTermination(1, TimeUnit.SECONDS); - try { - Futures.allAsList(futures).get(); - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof AssertionError) - throw (AssertionError) cause; - else - fail("Error in a test thread", cause); - } + try { + Futures.allAsList(futures).get(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof AssertionError) throw (AssertionError) cause; + else fail("Error in a test thread", cause); } + } - @Test(groups = "unit") - public void should_generate_incrementing_timestamps_on_clock_resync() { - ThreadLocalMonotonicTimestampGenerator generator = new ThreadLocalMonotonicTimestampGenerator(0, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); - generator.clock = new MockClocks.BackInTimeClock(); + @Test(groups = "unit") + public void should_generate_incrementing_timestamps_on_clock_resync() { + ThreadLocalMonotonicTimestampGenerator generator = + new ThreadLocalMonotonicTimestampGenerator(0, TimeUnit.SECONDS, 1, TimeUnit.SECONDS); + generator.clock = new MockClocks.BackInTimeClock(); - MemoryAppender appender = new MemoryAppender(); - Logger logger = Logger.getLogger(TimestampGenerator.class); - Level originalLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - String logFormat = "Clock skew detected: current tick (%d) was %d microseconds " + - "behind the last generated timestamp (%d), returned timestamps will be artificially incremented " + - "to guarantee monotonicity."; + MemoryAppender appender = new MemoryAppender(); + Logger logger = Logger.getLogger(TimestampGenerator.class); + Level originalLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(appender); + String logFormat = + "Clock skew detected: current tick (%d) was %d microseconds " + + "behind the last generated timestamp (%d), returned timestamps will be artificially incremented " + + "to guarantee monotonicity."; - try { - long start = generator.next(); - long previous = start; - long next = 0; - for (int i = 0; i < 1001; i++) { - next = generator.next(); - assertEquals(next, previous + 1); - previous = next; - } + try { + long start = generator.next(); + long previous = start; + long next = 0; + for (int i = 0; i < 1001; i++) { + next = generator.next(); + assertEquals(next, previous + 1); + previous = next; + } - // Ensure log statement generated indicating clock skew, but only once. - assertEquals(next, start + 1001); - assertThat(appender.getNext()) - .containsOnlyOnce("Clock skew detected:") - .containsOnlyOnce(String.format(logFormat, start - 1, 1, start)); + // Ensure log statement generated indicating clock skew, but only once. + assertEquals(next, start + 1001); + assertThat(appender.getNext()) + .containsOnlyOnce("Clock skew detected:") + .containsOnlyOnce(String.format(logFormat, start - 1, 1, start)); - // Wait for 1.1 seconds to see if we get an additional clock skew message. We wait slightly longer - // than 1 second to deal with system clock precision on platforms like windows. - Uninterruptibles.sleepUninterruptibly(1100, TimeUnit.MILLISECONDS); + // Wait for 1.1 seconds to see if we get an additional clock skew message. We wait slightly + // longer + // than 1 second to deal with system clock precision on platforms like windows. 
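          // Aside (illustrative sketch of the rule this test relies on; hypothetical helper, not
          // the driver's actual implementation): a monotonic generator effectively computes
          //   long next(long clockTickMicros, long lastReturnedMicros) {
          //     return clockTickMicros > lastReturnedMicros ? clockTickMicros : lastReturnedMicros + 1;
          //   }
          // so even while the mocked clock runs backwards, each call above still sees the
          // previous value plus one.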
+ Uninterruptibles.sleepUninterruptibly(1100, TimeUnit.MILLISECONDS); - next = generator.next(); - assertThat(next).isEqualTo(previous + 1); - // Clock has gone backwards 1002 us since we've had that many iterations. - // The difference should be 2003 (clock backwards 1002 + 1001 prior compute next calls). - // Current timestamp should match the previous one. - assertThat(appender.getNext()) - .containsOnlyOnce("Clock skew detected:") - .containsOnlyOnce(String.format(logFormat, start - 1002, 2003, previous)); - } finally { - logger.removeAppender(appender); - logger.setLevel(originalLevel); - } + next = generator.next(); + assertThat(next).isEqualTo(previous + 1); + // Clock has gone backwards 1002 us since we've had that many iterations. + // The difference should be 2003 (clock backwards 1002 + 1001 prior compute next calls). + // Current timestamp should match the previous one. + assertThat(appender.getNext()) + .containsOnlyOnce("Clock skew detected:") + .containsOnlyOnce(String.format(logFormat, start - 1002, 2003, previous)); + } finally { + logger.removeAppender(appender); + logger.setLevel(originalLevel); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ThreadingOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ThreadingOptionsTest.java index 637bc50ef98..f0bc1c9f3e7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ThreadingOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ThreadingOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,148 +17,172 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.verify; +import static org.scassandra.http.client.ClosedConnectionReport.CloseType.CLOSE; + import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.util.concurrent.DefaultThreadFactory; -import org.assertj.core.api.iterable.Extractor; -import org.mockito.Mockito; -import org.testng.annotations.Test; - import java.util.Set; -import java.util.concurrent.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.verify; -import static org.scassandra.http.client.ClosedConnectionReport.CloseType.CLOSE; +import org.assertj.core.api.iterable.Extractor; +import org.mockito.Mockito; +import org.testng.annotations.Test; public class ThreadingOptionsTest extends ScassandraTestBase { - private String customPrefix = "custom"; + private String customPrefix = "custom"; - private ThreadingOptions threadingOptions = new ThreadingOptions() { + private ThreadingOptions threadingOptions = + new ThreadingOptions() { @Override public ThreadFactory createThreadFactory(String clusterName, String executorName) { - return new ThreadFactoryBuilder() - .setNameFormat(clusterName + "-" + customPrefix + "-" + executorName + "-%d") - // Back with Netty's thread factory in order to create FastThreadLocalThread instances. This allows - // an optimization around ThreadLocals (we could use DefaultThreadFactory directly but it creates - // slightly different thread names, so keep we keep a ThreadFactoryBuilder wrapper for backward - // compatibility). - .setThreadFactory(new DefaultThreadFactory("ignored name")) - .setDaemon(true) - .build(); + return new ThreadFactoryBuilder() + .setNameFormat(clusterName + "-" + customPrefix + "-" + executorName + "-%d") + // Back with Netty's thread factory in order to create FastThreadLocalThread + // instances. This allows + // an optimization around ThreadLocals (we could use DefaultThreadFactory directly but + // it creates + // slightly different thread names, so keep we keep a ThreadFactoryBuilder wrapper for + // backward + // compatibility). 
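              // Aside (illustrative): with customPrefix = "custom", the name format above produces
              // threads named like "<clusterName>-custom-myExecutor-0", which is exactly the shape
              // that threadNamePattern later in this test is written to match.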
+ .setThreadFactory(new DefaultThreadFactory("ignored name")) + .setDaemon(true) + .build(); } @Override public ExecutorService createExecutor(String clusterName) { - return new ThreadPoolExecutor(1, 1, - 0L, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), - createThreadFactory(clusterName, "myExecutor") - ); + return new ThreadPoolExecutor( + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), + createThreadFactory(clusterName, "myExecutor")); } @Override public ExecutorService createBlockingExecutor(String clusterName) { - return new ThreadPoolExecutor(1, 1, - 0L, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), - createThreadFactory(clusterName, "myBlockingExecutor") - ); + return new ThreadPoolExecutor( + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), + createThreadFactory(clusterName, "myBlockingExecutor")); } @Override public ScheduledExecutorService createReconnectionExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(1, createThreadFactory(clusterName, "myReconnection")); + return new ScheduledThreadPoolExecutor( + 1, createThreadFactory(clusterName, "myReconnection")); } @Override public ScheduledExecutorService createScheduledTasksExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(1, createThreadFactory(clusterName, "myScheduled-task-worker")); + return new ScheduledThreadPoolExecutor( + 1, createThreadFactory(clusterName, "myScheduled-task-worker")); } @Override public ScheduledExecutorService createReaperExecutor(String clusterName) { - return new ScheduledThreadPoolExecutor(1, createThreadFactory(clusterName, "myConnection-reaper")); + return new ScheduledThreadPoolExecutor( + 1, createThreadFactory(clusterName, "myConnection-reaper")); } - }; - - /** - * Validates that when using a provided {@link ThreadingOptions} that its methods are used for creating - * executors and that its {@link ThreadingOptions#createThreadFactory(String, String)} is used for initializing - * netty resources. - * - * @test_category configuration - */ - @Test(groups = "short") - public void should_use_provided_threading_options() { - ThreadingOptions spy = Mockito.spy(threadingOptions); - Cluster cluster = createClusterBuilder().withPoolingOptions(new PoolingOptions() - .setConnectionsPerHost(HostDistance.LOCAL, 1, 1)) - .withReconnectionPolicy(new ConstantReconnectionPolicy(100)) - .withThreadingOptions(spy).build(); - try { - String clusterName = cluster.getClusterName(); - cluster.init(); - - // Ensure each method was invoked appropriately: - // 1) 1 time for each create*Executor. - // 2) createThreadFactory for netty executor group and timeouter. - verify(spy).createExecutor(clusterName); - verify(spy).createBlockingExecutor(clusterName); - verify(spy).createReconnectionExecutor(clusterName); - verify(spy).createScheduledTasksExecutor(clusterName); - verify(spy).createReaperExecutor(clusterName); - verify(spy).createThreadFactory(clusterName, "nio-worker"); - verify(spy).createThreadFactory(clusterName, "timeouter"); - - cluster.connect(); - - // Close all connections bringing the host down, this should cause some activity on - // executor and reconnection executor. 
- currentClient.disableListener(); - currentClient.closeConnections(CLOSE); - TestUtils.waitForDown(TestUtils.IP_PREFIX + "1", cluster); - currentClient.enableListener(); - TestUtils.waitForUp(TestUtils.IP_PREFIX + "1", cluster); - - Set threads = Thread.getAllStackTraces().keySet(); - for(Thread thread : threads) { - // all threads should use the custom factory and thus be marked daemon - if(thread.getName().startsWith(clusterName + "-" + customPrefix)) { - // all created threads should be daemon this should indicate that our custom thread factory was - // used. - assertThat(thread.isDaemon()).isTrue(); - } - } + }; + + /** + * Validates that when using a provided {@link ThreadingOptions} that its methods are used for + * creating executors and that its {@link ThreadingOptions#createThreadFactory(String, String)} is + * used for initializing netty resources. + * + * @test_category configuration + */ + @Test(groups = "short") + public void should_use_provided_threading_options() { + ThreadingOptions spy = Mockito.spy(threadingOptions); + Cluster cluster = + createClusterBuilder() + .withPoolingOptions( + new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, 1, 1)) + .withReconnectionPolicy(new ConstantReconnectionPolicy(100)) + .withThreadingOptions(spy) + .build(); + try { + String clusterName = cluster.getClusterName(); + cluster.init(); + + // Ensure each method was invoked appropriately: + // 1) 1 time for each create*Executor. + // 2) createThreadFactory for netty executor group and timeouter. + verify(spy).createExecutor(clusterName); + verify(spy).createBlockingExecutor(clusterName); + verify(spy).createReconnectionExecutor(clusterName); + verify(spy).createScheduledTasksExecutor(clusterName); + verify(spy).createReaperExecutor(clusterName); + verify(spy).createThreadFactory(clusterName, "nio-worker"); + verify(spy).createThreadFactory(clusterName, "timeouter"); + + cluster.connect(); + + // Close all connections bringing the host down, this should cause some activity on + // executor and reconnection executor. + currentClient.disableListener(); + currentClient.closeConnections(CLOSE); + TestUtils.waitForDown(TestUtils.IP_PREFIX + "1", cluster); + currentClient.enableListener(); + TestUtils.waitForUp(TestUtils.IP_PREFIX + "1", cluster); + + Set threads = Thread.getAllStackTraces().keySet(); + for (Thread thread : threads) { + // all threads should use the custom factory and thus be marked daemon + if (thread.getName().startsWith(clusterName + "-" + customPrefix)) { + // all created threads should be daemon this should indicate that our custom thread + // factory was + // used. + assertThat(thread.isDaemon()).isTrue(); + } + } - final Pattern threadNamePattern = Pattern.compile(clusterName + "-" + customPrefix + "-(.*)-0"); + final Pattern threadNamePattern = + Pattern.compile(clusterName + "-" + customPrefix + "-(.*)-0"); - // Custom executor threads should be present. - // NOTE: we don't validate blocking executor since it is hard to deterministically cause it to be used. - assertThat(threads).extracting(new Extractor() { + // Custom executor threads should be present. + // NOTE: we don't validate blocking executor since it is hard to deterministically cause it to + // be used. 
+ assertThat(threads) + .extracting( + new Extractor() { @Override public String extract(Thread thread) { - Matcher matcher = threadNamePattern.matcher(thread.getName()); - if(matcher.matches()) { - return matcher.group(1); - } else { - return thread.getName(); - } + Matcher matcher = threadNamePattern.matcher(thread.getName()); + if (matcher.matches()) { + return matcher.group(1); + } else { + return thread.getName(); + } } - }).contains( - "nio-worker", - "timeouter", - "myExecutor", - "myReconnection", - "myScheduled-task-worker", - "myConnection-reaper" - ); - } finally { - cluster.close(); - } + }) + .contains( + "nio-worker", + "timeouter", + "myExecutor", + "myReconnection", + "myScheduled-task-worker", + "myConnection-reaper"); + } finally { + cluster.close(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java index 05990129cf0..0111f5cedbf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +17,15 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; + import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.policies.ConstantReconnectionPolicy; import com.datastax.driver.core.utils.SocketChannelMonitor; import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.common.util.concurrent.Uninterruptibles; import io.netty.channel.socket.SocketChannel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.annotations.Test; - import java.util.Collection; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -34,211 +33,244 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - -import static com.datastax.driver.core.Assertions.assertThat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; @CCMConfig(numberOfNodes = 3) public class TimeoutStressTest extends CCMTestsSupport { - static final Logger logger = LoggerFactory.getLogger(TimeoutStressTest.class); - - // Maximum number of concurrent queries running at a given time. 
- static final int CONCURRENT_QUERIES = 25; - - // How long the test should run for, may want to consider running for longer periods to time to check for leaks - // that could occur over very tiny timing windows. - static final long DURATION = 60000; - - // Configured read timeout - this may need to be tuned to the host system running the test. - static final int READ_TIMEOUT_IN_MS = 50; - - // Configured connection timeout - this may need to be tuned to the host system running the test. - static final int CONNECTION_TIMEOUT_IN_MS = 20; - - private static AtomicInteger executedQueries = new AtomicInteger(0); + static final Logger logger = LoggerFactory.getLogger(TimeoutStressTest.class); + + // Maximum number of concurrent queries running at a given time. + static final int CONCURRENT_QUERIES = 25; + + // How long the test should run for, may want to consider running for longer periods to time to + // check for leaks + // that could occur over very tiny timing windows. + static final long DURATION = 60000; + + // Configured read timeout - this may need to be tuned to the host system running the test. + static final int READ_TIMEOUT_IN_MS = 50; + + // Configured connection timeout - this may need to be tuned to the host system running the test. + static final int CONNECTION_TIMEOUT_IN_MS = 20; + + private static AtomicInteger executedQueries = new AtomicInteger(0); + + private SocketChannelMonitor channelMonitor; + + @Override + public Cluster.Builder createClusterBuilder() { + channelMonitor = register(new SocketChannelMonitor()); + PoolingOptions poolingOptions = + new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, 8, 8); + return super.createClusterBuilder() + .withPoolingOptions(poolingOptions) + .withNettyOptions(channelMonitor.nettyOptions()) + .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)); + } + + @Override + public void onTestContextInitialized() { + execute( + "create table record (\n" + + " name text,\n" + + " phone text,\n" + + " value text,\n" + + " PRIMARY KEY (name, phone)\n" + + ")"); + } + + /** + * Validates that under extreme timeout conditions the driver is able to properly maintain + * connection pools in addition to not leaking connections. + * + *

    + * + *

    Does the following: + * + *

      + *
+   * <ol>
+   *   <li>Creates a table and loads 30k rows in a single partition.
+   *   <li>Sets the connection and read timeout {@link SocketOptions} to very low values.
+   *   <li>Spawns workers that concurrently execute queries.
+   *   <li>For some duration, repeatedly measures number of open socket connections and warns if
+   *       exceeded.
+   *   <li>After a duration, resets {@link SocketOptions} to defaults.
+   *   <li>Wait for 20 seconds for reaper to remove old connections and restore pools.
+   *   <li>Ensure pools are restored.
+   *   <li>Shutdown session and ensure that there remains only 1 open connection.
+   * </ol>
    + * + * @test_category connection:connection_pool + * @expected_result no connections leak and all host pools are maintained. + * @jira_ticket JAVA-692 + * @since 2.0.10, 2.1.6 + */ + @Test(groups = "stress") + public void host_state_should_be_maintained_with_timeouts() throws Exception { + insertRecords(); + session().close(); + + // Set very low timeouts. + cluster() + .getConfiguration() + .getSocketOptions() + .setConnectTimeoutMillis(CONNECTION_TIMEOUT_IN_MS); + cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(READ_TIMEOUT_IN_MS); + Session newSession = cluster().connect(keyspace); + PreparedStatement statement = + newSession.prepare("select * from record where name=? limit 1000;"); + + int workers = Runtime.getRuntime().availableProcessors(); + ExecutorService workerPool = + Executors.newFixedThreadPool( + workers, + new ThreadFactoryBuilder() + .setNameFormat("timeout-stress-test-worker-%d") + .setDaemon(true) + .build()); + + AtomicBoolean stopped = new AtomicBoolean(false); + + // Ensure that we never exceed MaxConnectionsPerHost * nodes + 1 control connection. + int maxConnections = + TestUtils.numberOfLocalCoreConnections(cluster()) * getContactPoints().size() + 1; + + try { + Semaphore concurrentQueries = new Semaphore(CONCURRENT_QUERIES); + for (int i = 0; i < workers; i++) { + workerPool.submit( + new TimeoutStressWorker(newSession, statement, concurrentQueries, stopped)); + } + + long startTime = System.currentTimeMillis(); + while (System.currentTimeMillis() - startTime < DURATION) { + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + channelMonitor.report(); + // Some connections that are being closed may have had active requests which are delegated + // to the + // reaper for cleanup later. + Collection openChannels = + channelMonitor.openChannels(getContactPointsWithPorts()); + + // Ensure that we don't exceed maximum connections. Log as warning as there will be a bit + // of a timing + // factor between retrieving open connections and checking the reaper. + if (openChannels.size() > maxConnections) { + logger.warn( + "{} of open channels: {} exceeds maximum expected: {}. " + + "This could be because there are connections to be cleaned up in the reaper.", + openChannels.size(), + maxConnections, + openChannels); + } + } + } finally { + stopped.set(true); + + // Reset socket timeouts to allow pool to recover. + cluster() + .getConfiguration() + .getSocketOptions() + .setConnectTimeoutMillis(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS); + cluster() + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); + + logger.debug( + "Sleeping 20 seconds to allow connection reaper to clean up connections " + + "and for the pools to recover."); + Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); + + Collection openChannels = + channelMonitor.openChannels(getContactPointsWithPorts()); + assertThat(openChannels.size()) + .as("Number of open connections does not meet expected: %s", openChannels) + .isLessThanOrEqualTo(maxConnections); + + // Each host should be in an up state. 
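      // Aside (illustrative): with the pooling options from createClusterBuilder() above
      // (8 core connections per LOCAL host) and the 3-node cluster from @CCMConfig, the ceiling
      // computed earlier works out to maxConnections = 8 * 3 + 1 (control connection) = 25,
      // assuming getContactPoints() reports all three nodes.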
+ assertThat(cluster()).host(1).comesUpWithin(0, TimeUnit.SECONDS); + assertThat(cluster()).host(2).comesUpWithin(0, TimeUnit.SECONDS); + assertThat(cluster()).host(3).comesUpWithin(0, TimeUnit.SECONDS); + + newSession.close(); + + openChannels = channelMonitor.openChannels(getContactPointsWithPorts()); + assertThat(openChannels.size()) + .as("Number of open connections does not meet expected: %s", openChannels) + .isEqualTo(1); + + workerPool.shutdown(); + } + } - private SocketChannelMonitor channelMonitor; + private void insertRecords() { + int records = 30000; + PreparedStatement insertStmt = + session().prepare("insert into record (name, phone, value) values (?, ?, ?)"); - @Override - public Cluster.Builder createClusterBuilder() { - channelMonitor = register(new SocketChannelMonitor()); - PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, 8, 8); - return Cluster.builder() - .withPoolingOptions(poolingOptions) - .withNettyOptions(channelMonitor.nettyOptions()) - .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)); + for (int i = 0; i < records; i++) { + if (i % 1000 == 0) logger.debug("Inserting record {}.", i); + session().execute(insertStmt.bind("0", Integer.toString(i), "test")); } - - @Override - public void onTestContextInitialized() { - execute( - "create table record (\n" - + " name text,\n" - + " phone text,\n" - + " value text,\n" - + " PRIMARY KEY (name, phone)\n" - + ")" - - ); + logger.debug("Inserts complete."); + } + + public static class TimeoutStressWorker implements Runnable { + + private final Semaphore concurrentQueries; + private final AtomicBoolean stopped; + private final Session session; + private final PreparedStatement statement; + + public TimeoutStressWorker( + Session session, + PreparedStatement statement, + Semaphore concurrentQueries, + AtomicBoolean stopped) { + this.session = session; + this.statement = statement; + this.concurrentQueries = concurrentQueries; + this.stopped = stopped; } - /** - *

    - * Validates that under extreme timeout conditions the driver is able to properly maintain connection pools in - * addition to not leaking connections. - *

    - *

    - * Does the following: - *

      - *
-     * <ol>
-     * <li>Creates a table and loads 30k rows in a single partition.</li>
-     * <li>Sets the connection and read timeout {@link SocketOptions} to very low values.</li>
-     * <li>Spawns workers that concurrently execute queries.</li>
-     * <li>For some duration, repeatedly measures number of open socket connections and warns if exceeded.</li>
-     * <li>After a duration, resets {@link SocketOptions} to defaults.</li>
-     * <li>Wait for 20 seconds for reaper to remove old connections and restore pools.</li>
-     * <li>Ensure pools are restored.</li>
-     * <li>Shutdown session and ensure that there remains only 1 open connection.</li>
-     * </ol>
    - * - * @test_category connection:connection_pool - * @expected_result no connections leak and all host pools are maintained. - * @jira_ticket JAVA-692 - * @since 2.0.10, 2.1.6 - */ - @Test(groups = "stress") - public void host_state_should_be_maintained_with_timeouts() throws Exception { - insertRecords(); - session().close(); - - // Set very low timeouts. - cluster().getConfiguration().getSocketOptions().setConnectTimeoutMillis(CONNECTION_TIMEOUT_IN_MS); - cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(READ_TIMEOUT_IN_MS); - Session newSession = cluster().connect(keyspace); - PreparedStatement statement = newSession.prepare("select * from record where name=? limit 1000;"); - - int workers = Runtime.getRuntime().availableProcessors(); - ExecutorService workerPool = Executors.newFixedThreadPool(workers, - new ThreadFactoryBuilder().setNameFormat("timeout-stress-test-worker-%d").setDaemon(true).build()); - - AtomicBoolean stopped = new AtomicBoolean(false); - - // Ensure that we never exceed MaxConnectionsPerHost * nodes + 1 control connection. - int maxConnections = TestUtils.numberOfLocalCoreConnections(cluster()) * getContactPoints().size() + 1; + @Override + public void run() { + while (!stopped.get()) { try { - Semaphore concurrentQueries = new Semaphore(CONCURRENT_QUERIES); - for (int i = 0; i < workers; i++) { - workerPool.submit(new TimeoutStressWorker(newSession, statement, concurrentQueries, stopped)); - } - - long startTime = System.currentTimeMillis(); - while (System.currentTimeMillis() - startTime < DURATION) { - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - channelMonitor.report(); - // Some connections that are being closed may have had active requests which are delegated to the - // reaper for cleanup later. - Collection openChannels = channelMonitor.openChannels(getContactPointsWithPorts()); - - // Ensure that we don't exceed maximum connections. Log as warning as there will be a bit of a timing - // factor between retrieving open connections and checking the reaper. - if (openChannels.size() > maxConnections) { - logger.warn("{} of open channels: {} exceeds maximum expected: {}. " + - "This could be because there are connections to be cleaned up in the reaper.", - openChannels.size(), maxConnections, openChannels); + concurrentQueries.acquire(); + ResultSetFuture future = session.executeAsync(statement.bind("0")); + GuavaCompatibility.INSTANCE.addCallback( + future, + new FutureCallback() { + + @Override + public void onSuccess(ResultSet result) { + concurrentQueries.release(); + if (executedQueries.incrementAndGet() % 1000 == 0) + logger.debug( + "Successfully executed {}. rows: {}", + executedQueries.get(), + result.getAvailableWithoutFetching()); } - } - } finally { - stopped.set(true); - - // Reset socket timeouts to allow pool to recover. 
- cluster().getConfiguration().getSocketOptions() - .setConnectTimeoutMillis(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS); - cluster().getConfiguration().getSocketOptions() - .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - - logger.debug("Sleeping 20 seconds to allow connection reaper to clean up connections " + - "and for the pools to recover."); - Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); - - Collection openChannels = channelMonitor.openChannels(getContactPointsWithPorts()); - assertThat(openChannels.size()) - .as("Number of open connections does not meet expected: %s", openChannels) - .isLessThanOrEqualTo(maxConnections); - - // Each host should be in an up state. - assertThat(cluster()).host(1).comesUpWithin(0, TimeUnit.SECONDS); - assertThat(cluster()).host(2).comesUpWithin(0, TimeUnit.SECONDS); - assertThat(cluster()).host(3).comesUpWithin(0, TimeUnit.SECONDS); - - newSession.close(); - - openChannels = channelMonitor.openChannels(getContactPointsWithPorts()); - assertThat(openChannels.size()) - .as("Number of open connections does not meet expected: %s", openChannels) - .isEqualTo(1); - - workerPool.shutdown(); - } - } - private void insertRecords() { - int records = 30000; - PreparedStatement insertStmt = session().prepare("insert into record (name, phone, value) values (?, ?, ?)"); - - for (int i = 0; i < records; i++) { - if (i % 1000 == 0) - logger.debug("Inserting record {}.", i); - session().execute(insertStmt.bind("0", Integer.toString(i), "test")); - } - logger.debug("Inserts complete."); - } - - public static class TimeoutStressWorker implements Runnable { - - private final Semaphore concurrentQueries; - private final AtomicBoolean stopped; - private final Session session; - private final PreparedStatement statement; - - public TimeoutStressWorker(Session session, PreparedStatement statement, Semaphore concurrentQueries, AtomicBoolean stopped) { - this.session = session; - this.statement = statement; - this.concurrentQueries = concurrentQueries; - this.stopped = stopped; - } - - - @Override - public void run() { - - while (!stopped.get()) { - try { - concurrentQueries.acquire(); - ResultSetFuture future = session.executeAsync(statement.bind("0")); - Futures.addCallback(future, new FutureCallback() { - - @Override - public void onSuccess(ResultSet result) { - concurrentQueries.release(); - if (executedQueries.incrementAndGet() % 1000 == 0) - logger.debug("Successfully executed {}. 
rows: {}", executedQueries.get(), result.getAvailableWithoutFetching()); - } - - @Override - public void onFailure(Throwable t) { - concurrentQueries.release(); - if (t instanceof NoHostAvailableException) { - //logger.error("NHAE: {}", t.getMessage()); - } else { - //logger.error("Exception", t); - } - } - }); - } catch (Exception e) { - logger.error("Failure while submitting query.", e); + @Override + public void onFailure(Throwable t) { + concurrentQueries.release(); + if (t instanceof NoHostAvailableException) { + // logger.error("NHAE: {}", t.getMessage()); + } else { + // logger.error("Exception", t); + } } - } - + }); + } catch (Exception e) { + logger.error("Failure while submitting query.", e); } + } } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TokenIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TokenIntegrationTest.java index 701d06ef849..f9bbeaa820a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TokenIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TokenIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; + import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.RoundRobinPolicy; @@ -23,373 +27,393 @@ import com.google.common.base.Function; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import org.testng.annotations.Test; - import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; - -import static com.datastax.driver.core.Assertions.assertThat; +import org.testng.annotations.Test; /** * This class uses subclasses for each type of partitioner. - *

    - * There's normally a way to parametrize a TestNG class with @Factory and @DataProvider, - * but it doesn't seem to work with multiple methods. + * + *

    There's normally a way to parametrize a TestNG class with @Factory and @DataProvider, but it + * doesn't seem to work with multiple methods. */ @CCMConfig(numberOfNodes = 3, createKeyspace = false) public abstract class TokenIntegrationTest extends CCMTestsSupport { - private final DataType expectedTokenType; - private final int numTokens; - private final boolean useVnodes; - private String ks1; - private String ks2; - - public TokenIntegrationTest(DataType expectedTokenType, boolean useVnodes) { - this.expectedTokenType = expectedTokenType; - this.numTokens = useVnodes ? 256 : 1; - this.useVnodes = useVnodes; - } - - @Override - public Cluster.Builder createClusterBuilder() { - // Only connect to node 1, which makes it easier to query system tables in should_expose_tokens_per_host() - LoadBalancingPolicy lbp = new WhiteListPolicy(new RoundRobinPolicy(), - Collections.singleton(ccm().addressOfNode(1))); - return Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(lbp); - } - - @Override - public void onTestContextInitialized() { - ks1 = TestUtils.generateIdentifier("ks_"); - ks2 = TestUtils.generateIdentifier("ks_"); - execute( - String.format("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", ks1), - String.format("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}", ks2), - String.format("USE %s", ks1), - "CREATE TABLE foo(i int primary key)", - "INSERT INTO foo (i) VALUES (1)", - "INSERT INTO foo (i) VALUES (2)", - "INSERT INTO foo (i) VALUES (3)" - ); - } - - /** - *

    - * Validates that {@link TokenRange}s are exposed via a {@link Cluster}'s {@link Metadata} and they - * can be used to query data. - *

    - * - * @test_category metadata:token - * @expected_result token ranges are exposed and usable. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short") - public void should_expose_token_ranges() throws Exception { - Metadata metadata = cluster().getMetadata(); - - // Find the replica for a given partition key - int testKey = 1; - Set replicas = metadata.getReplicas(ks1, TypeCodec.cint().serialize(testKey, cluster().getConfiguration().getProtocolOptions().getProtocolVersion())); - assertThat(replicas).hasSize(1); - Host replica = replicas.iterator().next(); - - // Iterate the cluster's token ranges. For each one, use a range query to ask Cassandra which partition keys - // are in this range. - - PreparedStatement rangeStmt = session().prepare("SELECT i FROM foo WHERE token(i) > ? and token(i) <= ?"); - - TokenRange foundRange = null; - for (TokenRange range : metadata.getTokenRanges()) { - List rows = rangeQuery(rangeStmt, range); - for (Row row : rows) { - if (row.getInt("i") == testKey) { - // We should find our test key exactly once - assertThat(foundRange) - .describedAs("found the same key in two ranges: " + foundRange + " and " + range) - .isNull(); - foundRange = range; - // That range should be managed by the replica - assertThat(metadata.getReplicas(ks1, range)).contains(replica); - } - } + private final DataType expectedTokenType; + private final int numTokens; + private final boolean useVnodes; + private String ks1; + private String ks2; + + public TokenIntegrationTest(DataType expectedTokenType, boolean useVnodes) { + this.expectedTokenType = expectedTokenType; + this.numTokens = useVnodes ? 256 : 1; + this.useVnodes = useVnodes; + } + + @Override + public Cluster.Builder createClusterBuilder() { + // Only connect to node 1, which makes it easier to query system tables in + // should_expose_tokens_per_host() + LoadBalancingPolicy lbp = + new WhiteListPolicy(new RoundRobinPolicy(), Collections.singleton(ccm().addressOfNode(1))); + return Cluster.builder() + .addContactPoints(getContactPoints().get(0)) + .withPort(ccm().getBinaryPort()) + .withLoadBalancingPolicy(lbp); + } + + @Override + public void onTestContextInitialized() { + ks1 = TestUtils.generateIdentifier("ks_"); + ks2 = TestUtils.generateIdentifier("ks_"); + execute( + String.format( + "CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", + ks1), + String.format( + "CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}", + ks2), + String.format("USE %s", ks1), + "CREATE TABLE foo(i int primary key)", + "INSERT INTO foo (i) VALUES (1)", + "INSERT INTO foo (i) VALUES (2)", + "INSERT INTO foo (i) VALUES (3)"); + } + + /** + * Validates that {@link TokenRange}s are exposed via a {@link Cluster}'s {@link Metadata} and + * they can be used to query data. + * + * @test_category metadata:token + * @expected_result token ranges are exposed and usable. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void should_expose_token_ranges() throws Exception { + Metadata metadata = cluster().getMetadata(); + + // Find the replica for a given partition key + int testKey = 1; + Set replicas = + metadata.getReplicas( + ks1, + TypeCodec.cint() + .serialize( + testKey, + cluster().getConfiguration().getProtocolOptions().getProtocolVersion())); + assertThat(replicas).hasSize(1); + Host replica = replicas.iterator().next(); + + // Iterate the cluster's token ranges. 
For each one, use a range query to ask Cassandra which + // partition keys + // are in this range. + + PreparedStatement rangeStmt = + session().prepare("SELECT i FROM foo WHERE token(i) > ? and token(i) <= ?"); + + TokenRange foundRange = null; + for (TokenRange range : metadata.getTokenRanges()) { + List rows = rangeQuery(rangeStmt, range); + for (Row row : rows) { + if (row.getInt("i") == testKey) { + // We should find our test key exactly once + assertThat(foundRange) + .describedAs("found the same key in two ranges: " + foundRange + " and " + range) + .isNull(); + foundRange = range; + // That range should be managed by the replica + assertThat(metadata.getReplicas(ks1, range)).contains(replica); } - assertThat(foundRange).isNotNull(); + } } - - private List rangeQuery(PreparedStatement rangeStmt, TokenRange range) { - List rows = Lists.newArrayList(); - for (TokenRange subRange : range.unwrap()) { - Statement statement = rangeStmt.bind(subRange.getStart(), subRange.getEnd()); - rows.addAll(session().execute(statement).all()); - } - return rows; + assertThat(foundRange).isNotNull(); + } + + private List rangeQuery(PreparedStatement rangeStmt, TokenRange range) { + List rows = Lists.newArrayList(); + for (TokenRange subRange : range.unwrap()) { + Statement statement = rangeStmt.bind(subRange.getStart(), subRange.getEnd()); + rows.addAll(session().execute(statement).all()); } - - /** - *

    - * Validates that a {@link Token} can be retrieved and parsed by executing 'select token(name)' and - * then used to find data matching that token. - *

    - *

    - *

    - * This test does the following: - *

    - *

      - *
    1. Retrieve the token for the key with value '1', get it by index, and ensure it is of the expected token type. - *
    2. Retrieve the token for the partition key with getPartitionKeyToken - *
    3. Select data by token with a BoundStatement. - *
    4. Select data by token using setToken by index. - *
    5. Select data by token with setPartitionKeyToken. - *
    - * - * @test_category token - * @expected_result tokens are selectable, properly parsed, and usable as input. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short") - public void should_get_token_from_row_and_set_token_in_query() { - // get by index: - Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); - Token token = row.getToken(0); - assertThat(token.getType()).isEqualTo(expectedTokenType); - - assertThat( - row.getPartitionKeyToken() - ).isEqualTo(token); - - PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = ?"); - row = session().execute(pst.bind(token)).one(); - assertThat(row.getInt(0)).isEqualTo(1); - - row = session().execute(pst.bind().setToken(0, token)).one(); - assertThat(row.getInt(0)).isEqualTo(1); - - row = session().execute(pst.bind().setPartitionKeyToken(token)).one(); - assertThat(row.getInt(0)).isEqualTo(1); + return rows; + } + + /** + * Validates that a {@link Token} can be retrieved and parsed by executing 'select token(name)' + * and then used to find data matching that token. + * + *

    + * + *

    This test does the following: + * + *

    + * + *

      + *
    1. Retrieve the token for the key with value '1', get it by index, and ensure it is of the + * expected token type. + *
    2. Retrieve the token for the partition key with getPartitionKeyToken + *
    3. Select data by token with a BoundStatement. + *
    4. Select data by token using setToken by index. + *
    5. Select data by token with setPartitionKeyToken. + *
    + * + * @test_category token + * @expected_result tokens are selectable, properly parsed, and usable as input. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void should_get_token_from_row_and_set_token_in_query() { + // get by index: + Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); + Token token = row.getToken(0); + assertThat(token.getType()).isEqualTo(expectedTokenType); + + assertThat(row.getPartitionKeyToken()).isEqualTo(token); + + PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = ?"); + row = session().execute(pst.bind(token)).one(); + assertThat(row.getInt(0)).isEqualTo(1); + + row = session().execute(pst.bind().setToken(0, token)).one(); + assertThat(row.getInt(0)).isEqualTo(1); + + row = session().execute(pst.bind().setPartitionKeyToken(token)).one(); + assertThat(row.getInt(0)).isEqualTo(1); + } + + /** + * Validates that a {@link Token} can be retrieved and parsed by using bind variables and + * aliasing. + * + *

    + * + *

    This test does the following: + * + *

    + * + *

      + *
    1. Retrieve the token by alias for the key '1', and ensure it matches the token by index. + *
    2. Select data by token using setToken by name. + *
    + */ + @Test(groups = "short") + @CassandraVersion("2.0") + public void should_get_token_from_row_and_set_token_in_query_with_binding_and_aliasing() { + Row row = session().execute("SELECT token(i) AS t FROM foo WHERE i = 1").one(); + Token token = row.getToken("t"); + assertThat(token.getType()).isEqualTo(expectedTokenType); + + PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = :myToken"); + row = session().execute(pst.bind().setToken("myToken", token)).one(); + assertThat(row.getInt(0)).isEqualTo(1); + + row = session().execute("SELECT * FROM foo WHERE token(i) = ?", token).one(); + assertThat(row.getInt(0)).isEqualTo(1); + } + + /** + * Ensures that an exception is raised when attempting to retrieve a token a non-token column. + * + * @test_category token + * @expected_result an exception is raised. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short", expectedExceptions = InvalidTypeException.class) + public void should_raise_exception_when_get_token_on_non_token() { + Row row = session().execute("SELECT i FROM foo WHERE i = 1").one(); + row.getToken(0); + } + + /** + * Ensures that @{link TokenRange}s are exposed at a per host level, the ranges are complete, the + * entire ring is represented, and that ranges do not overlap. + * + *

    + * + *

    Also ensures that ranges from another replica are present when a Host is a replica for + * another node. + * + * @test_category metadata:token + * @expected_result The entire token range is represented collectively and the ranges do not + * overlap. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void should_expose_token_ranges_per_host() { + checkRangesPerHost(ks1, 1); + checkRangesPerHost(ks2, 2); + assertThat(cluster()).hasValidTokenRanges(); + } + + private void checkRangesPerHost(String keyspace, int replicationFactor) { + List allRangesWithReplicas = Lists.newArrayList(); + + // Get each host's ranges, the count should match the replication factor + for (int i = 1; i <= 3; i++) { + Host host = TestUtils.findHost(cluster(), i); + Set hostRanges = cluster().getMetadata().getTokenRanges(keyspace, host); + // Special case: When using vnodes the tokens are not evenly assigned to each replica. + if (!useVnodes) { + assertThat(hostRanges).hasSize(replicationFactor * numTokens); + } + allRangesWithReplicas.addAll(hostRanges); } - /** - *

    - * Validates that a {@link Token} can be retrieved and parsed by using bind variables and - * aliasing. - *

    - *

    - *

    - * This test does the following: - *

    - *

      - *
    1. Retrieve the token by alias for the key '1', and ensure it matches the token by index. - *
    2. Select data by token using setToken by name. - *
    - */ - @Test(groups = "short") - @CassandraVersion("2.0") - public void should_get_token_from_row_and_set_token_in_query_with_binding_and_aliasing() { - Row row = session().execute("SELECT token(i) AS t FROM foo WHERE i = 1").one(); - Token token = row.getToken("t"); - assertThat(token.getType()).isEqualTo(expectedTokenType); - - PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = :myToken"); - row = session().execute(pst.bind().setToken("myToken", token)).one(); - assertThat(row.getInt(0)).isEqualTo(1); - - row = session().execute("SELECT * FROM foo WHERE token(i) = ?", token).one(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - /** - *

    - * Ensures that an exception is raised when attempting to retrieve a token on a non-token column. - *

    - * - * @test_category token - * @expected_result an exception is raised. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short", expectedExceptions = InvalidTypeException.class) - public void should_raise_exception_when_get_token_on_non_token() { - Row row = session().execute("SELECT i FROM foo WHERE i = 1").one(); - row.getToken(0); - } - - /** - *

    - * Ensures that {@link TokenRange}s are exposed at a per-host level, the ranges are complete, - * the entire ring is represented, and that ranges do not overlap. - *

    - *

    - *

    - * Also ensures that ranges from another replica are present when a Host is a replica for - * another node. - *

    - * - * @test_category metadata:token - * @expected_result The entire token range is represented collectively and the ranges do not overlap. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short") - public void should_expose_token_ranges_per_host() { - checkRangesPerHost(ks1, 1); - checkRangesPerHost(ks2, 2); - assertThat(cluster()).hasValidTokenRanges(); - } - - private void checkRangesPerHost(String keyspace, int replicationFactor) { - List allRangesWithReplicas = Lists.newArrayList(); - - // Get each host's ranges, the count should match the replication factor - for (int i = 1; i <= 3; i++) { - Host host = TestUtils.findHost(cluster(), i); - Set hostRanges = cluster().getMetadata().getTokenRanges(keyspace, host); - // Special case: When using vnodes the tokens are not evenly assigned to each replica. - if (!useVnodes) { - assertThat(hostRanges).hasSize(replicationFactor * numTokens); - } - allRangesWithReplicas.addAll(hostRanges); - } - - // Special case check for vnodes to ensure that total number of replicated ranges is correct. - assertThat(allRangesWithReplicas).hasSize(3 * numTokens * replicationFactor); - - // Once we ignore duplicates, the number of ranges should match the number of nodes. - Set allRanges = new HashSet(allRangesWithReplicas); - assertThat(allRanges).hasSize(3 * numTokens); - - // And the ranges should cover the whole ring and no ranges intersect. - assertThat(cluster()).hasValidTokenRanges(keyspace); - } - - /** - *

    - * Ensures that Tokens are exposed for each Host and that they match those in the system tables. - *

    - *

    - *

    - * Also validates that tokens are not present for multiple hosts. - *

    - * - * @test_category metadata:token - * @expected_result Tokens are exposed by Host and match those in the system tables. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short") - public void should_expose_tokens_per_host() { - for (Host host : cluster().getMetadata().allHosts()) { - assertThat(host.getTokens()).hasSize(numTokens); - // Check against the info in the system tables, which is a bit weak since it's exactly how the metadata is - // constructed in the first place, but there's not much else we can do. - // Note that this relies on all queries going to node 1, which is why we use a WhiteList LBP in setup(). - boolean isControlHost = host.getSocketAddress().equals(cluster().manager.controlConnection.connectionRef.get().address); - Row row; - if (isControlHost) { - row = session().execute("select tokens from system.local").one(); - } else { - // non-control hosts are populated from system.peers and their broadcast address should be known - assertThat(host.getBroadcastAddress()).isNotNull(); - row = session().execute("select tokens from system.peers where peer = '" + host.getBroadcastAddress().getHostAddress() + "'").one(); - } - Set tokenStrings = row.getSet("tokens", String.class); - assertThat(tokenStrings).hasSize(numTokens); - Iterable tokensFromSystemTable = Iterables.transform(tokenStrings, new Function() { + // Special case check for vnodes to ensure that total number of replicated ranges is correct. + assertThat(allRangesWithReplicas).hasSize(3 * numTokens * replicationFactor); + + // Once we ignore duplicates, the number of ranges should match the number of nodes. + Set allRanges = new HashSet(allRangesWithReplicas); + assertThat(allRanges).hasSize(3 * numTokens); + + // And the ranges should cover the whole ring and no ranges intersect. + assertThat(cluster()).hasValidTokenRanges(keyspace); + } + + /** + * Ensures that Tokens are exposed for each Host and that the match those in the system tables. + * + *

    + * + *

    Also validates that tokens are not present for multiple hosts. + * + * @test_category metadata:token + * @expected_result Tokens are exposed by Host and match those in the system tables. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void should_expose_tokens_per_host() { + for (Host host : cluster().getMetadata().allHosts()) { + assertThat(host.getTokens()).hasSize(numTokens); + // Check against the info in the system tables, which is a bit weak since it's exactly how the + // metadata is + // constructed in the first place, but there's not much else we can do. + // Note that this relies on all queries going to node 1, which is why we use a WhiteList LBP + // in setup(). + boolean isControlHost = + host.getEndPoint() + .equals(cluster().manager.controlConnection.connectionRef.get().endPoint); + Row row; + if (isControlHost) { + row = session().execute("select tokens from system.local").one(); + } else { + // non-control hosts are populated from system.peers and their broadcast address should be + // known + assertThat(host.getBroadcastSocketAddress()).isNotNull(); + row = + session() + .execute( + "select tokens from system.peers where peer = '" + + host.getBroadcastSocketAddress().getAddress().getHostAddress() + + "'") + .one(); + } + Set tokenStrings = row.getSet("tokens", String.class); + assertThat(tokenStrings).hasSize(numTokens); + Iterable tokensFromSystemTable = + Iterables.transform( + tokenStrings, + new Function() { @Override public Token apply(String input) { - return tokenFactory().fromString(input); + return tokenFactory().fromString(input); } - }); + }); - assertThat(host.getTokens()).containsOnlyOnce(Iterables.toArray(tokensFromSystemTable, Token.class)); - } + assertThat(host.getTokens()) + .containsOnlyOnce(Iterables.toArray(tokensFromSystemTable, Token.class)); } - - /** - *

    - * Ensures that, for the {@link TokenRange}s returned by {@link Metadata#getTokenRanges()}, there exists at - * most one {@link TokenRange} for which calling {@link TokenRange#isWrappedAround()} returns true and - * {@link TokenRange#unwrap()} returns two {@link TokenRange}s. - *

    - * - * @test_category metadata:token - * @expected_result Tokens are exposed by Host and match those in the system tables. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(groups = "short") - public void should_only_unwrap_one_range_for_all_ranges() { - Set ranges = cluster().getMetadata().getTokenRanges(); - - assertOnlyOneWrapped(ranges); - - Iterable splitRanges = Iterables.concat(Iterables.transform(ranges, + } + + /** + * Ensures that for the {@link TokenRange}s returned by {@link Metadata#getTokenRanges()} that + * there exists at most one {@link TokenRange} for which calling {@link + * TokenRange#isWrappedAround()} returns true and {@link TokenRange#unwrap()} returns two {@link + * TokenRange}s. + * + * @test_category metadata:token + * @expected_result Tokens are exposed by Host and match those in the system tables. + * @jira_ticket JAVA-312 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void should_only_unwrap_one_range_for_all_ranges() { + Set ranges = cluster().getMetadata().getTokenRanges(); + + assertOnlyOneWrapped(ranges); + + Iterable splitRanges = + Iterables.concat( + Iterables.transform( + ranges, new Function>() { - @Override - public Iterable apply(TokenRange input) { - return input.splitEvenly(10); - } - }) - ); - - assertOnlyOneWrapped(splitRanges); + @Override + public Iterable apply(TokenRange input) { + return input.splitEvenly(10); + } + })); + + assertOnlyOneWrapped(splitRanges); + } + + /** + * Asserts that given the input {@link TokenRange}s that at most one of them wraps the token ring. + * + * @param ranges Ranges to validate against. + */ + protected void assertOnlyOneWrapped(Iterable ranges) { + TokenRange wrappedRange = null; + + for (TokenRange range : ranges) { + if (range.isWrappedAround()) { + assertThat(wrappedRange) + .as( + "Found a wrapped around TokenRange (%s) when one already exists (%s).", + range, wrappedRange) + .isNull(); + wrappedRange = range; + + assertThat(range).isWrappedAround(); // this also checks the unwrapped ranges + } else { + assertThat(range).isNotWrappedAround(); + } } + } - /** - * Asserts that given the input {@link TokenRange}s that at most one of them wraps the token ring. - * - * @param ranges Ranges to validate against. 
- */ - protected void assertOnlyOneWrapped(Iterable ranges) { - TokenRange wrappedRange = null; - - for (TokenRange range : ranges) { - if (range.isWrappedAround()) { - assertThat(wrappedRange) - .as("Found a wrapped around TokenRange (%s) when one already exists (%s).", range, wrappedRange) - .isNull(); - wrappedRange = range; - - assertThat(range).isWrappedAround(); // this also checks the unwrapped ranges - } else { - assertThat(range).isNotWrappedAround(); - } - } - } - - @Test(groups = "short") - public void should_expose_token_and_range_creation_methods() { - Metadata metadata = cluster().getMetadata(); + @Test(groups = "short") + public void should_expose_token_and_range_creation_methods() { + Metadata metadata = cluster().getMetadata(); - // Pick a random range - TokenRange range = metadata.getTokenRanges().iterator().next(); + // Pick a random range + TokenRange range = metadata.getTokenRanges().iterator().next(); - Token start = metadata.newToken(range.getStart().toString()); - Token end = metadata.newToken(range.getEnd().toString()); + Token start = metadata.newToken(range.getStart().toString()); + Token end = metadata.newToken(range.getEnd().toString()); - assertThat(metadata.newTokenRange(start, end)) - .isEqualTo(range); - } + assertThat(metadata.newTokenRange(start, end)).isEqualTo(range); + } - @Test(groups = "short") - public void should_create_token_from_partition_key() { - Metadata metadata = cluster().getMetadata(); + @Test(groups = "short") + public void should_create_token_from_partition_key() { + Metadata metadata = cluster().getMetadata(); - Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); - Token expected = row.getToken(0); + Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); + Token expected = row.getToken(0); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - assertThat( - metadata.newToken(TypeCodec.cint().serialize(1, protocolVersion)) - ).isEqualTo(expected); - } + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + assertThat(metadata.newToken(TypeCodec.cint().serialize(1, protocolVersion))) + .isEqualTo(expected); + } - protected abstract Token.Factory tokenFactory(); + protected abstract Token.Factory tokenFactory(); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TokenRangeAssert.java b/driver-core/src/test/java/com/datastax/driver/core/TokenRangeAssert.java index eff51316542..9dc25541311 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TokenRangeAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TokenRangeAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,98 +17,93 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractAssert; +import static com.datastax.driver.core.Assertions.assertThat; import java.util.Iterator; import java.util.List; - -import static com.datastax.driver.core.Assertions.assertThat; +import org.assertj.core.api.AbstractAssert; public class TokenRangeAssert extends AbstractAssert { - protected TokenRangeAssert(TokenRange actual) { - super(actual, TokenRangeAssert.class); - } - - public TokenRangeAssert startsWith(Token token) { - assertThat(actual.getStart()).isEqualTo(token); - return this; - } - - public TokenRangeAssert endsWith(Token token) { - assertThat(actual.getEnd()).isEqualTo(token); - return this; - } - - public TokenRangeAssert isEmpty() { - assertThat(actual.isEmpty()).isTrue(); - return this; - } - - public TokenRangeAssert isNotEmpty() { - assertThat(actual.isEmpty()).isFalse(); - return this; - } - - public TokenRangeAssert isWrappedAround() { - assertThat(actual.isWrappedAround()).isTrue(); - - Token.Factory factory = actual.factory; - - List unwrapped = actual.unwrap(); - assertThat(unwrapped.size()) - .as("%s should unwrap to two ranges, but unwrapped to %s", actual, unwrapped) - .isEqualTo(2); - - Iterator unwrappedIt = unwrapped.iterator(); - TokenRange firstRange = unwrappedIt.next(); - assertThat(firstRange).endsWith(factory.minToken()); - - TokenRange secondRange = unwrappedIt.next(); - assertThat(secondRange).startsWith(factory.minToken()); - - return this; - } - - public TokenRangeAssert isNotWrappedAround() { - assertThat(actual.isWrappedAround()).isFalse(); - assertThat(actual.unwrap()).containsExactly(actual); - return this; - } - - public TokenRangeAssert unwrapsTo(TokenRange... subRanges) { - assertThat(actual.unwrap()).containsExactly(subRanges); - return this; - } - - public TokenRangeAssert intersects(TokenRange that) { - assertThat(actual.intersects(that)) - .as("%s should intersect %s", actual, that) - .isTrue(); - assertThat(that.intersects(actual)) - .as("%s should intersect %s", that, actual) - .isTrue(); - return this; - } - - public TokenRangeAssert doesNotIntersect(TokenRange... 
that) { - for (TokenRange thatRange : that) { - assertThat(actual.intersects(thatRange)) - .as("%s should not intersect %s", actual, thatRange) - .isFalse(); - assertThat(thatRange.intersects(actual)) - .as("%s should not intersect %s", thatRange, actual) - .isFalse(); - } - return this; - } - - public TokenRangeAssert contains(Token token, boolean isStart) { - assertThat(actual.contains(token, isStart)).isTrue(); - return this; - } - - public TokenRangeAssert doesNotContain(Token token, boolean isStart) { - assertThat(actual.contains(token, isStart)).isFalse(); - return this; + protected TokenRangeAssert(TokenRange actual) { + super(actual, TokenRangeAssert.class); + } + + public TokenRangeAssert startsWith(Token token) { + assertThat(actual.getStart()).isEqualTo(token); + return this; + } + + public TokenRangeAssert endsWith(Token token) { + assertThat(actual.getEnd()).isEqualTo(token); + return this; + } + + public TokenRangeAssert isEmpty() { + assertThat(actual.isEmpty()).isTrue(); + return this; + } + + public TokenRangeAssert isNotEmpty() { + assertThat(actual.isEmpty()).isFalse(); + return this; + } + + public TokenRangeAssert isWrappedAround() { + assertThat(actual.isWrappedAround()).isTrue(); + + Token.Factory factory = actual.factory; + + List unwrapped = actual.unwrap(); + assertThat(unwrapped.size()) + .as("%s should unwrap to two ranges, but unwrapped to %s", actual, unwrapped) + .isEqualTo(2); + + Iterator unwrappedIt = unwrapped.iterator(); + TokenRange firstRange = unwrappedIt.next(); + assertThat(firstRange).endsWith(factory.minToken()); + + TokenRange secondRange = unwrappedIt.next(); + assertThat(secondRange).startsWith(factory.minToken()); + + return this; + } + + public TokenRangeAssert isNotWrappedAround() { + assertThat(actual.isWrappedAround()).isFalse(); + assertThat(actual.unwrap()).containsExactly(actual); + return this; + } + + public TokenRangeAssert unwrapsTo(TokenRange... subRanges) { + assertThat(actual.unwrap()).containsExactly(subRanges); + return this; + } + + public TokenRangeAssert intersects(TokenRange that) { + assertThat(actual.intersects(that)).as("%s should intersect %s", actual, that).isTrue(); + assertThat(that.intersects(actual)).as("%s should intersect %s", that, actual).isTrue(); + return this; + } + + public TokenRangeAssert doesNotIntersect(TokenRange... that) { + for (TokenRange thatRange : that) { + assertThat(actual.intersects(thatRange)) + .as("%s should not intersect %s", actual, thatRange) + .isFalse(); + assertThat(thatRange.intersects(actual)) + .as("%s should not intersect %s", thatRange, actual) + .isFalse(); } + return this; + } + + public TokenRangeAssert contains(Token token, boolean isStart) { + assertThat(actual.contains(token, isStart)).isTrue(); + return this; + } + + public TokenRangeAssert doesNotContain(Token token, boolean isStart) { + assertThat(actual.contains(token, isStart)).isFalse(); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TokenRangeTest.java b/driver-core/src/test/java/com/datastax/driver/core/TokenRangeTest.java index 5f7e6b9b2b9..4c0a9b8cb3c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TokenRangeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TokenRangeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,282 +17,283 @@ */ package com.datastax.driver.core; -import com.google.common.collect.ImmutableList; -import org.testng.annotations.Test; - -import java.util.List; - import static com.datastax.driver.core.Assertions.assertThat; import static org.testng.Assert.fail; -public class TokenRangeTest { - // The tests in this class don't depend on the kind of factory used, so use Murmur3 everywhere - Token.Factory factory = Token.getFactory("Murmur3Partitioner"); - private Token minToken = factory.minToken(); - - @Test(groups = "unit") - public void should_check_intersection() { - // NB - to make the test more visual, we use watch face numbers - assertThat(tokenRange(3, 9)) - .doesNotIntersect(tokenRange(11, 1)) - .doesNotIntersect(tokenRange(1, 2)) - .doesNotIntersect(tokenRange(11, 3)) - .doesNotIntersect(tokenRange(2, 3)) - .doesNotIntersect(tokenRange(3, 3)) - .intersects(tokenRange(2, 6)) - .intersects(tokenRange(2, 10)) - .intersects(tokenRange(6, 10)) - .intersects(tokenRange(4, 8)) - .intersects(tokenRange(3, 9)) - .doesNotIntersect(tokenRange(9, 10)) - .doesNotIntersect(tokenRange(10, 11)) - ; - assertThat(tokenRange(9, 3)) - .doesNotIntersect(tokenRange(5, 7)) - .doesNotIntersect(tokenRange(7, 8)) - .doesNotIntersect(tokenRange(5, 9)) - .doesNotIntersect(tokenRange(8, 9)) - .doesNotIntersect(tokenRange(9, 9)) - .intersects(tokenRange(8, 2)) - .intersects(tokenRange(8, 4)) - .intersects(tokenRange(2, 4)) - .intersects(tokenRange(10, 2)) - .intersects(tokenRange(9, 3)) - .doesNotIntersect(tokenRange(3, 4)) - .doesNotIntersect(tokenRange(4, 5)) - ; - assertThat(tokenRange(3, 3)).doesNotIntersect(tokenRange(3, 3)); - - // Reminder: minToken serves as both lower and upper bound - assertThat(tokenRange(minToken, 5)) - .doesNotIntersect(tokenRange(6, 7)) - .doesNotIntersect(tokenRange(6, minToken)) - .intersects(tokenRange(6, 4)) - .intersects(tokenRange(2, 4)) - .intersects(tokenRange(minToken, 4)) - .intersects(tokenRange(minToken, 5)) - ; - - assertThat(tokenRange(5, minToken)) - .doesNotIntersect(tokenRange(3, 4)) - .doesNotIntersect(tokenRange(minToken, 4)) - .intersects(tokenRange(6, 7)) - .intersects(tokenRange(4, 1)) - .intersects(tokenRange(6, minToken)) - .intersects(tokenRange(5, minToken)) - ; - - assertThat(tokenRange(minToken, minToken)) - .intersects(tokenRange(3, 4)) - .intersects(tokenRange(3, minToken)) - .intersects(tokenRange(minToken, 3)) - .doesNotIntersect(tokenRange(3, 3)) - ; - } - - @Test(groups = "unit") - public void should_compute_intersection() { - assertThat(tokenRange(3, 9).intersectWith(tokenRange(2, 4))) - .isEqualTo(ImmutableList.of(tokenRange(3, 4))); - assertThat(tokenRange(3, 9).intersectWith(tokenRange(3, 5))) - .isEqualTo(ImmutableList.of(tokenRange(3, 5))); - assertThat(tokenRange(3, 9).intersectWith(tokenRange(4, 6))) - 
.isEqualTo(ImmutableList.of(tokenRange(4, 6))); - assertThat(tokenRange(3, 9).intersectWith(tokenRange(7, 9))) - .isEqualTo(ImmutableList.of(tokenRange(7, 9))); - assertThat(tokenRange(3, 9).intersectWith(tokenRange(8, 10))) - .isEqualTo(ImmutableList.of(tokenRange(8, 9))); - } - - @Test(groups = "unit") - public void should_compute_intersection_with_ranges_around_ring() { - // If a range wraps the ring like 10, -10 does this will produce two separate - // intersected ranges. - assertThat(tokenRange(10, -10).intersectWith(tokenRange(-20, 20))) - .isEqualTo(ImmutableList.of(tokenRange(10, 20), tokenRange(-20, -10))); - assertThat(tokenRange(-20, 20).intersectWith(tokenRange(10, -10))) - .isEqualTo(ImmutableList.of(tokenRange(10, 20), tokenRange(-20, -10))); - - // If both ranges wrap the ring, they should be merged together wrapping across - // the range. - assertThat(tokenRange(10, -30).intersectWith(tokenRange(20, -20))) - .isEqualTo(ImmutableList.of(tokenRange(20, -30))); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_to_compute_intersection_when_ranges_dont_intersect() { - tokenRange(1, 2).intersectWith(tokenRange(2, 3)); - } - - @Test(groups = "unit") - public void should_merge_with_other_range() { - assertThat(tokenRange(3, 9).mergeWith(tokenRange(2, 3))).isEqualTo(tokenRange(2, 9)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(2, 4))).isEqualTo(tokenRange(2, 9)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(11, 3))).isEqualTo(tokenRange(11, 9)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(11, 4))).isEqualTo(tokenRange(11, 9)); - - assertThat(tokenRange(3, 9).mergeWith(tokenRange(4, 8))).isEqualTo(tokenRange(3, 9)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(3, 9))).isEqualTo(tokenRange(3, 9)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(3, 3))).isEqualTo(tokenRange(3, 9)); - assertThat(tokenRange(3, 3).mergeWith(tokenRange(3, 9))).isEqualTo(tokenRange(3, 9)); - - assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 11))).isEqualTo(tokenRange(3, 11)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(8, 11))).isEqualTo(tokenRange(3, 11)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 1))).isEqualTo(tokenRange(3, 1)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(8, 1))).isEqualTo(tokenRange(3, 1)); - - assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 3))).isEqualTo(tokenRange(minToken, minToken)); - assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 4))).isEqualTo(tokenRange(minToken, minToken)); - assertThat(tokenRange(3, 10).mergeWith(tokenRange(9, 4))).isEqualTo(tokenRange(minToken, minToken)); - - assertThat(tokenRange(9, 3).mergeWith(tokenRange(8, 9))).isEqualTo(tokenRange(8, 3)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(8, 10))).isEqualTo(tokenRange(8, 3)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(4, 9))).isEqualTo(tokenRange(4, 3)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(4, 10))).isEqualTo(tokenRange(4, 3)); - - assertThat(tokenRange(9, 3).mergeWith(tokenRange(10, 2))).isEqualTo(tokenRange(9, 3)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(9, 3))).isEqualTo(tokenRange(9, 3)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(9, 9))).isEqualTo(tokenRange(9, 3)); - assertThat(tokenRange(9, 9).mergeWith(tokenRange(9, 3))).isEqualTo(tokenRange(9, 3)); - - assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 5))).isEqualTo(tokenRange(9, 5)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(2, 5))).isEqualTo(tokenRange(9, 5)); - 
assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 7))).isEqualTo(tokenRange(9, 7)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(2, 7))).isEqualTo(tokenRange(9, 7)); - - assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 9))).isEqualTo(tokenRange(minToken, minToken)); - assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 10))).isEqualTo(tokenRange(minToken, minToken)); - - assertThat(tokenRange(3, 3).mergeWith(tokenRange(3, 3))).isEqualTo(tokenRange(3, 3)); - - assertThat(tokenRange(5, minToken).mergeWith(tokenRange(6, 7))).isEqualTo(tokenRange(5, minToken)); - assertThat(tokenRange(5, minToken).mergeWith(tokenRange(minToken, 3))).isEqualTo(tokenRange(5, 3)); - assertThat(tokenRange(5, minToken).mergeWith(tokenRange(3, 5))).isEqualTo(tokenRange(3, minToken)); - - assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(2, 3))).isEqualTo(tokenRange(minToken, 5)); - assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(7, minToken))).isEqualTo(tokenRange(7, 5)); - assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(5, 7))).isEqualTo(tokenRange(minToken, 7)); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_not_merge_with_nonadjacent_and_disjoint_ranges() { - tokenRange(0, 5).mergeWith(tokenRange(7, 14)); - } - - @Test(groups = "unit") - public void should_return_non_empty_range_if_other_range_is_empty() { - assertThat(tokenRange(1, 5).mergeWith(tokenRange(5, 5))).isEqualTo(tokenRange(1, 5)); - } - - @Test(groups = "unit") - public void should_unwrap_to_non_wrapping_ranges() { - assertThat(tokenRange(9, 3)).unwrapsTo(tokenRange(9, minToken), tokenRange(minToken, 3)); - assertThat(tokenRange(3, 9)).isNotWrappedAround(); - assertThat(tokenRange(3, minToken)).isNotWrappedAround(); - assertThat(tokenRange(minToken, 3)).isNotWrappedAround(); - assertThat(tokenRange(3, 3)).isNotWrappedAround(); - assertThat(tokenRange(minToken, minToken)).isNotWrappedAround(); - } - - @Test(groups = "unit") - public void should_split_evenly() { - // Simply exercise splitEvenly, split logic is exercised in TokenFactoryTest implementation for each partitioner. - List splits = tokenRange(3, 9).splitEvenly(3); - - assertThat(splits).hasSize(3); - assertThat(splits).containsExactly(tokenRange(3, 5), tokenRange(5, 7), tokenRange(7, 9)); - } - - @Test(groups = "unit") - public void should_throw_error_with_less_than_1_splits() { - for (int i = -255; i < 1; i++) { - try { - tokenRange(0, 1).splitEvenly(i); - fail("Expected error when providing " + i + " splits."); - } catch (IllegalArgumentException e) { - // expected. 
- } - } - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_not_split_empty_token_range() { - tokenRange(0, 0).splitEvenly(1); - } - - @Test(groups = "unit") - public void should_create_empty_token_ranges_if_too_many_splits() { - TokenRange range = tokenRange(0, 10); - - List ranges = range.splitEvenly(255); - assertThat(ranges).hasSize(255); - - for (int i = 0; i < ranges.size(); i++) { - TokenRange tr = ranges.get(i); - if (i < 10) { - assertThat(tr).isEqualTo(tokenRange(i, i + 1)); - } else { - assertThat(tr.isEmpty()); - } - } - } - - @Test(groups = "unit") - public void should_check_if_range_contains_token() { - // ]1,2] contains 2, but it does not contain the start of ]2,3] - assertThat(tokenRange(1, 2)) - .contains(newM3PToken(2), false) - .doesNotContain(newM3PToken(2), true); - // ]1,2] does not contain 1, but it contains the start of ]1,3] - assertThat(tokenRange(1, 2)) - .doesNotContain(newM3PToken(1), false) - .contains(newM3PToken(1), true); - - // ]2,1] contains the start of ]min,5] - assertThat(tokenRange(2, 1)) - .contains(minToken, true); - - // ]min, 1] does not contain min, but it contains the start of ]min, 2] - assertThat(tokenRange(minToken, 1)) - .doesNotContain(minToken, false) - .contains(minToken, true); - // ]1, min] contains min, but not the start of ]min, 2] - assertThat(tokenRange(1, minToken)) - .contains(minToken, false) - .doesNotContain(minToken, true); - - // An empty range contains nothing - assertThat(tokenRange(1, 1)) - .doesNotContain(newM3PToken(1), true) - .doesNotContain(newM3PToken(1), false) - .doesNotContain(minToken, true) - .doesNotContain(minToken, false); - - // The whole ring contains everything - assertThat(tokenRange(minToken, minToken)) - .contains(minToken, true) - .contains(minToken, false) - .contains(newM3PToken(1), true) - .contains(newM3PToken(1), false); - } - - private TokenRange tokenRange(long start, long end) { - return new TokenRange(newM3PToken(start), newM3PToken(end), factory); - } - - private TokenRange tokenRange(Token start, long end) { - return new TokenRange(start, newM3PToken(end), factory); - } - - private TokenRange tokenRange(long start, Token end) { - return new TokenRange(newM3PToken(start), end, factory); - } +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.testng.annotations.Test; - private TokenRange tokenRange(Token start, Token end) { - return new TokenRange(start, end, factory); +public class TokenRangeTest { + // The tests in this class don't depend on the kind of factory used, so use Murmur3 everywhere + Token.Factory factory = Token.getFactory("Murmur3Partitioner"); + private Token minToken = factory.minToken(); + + @Test(groups = "unit") + public void should_check_intersection() { + // NB - to make the test more visual, we use watch face numbers + assertThat(tokenRange(3, 9)) + .doesNotIntersect(tokenRange(11, 1)) + .doesNotIntersect(tokenRange(1, 2)) + .doesNotIntersect(tokenRange(11, 3)) + .doesNotIntersect(tokenRange(2, 3)) + .doesNotIntersect(tokenRange(3, 3)) + .intersects(tokenRange(2, 6)) + .intersects(tokenRange(2, 10)) + .intersects(tokenRange(6, 10)) + .intersects(tokenRange(4, 8)) + .intersects(tokenRange(3, 9)) + .doesNotIntersect(tokenRange(9, 10)) + .doesNotIntersect(tokenRange(10, 11)); + assertThat(tokenRange(9, 3)) + .doesNotIntersect(tokenRange(5, 7)) + .doesNotIntersect(tokenRange(7, 8)) + .doesNotIntersect(tokenRange(5, 9)) + .doesNotIntersect(tokenRange(8, 9)) + .doesNotIntersect(tokenRange(9, 
9)) + .intersects(tokenRange(8, 2)) + .intersects(tokenRange(8, 4)) + .intersects(tokenRange(2, 4)) + .intersects(tokenRange(10, 2)) + .intersects(tokenRange(9, 3)) + .doesNotIntersect(tokenRange(3, 4)) + .doesNotIntersect(tokenRange(4, 5)); + assertThat(tokenRange(3, 3)).doesNotIntersect(tokenRange(3, 3)); + + // Reminder: minToken serves as both lower and upper bound + assertThat(tokenRange(minToken, 5)) + .doesNotIntersect(tokenRange(6, 7)) + .doesNotIntersect(tokenRange(6, minToken)) + .intersects(tokenRange(6, 4)) + .intersects(tokenRange(2, 4)) + .intersects(tokenRange(minToken, 4)) + .intersects(tokenRange(minToken, 5)); + + assertThat(tokenRange(5, minToken)) + .doesNotIntersect(tokenRange(3, 4)) + .doesNotIntersect(tokenRange(minToken, 4)) + .intersects(tokenRange(6, 7)) + .intersects(tokenRange(4, 1)) + .intersects(tokenRange(6, minToken)) + .intersects(tokenRange(5, minToken)); + + assertThat(tokenRange(minToken, minToken)) + .intersects(tokenRange(3, 4)) + .intersects(tokenRange(3, minToken)) + .intersects(tokenRange(minToken, 3)) + .doesNotIntersect(tokenRange(3, 3)); + } + + @Test(groups = "unit") + public void should_compute_intersection() { + assertThat(tokenRange(3, 9).intersectWith(tokenRange(2, 4))) + .isEqualTo(ImmutableList.of(tokenRange(3, 4))); + assertThat(tokenRange(3, 9).intersectWith(tokenRange(3, 5))) + .isEqualTo(ImmutableList.of(tokenRange(3, 5))); + assertThat(tokenRange(3, 9).intersectWith(tokenRange(4, 6))) + .isEqualTo(ImmutableList.of(tokenRange(4, 6))); + assertThat(tokenRange(3, 9).intersectWith(tokenRange(7, 9))) + .isEqualTo(ImmutableList.of(tokenRange(7, 9))); + assertThat(tokenRange(3, 9).intersectWith(tokenRange(8, 10))) + .isEqualTo(ImmutableList.of(tokenRange(8, 9))); + } + + @Test(groups = "unit") + public void should_compute_intersection_with_ranges_around_ring() { + // If a range wraps the ring like 10, -10 does this will produce two separate + // intersected ranges. + assertThat(tokenRange(10, -10).intersectWith(tokenRange(-20, 20))) + .isEqualTo(ImmutableList.of(tokenRange(10, 20), tokenRange(-20, -10))); + assertThat(tokenRange(-20, 20).intersectWith(tokenRange(10, -10))) + .isEqualTo(ImmutableList.of(tokenRange(10, 20), tokenRange(-20, -10))); + + // If both ranges wrap the ring, they should be merged together wrapping across + // the range. 
+ assertThat(tokenRange(10, -30).intersectWith(tokenRange(20, -20))) + .isEqualTo(ImmutableList.of(tokenRange(20, -30))); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_to_compute_intersection_when_ranges_dont_intersect() { + tokenRange(1, 2).intersectWith(tokenRange(2, 3)); + } + + @Test(groups = "unit") + public void should_merge_with_other_range() { + assertThat(tokenRange(3, 9).mergeWith(tokenRange(2, 3))).isEqualTo(tokenRange(2, 9)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(2, 4))).isEqualTo(tokenRange(2, 9)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(11, 3))).isEqualTo(tokenRange(11, 9)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(11, 4))).isEqualTo(tokenRange(11, 9)); + + assertThat(tokenRange(3, 9).mergeWith(tokenRange(4, 8))).isEqualTo(tokenRange(3, 9)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(3, 9))).isEqualTo(tokenRange(3, 9)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(3, 3))).isEqualTo(tokenRange(3, 9)); + assertThat(tokenRange(3, 3).mergeWith(tokenRange(3, 9))).isEqualTo(tokenRange(3, 9)); + + assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 11))).isEqualTo(tokenRange(3, 11)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(8, 11))).isEqualTo(tokenRange(3, 11)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 1))).isEqualTo(tokenRange(3, 1)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(8, 1))).isEqualTo(tokenRange(3, 1)); + + assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 3))) + .isEqualTo(tokenRange(minToken, minToken)); + assertThat(tokenRange(3, 9).mergeWith(tokenRange(9, 4))) + .isEqualTo(tokenRange(minToken, minToken)); + assertThat(tokenRange(3, 10).mergeWith(tokenRange(9, 4))) + .isEqualTo(tokenRange(minToken, minToken)); + + assertThat(tokenRange(9, 3).mergeWith(tokenRange(8, 9))).isEqualTo(tokenRange(8, 3)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(8, 10))).isEqualTo(tokenRange(8, 3)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(4, 9))).isEqualTo(tokenRange(4, 3)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(4, 10))).isEqualTo(tokenRange(4, 3)); + + assertThat(tokenRange(9, 3).mergeWith(tokenRange(10, 2))).isEqualTo(tokenRange(9, 3)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(9, 3))).isEqualTo(tokenRange(9, 3)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(9, 9))).isEqualTo(tokenRange(9, 3)); + assertThat(tokenRange(9, 9).mergeWith(tokenRange(9, 3))).isEqualTo(tokenRange(9, 3)); + + assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 5))).isEqualTo(tokenRange(9, 5)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(2, 5))).isEqualTo(tokenRange(9, 5)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 7))).isEqualTo(tokenRange(9, 7)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(2, 7))).isEqualTo(tokenRange(9, 7)); + + assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 9))) + .isEqualTo(tokenRange(minToken, minToken)); + assertThat(tokenRange(9, 3).mergeWith(tokenRange(3, 10))) + .isEqualTo(tokenRange(minToken, minToken)); + + assertThat(tokenRange(3, 3).mergeWith(tokenRange(3, 3))).isEqualTo(tokenRange(3, 3)); + + assertThat(tokenRange(5, minToken).mergeWith(tokenRange(6, 7))) + .isEqualTo(tokenRange(5, minToken)); + assertThat(tokenRange(5, minToken).mergeWith(tokenRange(minToken, 3))) + .isEqualTo(tokenRange(5, 3)); + assertThat(tokenRange(5, minToken).mergeWith(tokenRange(3, 5))) + .isEqualTo(tokenRange(3, minToken)); + + assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(2, 
3))) + .isEqualTo(tokenRange(minToken, 5)); + assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(7, minToken))) + .isEqualTo(tokenRange(7, 5)); + assertThat(tokenRange(minToken, 5).mergeWith(tokenRange(5, 7))) + .isEqualTo(tokenRange(minToken, 7)); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_not_merge_with_nonadjacent_and_disjoint_ranges() { + tokenRange(0, 5).mergeWith(tokenRange(7, 14)); + } + + @Test(groups = "unit") + public void should_return_non_empty_range_if_other_range_is_empty() { + assertThat(tokenRange(1, 5).mergeWith(tokenRange(5, 5))).isEqualTo(tokenRange(1, 5)); + } + + @Test(groups = "unit") + public void should_unwrap_to_non_wrapping_ranges() { + assertThat(tokenRange(9, 3)).unwrapsTo(tokenRange(9, minToken), tokenRange(minToken, 3)); + assertThat(tokenRange(3, 9)).isNotWrappedAround(); + assertThat(tokenRange(3, minToken)).isNotWrappedAround(); + assertThat(tokenRange(minToken, 3)).isNotWrappedAround(); + assertThat(tokenRange(3, 3)).isNotWrappedAround(); + assertThat(tokenRange(minToken, minToken)).isNotWrappedAround(); + } + + @Test(groups = "unit") + public void should_split_evenly() { + // Simply exercise splitEvenly, split logic is exercised in TokenFactoryTest implementation for + // each partitioner. + List splits = tokenRange(3, 9).splitEvenly(3); + + assertThat(splits).hasSize(3); + assertThat(splits).containsExactly(tokenRange(3, 5), tokenRange(5, 7), tokenRange(7, 9)); + } + + @Test(groups = "unit") + public void should_throw_error_with_less_than_1_splits() { + for (int i = -255; i < 1; i++) { + try { + tokenRange(0, 1).splitEvenly(i); + fail("Expected error when providing " + i + " splits."); + } catch (IllegalArgumentException e) { + // expected. + } } - - private Token newM3PToken(long value) { - return factory.fromString(Long.toString(value)); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_not_split_empty_token_range() { + tokenRange(0, 0).splitEvenly(1); + } + + @Test(groups = "unit") + public void should_create_empty_token_ranges_if_too_many_splits() { + TokenRange range = tokenRange(0, 10); + + List ranges = range.splitEvenly(255); + assertThat(ranges).hasSize(255); + + for (int i = 0; i < ranges.size(); i++) { + TokenRange tr = ranges.get(i); + if (i < 10) { + assertThat(tr).isEqualTo(tokenRange(i, i + 1)); + } else { + assertThat(tr.isEmpty()); + } } -} \ No newline at end of file + } + + @Test(groups = "unit") + public void should_check_if_range_contains_token() { + // ]1,2] contains 2, but it does not contain the start of ]2,3] + assertThat(tokenRange(1, 2)) + .contains(newM3PToken(2), false) + .doesNotContain(newM3PToken(2), true); + // ]1,2] does not contain 1, but it contains the start of ]1,3] + assertThat(tokenRange(1, 2)) + .doesNotContain(newM3PToken(1), false) + .contains(newM3PToken(1), true); + + // ]2,1] contains the start of ]min,5] + assertThat(tokenRange(2, 1)).contains(minToken, true); + + // ]min, 1] does not contain min, but it contains the start of ]min, 2] + assertThat(tokenRange(minToken, 1)).doesNotContain(minToken, false).contains(minToken, true); + // ]1, min] contains min, but not the start of ]min, 2] + assertThat(tokenRange(1, minToken)).contains(minToken, false).doesNotContain(minToken, true); + + // An empty range contains nothing + assertThat(tokenRange(1, 1)) + .doesNotContain(newM3PToken(1), true) + .doesNotContain(newM3PToken(1), false) + .doesNotContain(minToken, true) + .doesNotContain(minToken, 
false); + + // The whole ring contains everything + assertThat(tokenRange(minToken, minToken)) + .contains(minToken, true) + .contains(minToken, false) + .contains(newM3PToken(1), true) + .contains(newM3PToken(1), false); + } + + private TokenRange tokenRange(long start, long end) { + return new TokenRange(newM3PToken(start), newM3PToken(end), factory); + } + + private TokenRange tokenRange(Token start, long end) { + return new TokenRange(start, newM3PToken(end), factory); + } + + private TokenRange tokenRange(long start, Token end) { + return new TokenRange(newM3PToken(start), end, factory); + } + + private TokenRange tokenRange(Token start, Token end) { + return new TokenRange(start, end, factory); + } + + private Token newM3PToken(long value) { + return factory.fromString(Long.toString(value)); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/TracingTest.java b/driver-core/src/test/java/com/datastax/driver/core/TracingTest.java index bd9531b7506..14ae502546a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TracingTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TracingTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,82 +17,86 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.util.concurrent.Uninterruptibles; -import org.testng.annotations.Test; - import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; @CassandraVersion("2.0.0") public class TracingTest extends CCMTestsSupport { - private static final String KEY = "tracing_test"; + private static final String KEY = "tracing_test"; - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE); - } + @Override + public Cluster.Builder createClusterBuilder() { + @SuppressWarnings("deprecation") + Builder builder = + Cluster.builder() + .withRetryPolicy( + com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy.INSTANCE); + return builder; + } - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); - for (int i = 0; i < 100; i++) { - execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); - } + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))"); + for (int i = 0; i < 100; i++) { + execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); } + } - /** - * Validates that for each page the {@link ExecutionInfo} will have a different tracing ID. - * - * @test_category tracing - * @expected_result {@link ResultSet} where all the {@link ExecutionInfo} will contains a different tracing ID and - * that the events can be retrieved for the last query. - */ - @Test(groups = "short") - public void should_have_a_different_tracingId_for_each_page() { - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - ResultSet result = session().execute(st.setFetchSize(40).enableTracing()); - result.all(); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); - List executions = result.getAllExecutionInfo(); - - UUID previousTraceId = null; - for (ExecutionInfo executionInfo : executions) { - QueryTrace queryTrace = executionInfo.getQueryTrace(); - assertThat(queryTrace).isNotNull(); - assertThat(queryTrace.getTraceId()).isNotEqualTo(previousTraceId); - previousTraceId = queryTrace.getTraceId(); - } + /** + * Validates that for each page the {@link ExecutionInfo} will have a different tracing ID. + * + * @test_category tracing + * @expected_result {@link ResultSet} where all the {@link ExecutionInfo} will contains a + * different tracing ID and that the events can be retrieved for the last query. 
+ */ + @Test(groups = "short") + public void should_have_a_different_tracingId_for_each_page() { + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + ResultSet result = session().execute(st.setFetchSize(40).enableTracing()); + result.all(); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + List executions = result.getAllExecutionInfo(); - assertThat(result.getExecutionInfo().getQueryTrace().getEvents()).isNotNull() - .isNotEmpty(); + UUID previousTraceId = null; + for (ExecutionInfo executionInfo : executions) { + QueryTrace queryTrace = executionInfo.getQueryTrace(); + assertThat(queryTrace).isNotNull(); + assertThat(queryTrace.getTraceId()).isNotEqualTo(previousTraceId); + previousTraceId = queryTrace.getTraceId(); } - /** - * Validates that if a query gets retried, the second internal query will still have tracing enabled. - *
<p/>
    - * To force a retry, we use the downgrading policy with an impossible CL. - * - * @test_category tracing - * @jira_ticket JAVA-815 - * @expected_result {@link ResultSet} where {@link ExecutionInfo} contains trace information after a retry. - */ - @Test(groups = "short") - public void should_preserve_tracing_status_across_retries() { - SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); - st.setConsistencyLevel(ConsistencyLevel.THREE).enableTracing(); + assertThat(result.getExecutionInfo().getQueryTrace().getEvents()).isNotNull().isNotEmpty(); + } - ResultSet result = session().execute(st); - // sleep 10 seconds to make sure the trace will be complete - Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + /** + * Validates that if a query gets retried, the second internal query will still have tracing + * enabled. + * + *
<p>
    To force a retry, we use the downgrading policy with an impossible CL. + * + * @test_category tracing + * @jira_ticket JAVA-815 + * @expected_result {@link ResultSet} where {@link ExecutionInfo} contains trace information after + * a retry. + */ + @Test(groups = "short") + public void should_preserve_tracing_status_across_retries() { + SimpleStatement st = new SimpleStatement(String.format("SELECT v FROM test WHERE k='%s'", KEY)); + st.setConsistencyLevel(ConsistencyLevel.THREE).enableTracing(); - assertThat(result.getExecutionInfo().getQueryTrace()).isNotNull(); - } -} + ResultSet result = session().execute(st); + // sleep 10 seconds to make sure the trace will be complete + Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS); + assertThat(result.getExecutionInfo().getQueryTrace()).isNotNull(); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/TrafficMetersTest.java b/driver-core/src/test/java/com/datastax/driver/core/TrafficMetersTest.java new file mode 100644 index 00000000000..c503fb3fab1 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/TrafficMetersTest.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; + +import com.codahale.metrics.Meter; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + +@CassandraVersion( + "3.0") // Limit to recent Cassandra versions to avoid special-casing for old protocols +public class TrafficMetersTest extends CCMTestsSupport { + + @Test(groups = "short") + public void should_measure_inbound_and_outbound_traffic() { + Metrics metrics = session().getCluster().getMetrics(); + Meter bytesReceived = metrics.getBytesReceived(); + Meter bytesSent = metrics.getBytesSent(); + + long bytesReceivedBefore = bytesReceived.getCount(); + long bytesSentBefore = bytesSent.getCount(); + + SimpleStatement statement = new SimpleStatement("SELECT host_id FROM system.local"); + // Set serial CL to something non-default so request size estimate is accurate. 
+ statement.setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL); + int requestSize = + statement.requestSizeInBytes( + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), + cluster().getConfiguration().getCodecRegistry()); + + int responseSize = + 9 // header + + 4 // kind (ROWS) + + 4 // flags + + 4 // column count + + CBUtil.sizeOfString("system") + + CBUtil.sizeOfString("local") // global table specs + + CBUtil.sizeOfString("host_id") // column name + + 2 // column type (uuid) + + 4 // row count + + 4 + + 16; // uuid length + uuid + + for (int i = 0; i < 1000; i++) { + session().execute(statement); + } + + // Do not check for an exact value, in case there were heartbeats or control queries + assertThat(bytesSent.getCount()).isGreaterThanOrEqualTo(bytesSentBefore + requestSize * 1000); + assertThat(bytesReceived.getCount()) + .isGreaterThanOrEqualTo(bytesReceivedBefore + responseSize * 1000); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java new file mode 100644 index 00000000000..cd29bcf18e5 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; +import static org.testng.Assert.fail; + +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.schemabuilder.SchemaBuilder; +import com.datastax.driver.core.schemabuilder.TableOptions; +import com.datastax.driver.core.utils.CassandraVersion; +import com.google.common.collect.ImmutableMap; +import java.util.Map; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@CassandraVersion(value = "4.0.0", description = "Transient Replication is for Cassandra 4.0+") +@CCMConfig(config = "enable_transient_replication:true") +public class TransientReplicationTest extends CCMTestsSupport { + + private static final String TRANSIENT_REPLICATION_KEYSPACE = "transient_rep_ks"; + + @BeforeClass(groups = "short") + public void createKeyspace() { + Map replicationOptions = + ImmutableMap.of("class", "SimpleStrategy", "replication_factor", "3/1"); + + // create keyspace + session() + .execute( + SchemaBuilder.createKeyspace(TRANSIENT_REPLICATION_KEYSPACE) + .with() + .replication(replicationOptions)); + + // verify the replication factor in the metadata + assertThat(cluster().getMetadata().getKeyspace(TRANSIENT_REPLICATION_KEYSPACE).getReplication()) + .containsEntry("replication_factor", "3/1"); + } + + @AfterClass(groups = "short") + public void dropKeyspace() { + session().execute("drop keyspace " + TRANSIENT_REPLICATION_KEYSPACE); + } + + @Test(groups = "short") + public void should_handle_read_reapir_none() { + // create a table with read_repair set to 'NONE' + Session session = cluster().connect(TRANSIENT_REPLICATION_KEYSPACE); + session.execute( + SchemaBuilder.createTable("valid_table") + .addPartitionKey("pk", DataType.text()) + .addColumn("data", DataType.text()) + .withOptions() + .readRepair(TableOptions.ReadRepairValue.NONE)); + TableOptionsMetadata options = + cluster() + .getMetadata() + .getKeyspace(TRANSIENT_REPLICATION_KEYSPACE) + .getTable("valid_table") + .getOptions(); + + assertThat(options.getReadRepair()).isEqualTo("NONE"); + // assert that the default additional_write_policy exists as well + assertThat(options.getAdditionalWritePolicy()).isEqualTo("99p"); + } + + @Test(groups = "short") + public void should_fail_to_create_table_with_read_repair_blocking() { + /** + * Attempt to create a table with the default read_repair 'BLOCKING'. This should fail when the + * keyspace uses transient replicas. + */ + try { + Session session = cluster().connect(TRANSIENT_REPLICATION_KEYSPACE); + session.execute( + SchemaBuilder.createTable("invalid_table") + .addPartitionKey("pk", DataType.text()) + .addColumn("data", DataType.text())); + fail( + "Create table with default read_repair ('BLOCKING') is not supported when keyspace uses transient replicas."); + } catch (InvalidQueryException iqe) { + assertThat(iqe) + .hasMessageContaining( + "read_repair must be set to 'NONE' for transiently replicated keyspaces"); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/TupleTest.java b/driver-core/src/test/java/com/datastax/driver/core/TupleTest.java index f83664c01b3..5c0188b90b5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TupleTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TupleTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,482 +17,555 @@ */ package com.datastax.driver.core; -import java.nio.ByteBuffer; -import java.util.*; - -import com.google.common.base.Joiner; -import org.testng.annotations.Test; - import static org.assertj.core.api.Assertions.assertThat; import static org.testng.Assert.assertEquals; import static org.testng.Assert.fail; import com.datastax.driver.core.utils.CassandraVersion; +import com.google.common.base.Joiner; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import org.testng.annotations.Test; @CassandraVersion("2.1.0") public class TupleTest extends CCMTestsSupport { - private ProtocolVersion protocolVersion; - private Map samples; - - @Override - public void onTestContextInitialized() { - protocolVersion = ccm().getProtocolVersion(); - samples = PrimitiveTypeSamples.samples(protocolVersion); - execute("CREATE TABLE t (k int PRIMARY KEY, v frozen>)"); - } - - @Test(groups = "short") - public void simpleValueTest() throws Exception { - TupleType t = cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); - TupleValue v = t.newValue(); - v.setInt(0, 1); - v.setString(1, "a"); - v.setFloat(2, 1.0f); - - assertEquals(v.getType().getComponentTypes().size(), 3); - assertEquals(v.getType().getComponentTypes().get(0), DataType.cint()); - assertEquals(v.getType().getComponentTypes().get(1), DataType.text()); - assertEquals(v.getType().getComponentTypes().get(2), DataType.cfloat()); - assertEquals(v.getInt(0), 1); - assertEquals(v.getString(1), "a"); - assertEquals(v.getFloat(2), 1.0f); - - assertEquals(TypeCodec.tuple(t).format(v), "(1,'a',1.0)"); - } - - @Test(groups = "short") - public void simpleWriteReadTest() throws Exception { - session().execute("USE " + keyspace); - PreparedStatement ins = session().prepare("INSERT INTO t(k, v) VALUES (?, ?)"); - PreparedStatement sel = session().prepare("SELECT * FROM t WHERE k=?"); - - TupleType t = cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); - - int k = 1; - TupleValue v = t.newValue(1, "a", 1.0f); - - session().execute(ins.bind(k, v)); - TupleValue v2 = session().execute(sel.bind(k)).one().getTupleValue("v"); - - assertEquals(v2, v); - - // Test simple statement interpolation - k = 2; - v = t.newValue(2, "b", 2.0f); - - session().execute("INSERT INTO t(k, v) VALUES (?, ?)", k, v); - v2 = session().execute(sel.bind(k)).one().getTupleValue("v"); - - assertEquals(v2, v); + private ProtocolVersion protocolVersion; + private Map samples; + + @Override + public void 
onTestContextInitialized() { + protocolVersion = ccm().getProtocolVersion(); + samples = PrimitiveTypeSamples.samples(protocolVersion); + execute("CREATE TABLE t (k int PRIMARY KEY, v frozen>)"); + } + + @Test(groups = "short") + public void simpleValueTest() throws Exception { + TupleType t = + cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); + TupleValue v = t.newValue(); + v.setInt(0, 1); + v.setString(1, "a"); + v.setFloat(2, 1.0f); + + assertEquals(v.getType().getComponentTypes().size(), 3); + assertEquals(v.getType().getComponentTypes().get(0), DataType.cint()); + assertEquals(v.getType().getComponentTypes().get(1), DataType.text()); + assertEquals(v.getType().getComponentTypes().get(2), DataType.cfloat()); + assertEquals(v.getInt(0), 1); + assertEquals(v.getString(1), "a"); + assertEquals(v.getFloat(2), 1.0f); + + assertEquals(TypeCodec.tuple(t).format(v), "(1,'a',1.0)"); + } + + @Test(groups = "short") + public void simpleWriteReadTest() throws Exception { + session().execute("USE " + keyspace); + PreparedStatement ins = session().prepare("INSERT INTO t(k, v) VALUES (?, ?)"); + PreparedStatement sel = session().prepare("SELECT * FROM t WHERE k=?"); + + TupleType t = + cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); + + int k = 1; + TupleValue v = t.newValue(1, "a", 1.0f); + + session().execute(ins.bind(k, v)); + TupleValue v2 = session().execute(sel.bind(k)).one().getTupleValue("v"); + + assertEquals(v2, v); + + // Test simple statement interpolation + k = 2; + v = t.newValue(2, "b", 2.0f); + + session().execute("INSERT INTO t(k, v) VALUES (?, ?)", k, v); + v2 = session().execute(sel.bind(k)).one().getTupleValue("v"); + + assertEquals(v2, v); + } + + /** + * Basic test of tuple functionality. Original code found in + * python-driver:integration.standard.test_types.py:test_tuple_type + */ + @Test(groups = "short") + public void tupleTypeTest() throws Exception { + session() + .execute( + "CREATE KEYSPACE test_tuple_type " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE test_tuple_type"); + session() + .execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen>)"); + + TupleType t = + cluster() + .getMetadata() + .newTupleType(DataType.ascii(), DataType.cint(), DataType.cboolean()); + + // test non-prepared statement + TupleValue complete = t.newValue("foo", 123, true); + session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", complete); + TupleValue r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); + assertEquals(r, complete); + + // test incomplete tuples + try { + t.newValue("bar", 456); + fail(); + } catch (IllegalArgumentException e) { + // ok } - /** - * Basic test of tuple functionality. 
- * Original code found in python-driver:integration.standard.test_types.py:test_tuple_type - */ - @Test(groups = "short") - public void tupleTypeTest() throws Exception { - session().execute("CREATE KEYSPACE test_tuple_type " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE test_tuple_type"); - session().execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen>)"); - - TupleType t = cluster().getMetadata().newTupleType(DataType.ascii(), DataType.cint(), DataType.cboolean()); - - // test non-prepared statement - TupleValue complete = t.newValue("foo", 123, true); - session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", complete); - TupleValue r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); - assertEquals(r, complete); - - // test incomplete tuples - try { - t.newValue("bar", 456); - fail(); - } catch (IllegalArgumentException e) { - //ok - } - - // test incomplete tuples with new TupleType - TupleType t1 = cluster().getMetadata().newTupleType(DataType.ascii(), DataType.cint()); - TupleValue partial = t1.newValue("bar", 456); - TupleValue partionResult = t.newValue("bar", 456, null); - session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", partial); - r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); - assertEquals(r, partionResult); - - // test single value tuples - try { - t.newValue("zoo"); - fail(); - } catch (IllegalArgumentException e) { - //ok - } - - // test single value tuples with new TupleType - TupleType t2 = cluster().getMetadata().newTupleType(DataType.ascii()); - TupleValue subpartial = t2.newValue("zoo"); - TupleValue subpartialResult = t.newValue("zoo", null, null); - session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", subpartial); - r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); - assertEquals(r, subpartialResult); - - // test prepared statements - PreparedStatement prepared = session().prepare("INSERT INTO mytable (a, b) VALUES (?, ?)"); - session().execute(prepared.bind(3, complete)); - session().execute(prepared.bind(4, partial)); - session().execute(prepared.bind(5, subpartial)); - - prepared = session().prepare("SELECT b FROM mytable WHERE a=?"); - assertEquals(session().execute(prepared.bind(3)).one().getTupleValue("b"), complete); - assertEquals(session().execute(prepared.bind(4)).one().getTupleValue("b"), partionResult); - assertEquals(session().execute(prepared.bind(5)).one().getTupleValue("b"), subpartialResult); + // test incomplete tuples with new TupleType + TupleType t1 = cluster().getMetadata().newTupleType(DataType.ascii(), DataType.cint()); + TupleValue partial = t1.newValue("bar", 456); + TupleValue partionResult = t.newValue("bar", 456, null); + session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", partial); + r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); + assertEquals(r, partionResult); + + // test single value tuples + try { + t.newValue("zoo"); + fail(); + } catch (IllegalArgumentException e) { + // ok } - /** - * Test tuple types of lengths of 1, 2, 3, and 384 to ensure edge cases work - * as expected. 
- * Original code found in python-driver:integration.standard.test_types.py:test_tuple_type_varying_lengths - */ - @Test(groups = "short") - public void tupleTestTypeVaryingLengths() throws Exception { - session().execute("CREATE KEYSPACE test_tuple_type_varying_lengths " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE test_tuple_type_varying_lengths"); - - // programmatically create the table with tuples of said sizes - int[] lengths = {1, 2, 3, 384}; - ArrayList valueSchema = new ArrayList(); - for (int i : lengths) { - ArrayList ints = new ArrayList(); - for (int j = 0; j < i; ++j) { - ints.add("int"); - } - valueSchema.add(String.format(" v_%d frozen>", i, Joiner.on(',').join(ints))); - } - session().execute(String.format("CREATE TABLE mytable (k int PRIMARY KEY, %s)", Joiner.on(',').join(valueSchema))); - - // insert tuples into same key using different columns - // and verify the results - for (int i : lengths) { - // create tuple - ArrayList dataTypes = new ArrayList(); - ArrayList values = new ArrayList(); - for (int j = 0; j < i; ++j) { - dataTypes.add(DataType.cint()); - values.add(j); - } - TupleType t = new TupleType(dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleValue createdTuple = t.newValue(values.toArray()); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); - - // read tuple - TupleValue r = session().execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)).one().getTupleValue(String.format("v_%s", i)); - assertEquals(r, createdTuple); - } + // test single value tuples with new TupleType + TupleType t2 = cluster().getMetadata().newTupleType(DataType.ascii()); + TupleValue subpartial = t2.newValue("zoo"); + TupleValue subpartialResult = t.newValue("zoo", null, null); + session().execute("INSERT INTO mytable (a, b) VALUES (0, ?)", subpartial); + r = session().execute("SELECT b FROM mytable WHERE a=0").one().getTupleValue("b"); + assertEquals(r, subpartialResult); + + // test prepared statements + PreparedStatement prepared = session().prepare("INSERT INTO mytable (a, b) VALUES (?, ?)"); + session().execute(prepared.bind(3, complete)); + session().execute(prepared.bind(4, partial)); + session().execute(prepared.bind(5, subpartial)); + + prepared = session().prepare("SELECT b FROM mytable WHERE a=?"); + assertEquals(session().execute(prepared.bind(3)).one().getTupleValue("b"), complete); + assertEquals(session().execute(prepared.bind(4)).one().getTupleValue("b"), partionResult); + assertEquals(session().execute(prepared.bind(5)).one().getTupleValue("b"), subpartialResult); + } + + /** + * Test tuple types of lengths of 1, 2, 3, and 384 to ensure edge cases work as expected. 
Original + * code found in python-driver:integration.standard.test_types.py:test_tuple_type_varying_lengths + */ + @Test(groups = "short") + public void tupleTestTypeVaryingLengths() throws Exception { + session() + .execute( + "CREATE KEYSPACE test_tuple_type_varying_lengths " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE test_tuple_type_varying_lengths"); + + // programmatically create the table with tuples of said sizes + int[] lengths = {1, 2, 3, 384}; + ArrayList valueSchema = new ArrayList(); + for (int i : lengths) { + ArrayList ints = new ArrayList(); + for (int j = 0; j < i; ++j) { + ints.add("int"); + } + valueSchema.add(String.format(" v_%d frozen>", i, Joiner.on(',').join(ints))); } - - /** - * Ensure tuple subtypes are appropriately handled. - * Original code found in python-driver:integration.standard.test_types.py:test_tuple_subtypes - */ - @Test(groups = "short") - public void tupleSubtypesTest() throws Exception { - List DATA_TYPE_PRIMITIVES = new ArrayList(samples.keySet()); - session().execute("CREATE KEYSPACE test_tuple_subtypes " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE test_tuple_subtypes"); - - // programmatically create the table with a tuple of all datatypes - session().execute(String.format("CREATE TABLE mytable (k int PRIMARY KEY, v frozen>)", Joiner.on(',').join(DATA_TYPE_PRIMITIVES))); - - // insert tuples into same key using different columns - // and verify the results - int i = 1; - for (DataType ignored : DATA_TYPE_PRIMITIVES) { - // create tuples to be written and ensure they match with the expected response - // responses have trailing None values for every element that has not been written - ArrayList dataTypes = new ArrayList(); - ArrayList completeDataTypes = new ArrayList(); - ArrayList createdValues = new ArrayList(); - ArrayList completeValues = new ArrayList(); - - // create written portion of the arrays - for (int j = 0; j < i; ++j) { - dataTypes.add(DATA_TYPE_PRIMITIVES.get(j)); - completeDataTypes.add(DATA_TYPE_PRIMITIVES.get(j)); - createdValues.add(samples.get(DATA_TYPE_PRIMITIVES.get(j))); - completeValues.add(samples.get(DATA_TYPE_PRIMITIVES.get(j))); - } - - // complete portion of the arrays needed for trailing nulls - for (int j = 0; j < DATA_TYPE_PRIMITIVES.size() - i; ++j) { - completeDataTypes.add(DATA_TYPE_PRIMITIVES.get(i + j)); - completeValues.add(null); - } - - // actually create the tuples - TupleType t = new TupleType(dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleType t2 = new TupleType(completeDataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleValue createdTuple = t.newValue(createdValues.toArray()); - TupleValue completeTuple = t2.newValue(completeValues.toArray()); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v) VALUES (%s, ?)", i), createdTuple); - - // read tuple - TupleValue r = session().execute("SELECT v FROM mytable WHERE k=?", i).one().getTupleValue("v"); - - assertEquals(r, completeTuple); - ++i; - } + session() + .execute( + String.format( + "CREATE TABLE mytable (k int PRIMARY KEY, %s)", Joiner.on(',').join(valueSchema))); + + // insert tuples into same key using different columns + // and verify the results + for (int i : lengths) { + // create tuple + ArrayList dataTypes = new ArrayList(); + ArrayList values = new ArrayList(); + for (int j = 0; j < i; ++j) { + 
dataTypes.add(DataType.cint()); + values.add(j); + } + TupleType t = + new TupleType( + dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); + TupleValue createdTuple = t.newValue(values.toArray()); + + // write tuple + session() + .execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); + + // read tuple + TupleValue r = + session() + .execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) + .one() + .getTupleValue(String.format("v_%s", i)); + assertEquals(r, createdTuple); } - - /** - * Ensure tuple subtypes are appropriately handled for maps, sets, and lists. - * Original code found in python-driver:integration.standard.test_types.py:test_tuple_non_primitive_subtypes - */ - @Test(groups = "short") - public void tupleNonPrimitiveSubTypesTest() throws Exception { - List DATA_TYPE_PRIMITIVES = new ArrayList(samples.keySet()); - session().execute("CREATE KEYSPACE test_tuple_non_primitive_subtypes " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE test_tuple_non_primitive_subtypes"); - - ArrayList values = new ArrayList(); - - //create list values - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - values.add(String.format("v_%s frozen>>", values.size(), datatype)); - } - - // create set values - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - // Duration not supported in Set. - if (datatype != DataType.duration()) - values.add(String.format("v_%s frozen>>", values.size(), datatype)); - } - - // create map values - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - // Duration not supported as Map key. - if (datatype != DataType.duration()) - values.add(String.format("v_%s frozen>>", values.size(), datatype, datatype)); - } - - // create table - session().execute(String.format("CREATE TABLE mytable (k int PRIMARY KEY, %s)", Joiner.on(',').join(values))); - - - int i = 0; - // test tuple> - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - // create tuple - ArrayList dataTypes = new ArrayList(); - ArrayList createdValues = new ArrayList(); - - dataTypes.add(DataType.list(datatype)); - createdValues.add(Collections.singletonList(samples.get(datatype))); - - TupleType t = new TupleType(dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleValue createdTuple = t.newValue(createdValues.toArray()); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); - - // read tuple - TupleValue r = session().execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) - .one().getTupleValue(String.format("v_%s", i)); - - assertEquals(r, createdTuple); - ++i; - } - - // test tuple> - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - if (datatype == DataType.duration()) - continue; - - // create tuple - ArrayList dataTypes = new ArrayList(); - ArrayList createdValues = new ArrayList(); - - dataTypes.add(DataType.set(datatype)); - createdValues.add(new HashSet(Collections.singletonList(samples.get(datatype)))); - - TupleType t = new TupleType(dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleValue createdTuple = t.newValue(createdValues.toArray()); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); - - // read tuple - TupleValue r = session().execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) - .one().getTupleValue(String.format("v_%s", i)); - - assertEquals(r, 
createdTuple); - ++i; - } - - // test tuple> - for (DataType datatype : DATA_TYPE_PRIMITIVES) { - if (datatype == DataType.duration()) - continue; - // create tuple - ArrayList dataTypes = new ArrayList(); - ArrayList createdValues = new ArrayList(); - - HashMap hm = new HashMap(); - hm.put(samples.get(datatype), samples.get(datatype)); - - dataTypes.add(DataType.map(datatype, datatype)); - createdValues.add(hm); - - TupleType t = new TupleType(dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); - TupleValue createdTuple = t.newValue(createdValues.toArray()); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); - - // read tuple - TupleValue r = session().execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) - .one().getTupleValue(String.format("v_%s", i)); - - assertEquals(r, createdTuple); - ++i; - } + } + + /** + * Ensure tuple subtypes are appropriately handled. Original code found in + * python-driver:integration.standard.test_types.py:test_tuple_subtypes + */ + @Test(groups = "short") + public void tupleSubtypesTest() throws Exception { + List DATA_TYPE_PRIMITIVES = new ArrayList(samples.keySet()); + session() + .execute( + "CREATE KEYSPACE test_tuple_subtypes " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE test_tuple_subtypes"); + + // programmatically create the table with a tuple of all datatypes + session() + .execute( + String.format( + "CREATE TABLE mytable (k int PRIMARY KEY, v frozen>)", + Joiner.on(',').join(DATA_TYPE_PRIMITIVES))); + + // insert tuples into same key using different columns + // and verify the results + int i = 1; + for (DataType ignored : DATA_TYPE_PRIMITIVES) { + // create tuples to be written and ensure they match with the expected response + // responses have trailing None values for every element that has not been written + ArrayList dataTypes = new ArrayList(); + ArrayList completeDataTypes = new ArrayList(); + ArrayList createdValues = new ArrayList(); + ArrayList completeValues = new ArrayList(); + + // create written portion of the arrays + for (int j = 0; j < i; ++j) { + dataTypes.add(DATA_TYPE_PRIMITIVES.get(j)); + completeDataTypes.add(DATA_TYPE_PRIMITIVES.get(j)); + createdValues.add(samples.get(DATA_TYPE_PRIMITIVES.get(j))); + completeValues.add(samples.get(DATA_TYPE_PRIMITIVES.get(j))); + } + + // complete portion of the arrays needed for trailing nulls + for (int j = 0; j < DATA_TYPE_PRIMITIVES.size() - i; ++j) { + completeDataTypes.add(DATA_TYPE_PRIMITIVES.get(i + j)); + completeValues.add(null); + } + + // actually create the tuples + TupleType t = + new TupleType( + dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); + TupleType t2 = + new TupleType( + completeDataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); + TupleValue createdTuple = t.newValue(createdValues.toArray()); + TupleValue completeTuple = t2.newValue(completeValues.toArray()); + + // write tuple + session() + .execute(String.format("INSERT INTO mytable (k, v) VALUES (%s, ?)", i), createdTuple); + + // read tuple + TupleValue r = + session().execute("SELECT v FROM mytable WHERE k=?", i).one().getTupleValue("v"); + + assertEquals(r, completeTuple); + ++i; } - - /** - * Validates that tuple values generated from an attached type (cluster-provided TupleType) and - * a detached type (using TupleType.of) are the same. 
- * - * @since 2.2.0 - */ - @Test(groups = "short") - public void detachedTupleTypeTest() { - TupleType detachedType = TupleType.of(protocolVersion, CodecRegistry.DEFAULT_INSTANCE, - DataType.cint(), DataType.text(), DataType.cfloat()); - TupleValue detachedValue = detachedType.newValue(1, "hello", 2.0f); - - TupleType attachedType = cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); - TupleValue attachedValue = attachedType.newValue(1, "hello", 2.0f); - - assertThat(detachedValue).isEqualTo(attachedValue); + } + + /** + * Ensure tuple subtypes are appropriately handled for maps, sets, and lists. Original code found + * in python-driver:integration.standard.test_types.py:test_tuple_non_primitive_subtypes + */ + @Test(groups = "short") + public void tupleNonPrimitiveSubTypesTest() throws Exception { + List DATA_TYPE_PRIMITIVES = new ArrayList(samples.keySet()); + session() + .execute( + "CREATE KEYSPACE test_tuple_non_primitive_subtypes " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE test_tuple_non_primitive_subtypes"); + + ArrayList values = new ArrayList(); + + // create list values + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + values.add(String.format("v_%s frozen>>", values.size(), datatype)); } - /** - * Helper method for creating nested tuple schema - */ - private String nestedTuplesSchemaHelper(int depth) { - if (depth == 0) - return "int"; - else - return String.format("frozen>", nestedTuplesSchemaHelper(depth - 1)); + // create set values + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + // Duration not supported in Set. + if (datatype != DataType.duration()) + values.add(String.format("v_%s frozen>>", values.size(), datatype)); } - /** - * Helper method for creating nested tuples - */ - private TupleValue nestedTuplesCreatorHelper(int depth) { - if (depth == 1) { - TupleType baseTuple = cluster().getMetadata().newTupleType(DataType.cint()); - return baseTuple.newValue(303); - } else { - TupleValue innerTuple = nestedTuplesCreatorHelper(depth - 1); - TupleType t = cluster().getMetadata().newTupleType(innerTuple.getType()); - return t.newValue(innerTuple); - } + // create map values + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + // Duration not supported as Map key. + if (datatype != DataType.duration()) + values.add( + String.format("v_%s frozen>>", values.size(), datatype, datatype)); } - /** - * Ensure nested are appropriately handled. 
- * Original code found in python-driver:integration.standard.test_types.py:test_nested_tuples - */ - @Test(groups = "short") - public void nestedTuplesTest() throws Exception { - session().execute("CREATE KEYSPACE test_nested_tuples " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE test_nested_tuples"); - - // create a table with multiple sizes of nested tuples - session().execute(String.format("CREATE TABLE mytable (" + - "k int PRIMARY KEY, " + - "v_1 %s, " + - "v_2 %s, " + - "v_3 %s, " + - "v_32 %s)", nestedTuplesSchemaHelper(1), - nestedTuplesSchemaHelper(2), - nestedTuplesSchemaHelper(3), - nestedTuplesSchemaHelper(32))); - - for (int i : Arrays.asList(1, 2, 3, 32)) { - // create tuple - TupleValue createdTuple = nestedTuplesCreatorHelper(i); - - // write tuple - session().execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (?, ?)", i), i, createdTuple); - - // verify tuple was written and read correctly - TupleValue r = session().execute(String.format("SELECT v_%s FROM mytable WHERE k=?", i), i) - .one().getTupleValue(String.format("v_%s", i)); - - assertEquals(r, createdTuple); - } + // create table + session() + .execute( + String.format( + "CREATE TABLE mytable (k int PRIMARY KEY, %s)", Joiner.on(',').join(values))); + + int i = 0; + // test tuple> + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + // create tuple + ArrayList dataTypes = new ArrayList(); + ArrayList createdValues = new ArrayList(); + + dataTypes.add(DataType.list(datatype)); + createdValues.add(Collections.singletonList(samples.get(datatype))); + + TupleType t = + new TupleType( + dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); + TupleValue createdTuple = t.newValue(createdValues.toArray()); + + // write tuple + session() + .execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); + + // read tuple + TupleValue r = + session() + .execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) + .one() + .getTupleValue(String.format("v_%s", i)); + + assertEquals(r, createdTuple); + ++i; } - /** - * Test for inserting null Tuple values into UDT's - * Original code found in python-driver:integration.standard.test_types.py:test_tuples_with_nulls - */ - @Test(groups = "short") - public void testTuplesWithNulls() throws Exception { - // create keyspace - session().execute("CREATE KEYSPACE testTuplesWithNulls " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - session().execute("USE testTuplesWithNulls"); + // test tuple> + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + if (datatype == DataType.duration()) continue; - // create UDT - session().execute("CREATE TYPE user (a text, b frozen>)"); - session().execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)"); + // create tuple + ArrayList dataTypes = new ArrayList(); + ArrayList createdValues = new ArrayList(); - // insert UDT data - UserType userTypeDef = cluster().getMetadata().getKeyspace("testTuplesWithNulls").getUserType("user"); - UDTValue userType = userTypeDef.newValue(); + dataTypes.add(DataType.set(datatype)); + createdValues.add(new HashSet(Collections.singletonList(samples.get(datatype)))); - TupleType t = cluster().getMetadata().newTupleType(DataType.text(), DataType.cint(), DataType.uuid(), DataType.blob()); - TupleValue v = t.newValue(null, null, null, null); - userType.setTupleValue("b", v); + TupleType t = + new TupleType( + dataTypes, protocolVersion, 
cluster().getConfiguration().getCodecRegistry()); + TupleValue createdTuple = t.newValue(createdValues.toArray()); - PreparedStatement ins = session().prepare("INSERT INTO mytable (a, b) VALUES (?, ?)"); - session().execute(ins.bind(0, userType)); + // write tuple + session() + .execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM mytable"); - List rows = rs.all(); - assertEquals(1, rows.size()); + // read tuple + TupleValue r = + session() + .execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) + .one() + .getTupleValue(String.format("v_%s", i)); - Row row = rows.get(0); + assertEquals(r, createdTuple); + ++i; + } - assertEquals(row.getInt("a"), 0); - assertEquals(row.getUDTValue("b"), userType); + // test tuple> + for (DataType datatype : DATA_TYPE_PRIMITIVES) { + if (datatype == DataType.duration()) continue; + // create tuple + ArrayList dataTypes = new ArrayList(); + ArrayList createdValues = new ArrayList(); + + HashMap hm = new HashMap(); + hm.put(samples.get(datatype), samples.get(datatype)); + + dataTypes.add(DataType.map(datatype, datatype)); + createdValues.add(hm); + + TupleType t = + new TupleType( + dataTypes, protocolVersion, cluster().getConfiguration().getCodecRegistry()); + TupleValue createdTuple = t.newValue(createdValues.toArray()); + + // write tuple + session() + .execute(String.format("INSERT INTO mytable (k, v_%s) VALUES (0, ?)", i), createdTuple); + + // read tuple + TupleValue r = + session() + .execute(String.format("SELECT v_%s FROM mytable WHERE k=0", i)) + .one() + .getTupleValue(String.format("v_%s", i)); + + assertEquals(r, createdTuple); + ++i; + } + } + + /** + * Validates that tuple values generated from an attached type (cluster-provided TupleType) and a + * detached type (using TupleType.of) are the same. + * + * @since 2.2.0 + */ + @Test(groups = "short") + public void detachedTupleTypeTest() { + TupleType detachedType = + TupleType.of( + protocolVersion, + CodecRegistry.DEFAULT_INSTANCE, + DataType.cint(), + DataType.text(), + DataType.cfloat()); + TupleValue detachedValue = detachedType.newValue(1, "hello", 2.0f); + + TupleType attachedType = + cluster().getMetadata().newTupleType(DataType.cint(), DataType.text(), DataType.cfloat()); + TupleValue attachedValue = attachedType.newValue(1, "hello", 2.0f); + + assertThat(detachedValue).isEqualTo(attachedValue); + } + + /** Helper method for creating nested tuple schema */ + private String nestedTuplesSchemaHelper(int depth) { + if (depth == 0) return "int"; + else return String.format("frozen>", nestedTuplesSchemaHelper(depth - 1)); + } + + /** Helper method for creating nested tuples */ + private TupleValue nestedTuplesCreatorHelper(int depth) { + if (depth == 1) { + TupleType baseTuple = cluster().getMetadata().newTupleType(DataType.cint()); + return baseTuple.newValue(303); + } else { + TupleValue innerTuple = nestedTuplesCreatorHelper(depth - 1); + TupleType t = cluster().getMetadata().newTupleType(innerTuple.getType()); + return t.newValue(innerTuple); + } + } + + /** + * Ensure nested are appropriately handled. 
Original code found in + * python-driver:integration.standard.test_types.py:test_nested_tuples + */ + @Test(groups = "short") + public void nestedTuplesTest() throws Exception { + session() + .execute( + "CREATE KEYSPACE test_nested_tuples " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE test_nested_tuples"); + + // create a table with multiple sizes of nested tuples + session() + .execute( + String.format( + "CREATE TABLE mytable (" + + "k int PRIMARY KEY, " + + "v_1 %s, " + + "v_2 %s, " + + "v_3 %s, " + + "v_32 %s)", + nestedTuplesSchemaHelper(1), + nestedTuplesSchemaHelper(2), + nestedTuplesSchemaHelper(3), + nestedTuplesSchemaHelper(32))); - // test empty strings - v = t.newValue("", null, null, ByteBuffer.allocate(0)); - userType.setTupleValue("b", v); - session().execute(ins.bind(0, userType)); + for (int i : Arrays.asList(1, 2, 3, 32)) { + // create tuple + TupleValue createdTuple = nestedTuplesCreatorHelper(i); - // retrieve and verify data - rs = session().execute("SELECT * FROM mytable"); - rows = rs.all(); - assertEquals(1, rows.size()); + // write tuple + session() + .execute( + String.format("INSERT INTO mytable (k, v_%s) VALUES (?, ?)", i), i, createdTuple); - row = rows.get(0); + // verify tuple was written and read correctly + TupleValue r = + session() + .execute(String.format("SELECT v_%s FROM mytable WHERE k=?", i), i) + .one() + .getTupleValue(String.format("v_%s", i)); - assertEquals(row.getInt("a"), 0); - assertEquals(row.getUDTValue("b"), userType); + assertEquals(r, createdTuple); } + } + + /** + * Test for inserting null Tuple values into UDT's Original code found in + * python-driver:integration.standard.test_types.py:test_tuples_with_nulls + */ + @Test(groups = "short") + public void testTuplesWithNulls() throws Exception { + // create keyspace + session() + .execute( + "CREATE KEYSPACE testTuplesWithNulls " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + session().execute("USE testTuplesWithNulls"); + + // create UDT + session().execute("CREATE TYPE user (a text, b frozen>)"); + session().execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)"); + + // insert UDT data + UserType userTypeDef = + cluster().getMetadata().getKeyspace("testTuplesWithNulls").getUserType("user"); + UDTValue userType = userTypeDef.newValue(); + + TupleType t = + cluster() + .getMetadata() + .newTupleType(DataType.text(), DataType.cint(), DataType.uuid(), DataType.blob()); + TupleValue v = t.newValue(null, null, null, null); + userType.setTupleValue("b", v); + + PreparedStatement ins = session().prepare("INSERT INTO mytable (a, b) VALUES (?, ?)"); + session().execute(ins.bind(0, userType)); + + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM mytable"); + List rows = rs.all(); + assertEquals(1, rows.size()); + + Row row = rows.get(0); + + assertEquals(row.getInt("a"), 0); + assertEquals(row.getUDTValue("b"), userType); + + // test empty strings + v = t.newValue("", null, null, ByteBuffer.allocate(0)); + userType.setTupleValue("b", v); + session().execute(ins.bind(0, userType)); + + // retrieve and verify data + rs = session().execute("SELECT * FROM mytable"); + rows = rs.all(); + assertEquals(1, rows.size()); + + row = rows.get(0); + + assertEquals(row.getInt("a"), 0); + assertEquals(row.getUDTValue("b"), userType); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java 
b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java index c0a4305df21..cdfd42d68dd 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,98 +17,118 @@ */ package com.datastax.driver.core; -import com.google.common.reflect.TypeToken; -import org.assertj.core.api.AbstractAssert; - import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Fail.fail; +import com.google.common.reflect.TypeToken; +import org.assertj.core.api.AbstractAssert; + @SuppressWarnings("unused") public class TypeCodecAssert extends AbstractAssert, TypeCodec> { - private ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; - - protected TypeCodecAssert(TypeCodec actual) { - super(actual, TypeCodecAssert.class); - } - - public TypeCodecAssert accepts(TypeToken javaType) { - assertThat(actual.accepts(javaType)).as("Codec %s should accept %s but it does not", actual, javaType).isTrue(); - return this; - } - - public TypeCodecAssert doesNotAccept(TypeToken javaType) { - assertThat(actual.accepts(javaType)).as("Codec %s should not accept %s but it does", actual, javaType).isFalse(); - return this; - } - - public TypeCodecAssert accepts(Class javaType) { - assertThat(actual.accepts(javaType)).as("Codec %s should accept %s but it does not", actual, javaType).isTrue(); - return this; - } - - public TypeCodecAssert doesNotAccept(Class javaType) { - assertThat(actual.accepts(javaType)).as("Codec %s should not accept %s but it does", actual, javaType).isFalse(); - return this; - } - - public TypeCodecAssert accepts(Object value) { - assertThat(actual.accepts(value)).as("Codec %s should accept %s but it does not", actual, value).isTrue(); - return this; - } - - public TypeCodecAssert doesNotAccept(Object value) { - assertThat(actual.accepts(value)).as("Codec %s should not accept %s but it does", actual, value).isFalse(); - return this; - } - - public TypeCodecAssert accepts(DataType cqlType) { - assertThat(actual.accepts(cqlType)).as("Codec %s should accept %s but it does not", actual, cqlType).isTrue(); - return this; + private ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; + + protected TypeCodecAssert(TypeCodec actual) { + super(actual, TypeCodecAssert.class); + } + + public TypeCodecAssert accepts(TypeToken javaType) { + assertThat(actual.accepts(javaType)) + .as("Codec %s should accept %s but it does not", actual, javaType) + .isTrue(); + return this; + } + + public TypeCodecAssert doesNotAccept(TypeToken javaType) { + 
assertThat(actual.accepts(javaType)) + .as("Codec %s should not accept %s but it does", actual, javaType) + .isFalse(); + return this; + } + + public TypeCodecAssert accepts(Class javaType) { + assertThat(actual.accepts(javaType)) + .as("Codec %s should accept %s but it does not", actual, javaType) + .isTrue(); + return this; + } + + public TypeCodecAssert doesNotAccept(Class javaType) { + assertThat(actual.accepts(javaType)) + .as("Codec %s should not accept %s but it does", actual, javaType) + .isFalse(); + return this; + } + + public TypeCodecAssert accepts(Object value) { + assertThat(actual.accepts(value)) + .as("Codec %s should accept %s but it does not", actual, value) + .isTrue(); + return this; + } + + public TypeCodecAssert doesNotAccept(Object value) { + assertThat(actual.accepts(value)) + .as("Codec %s should not accept %s but it does", actual, value) + .isFalse(); + return this; + } + + public TypeCodecAssert accepts(DataType cqlType) { + assertThat(actual.accepts(cqlType)) + .as("Codec %s should accept %s but it does not", actual, cqlType) + .isTrue(); + return this; + } + + public TypeCodecAssert doesNotAccept(DataType cqlType) { + assertThat(actual.accepts(cqlType)) + .as("Codec %s should not accept %s but it does", actual, cqlType) + .isFalse(); + return this; + } + + public TypeCodecAssert withProtocolVersion(ProtocolVersion version) { + if (version == null) fail("ProtocolVersion cannot be null"); + this.version = version; + return this; + } + + @SuppressWarnings("unchecked") + public TypeCodecAssert canSerialize(Object value) { + if (version == null) fail("ProtocolVersion cannot be null"); + try { + assertThat(actual.deserialize(actual.serialize((T) value, version), version)) + .isEqualTo(value); + } catch (Exception e) { + fail( + String.format( + "Codec is supposed to serialize this value but it actually doesn't: %s", value), + e); } - - public TypeCodecAssert doesNotAccept(DataType cqlType) { - assertThat(actual.accepts(cqlType)).as("Codec %s should not accept %s but it does", actual, cqlType).isFalse(); - return this; + return this; + } + + @SuppressWarnings("unchecked") + public TypeCodecAssert cannotSerialize(Object value) { + if (version == null) fail("ProtocolVersion cannot be null"); + try { + actual.serialize((T) value, version); + fail("Should not have been able to serialize " + value + " with " + actual); + } catch (Exception e) { + // ok } - - public TypeCodecAssert withProtocolVersion(ProtocolVersion version) { - if (version == null) fail("ProtocolVersion cannot be null"); - this.version = version; - return this; - } - - @SuppressWarnings("unchecked") - public TypeCodecAssert canSerialize(Object value) { - if (version == null) fail("ProtocolVersion cannot be null"); - try { - assertThat(actual.deserialize(actual.serialize((T) value, version), version)).isEqualTo(value); - } catch (Exception e) { - fail(String.format("Codec is supposed to serialize this value but it actually doesn't: %s", value), e); - } - return this; - } - - @SuppressWarnings("unchecked") - public TypeCodecAssert cannotSerialize(Object value) { - if (version == null) fail("ProtocolVersion cannot be null"); - try { - actual.serialize((T) value, version); - fail("Should not have been able to serialize " + value + " with " + actual); - } catch (Exception e) { - //ok - } - return this; - } - - @SuppressWarnings("unchecked") - public TypeCodecAssert cannotFormat(Object value) { - try { - actual.format((T) value); - fail("Should not have been able to format " + value + " with " + actual); - } 
catch (Exception e) { - // ok - } - return this; + return this; + } + + @SuppressWarnings("unchecked") + public TypeCodecAssert cannotFormat(Object value) { + try { + actual.format((T) value); + fail("Should not have been able to format " + value + " with " + actual); + } catch (Exception e) { + // ok } + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecCollectionsIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecCollectionsIntegrationTest.java index 094e052e134..2a2d6ea57dc 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecCollectionsIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecCollectionsIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,166 +17,189 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Sets.newHashSet; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.querybuilder.BuiltStatement; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.ImmutableMap; import com.google.common.reflect.TypeToken; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.math.BigDecimal; import java.math.BigInteger; import java.util.List; import java.util.Map; import java.util.Set; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Sets.newHashSet; -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; @CassandraVersion("2.0.0") public class TypeCodecCollectionsIntegrationTest extends CCMTestsSupport { - private final String insertQuery = "INSERT INTO \"myTable2\" (c_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal) VALUES (?, ?, ?, ?, ?, ?, ?)"; - - private final String selectQuery = "SELECT c_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal FROM \"myTable2\" WHERE c_int = ?"; - - private BuiltStatement insertStmt; - private BuiltStatement selectStmt; - - private int n_int = 42; - private List l_int = newArrayList(42, 43); - private List l_bigint = 
newArrayList(42L, 43L); - private Set s_float = newHashSet(42.42f, 43.43f); - private Set s_double = newHashSet(42.42d, 43.43d); - private Map m_varint = ImmutableMap.of(42, new BigInteger("424242"), 43, new BigInteger("434343")); - private Map m_decimal = ImmutableMap.of(42, new BigDecimal("424242.42"), 43, new BigDecimal("434343.43")); - - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE IF NOT EXISTS \"myTable2\" (" - + "c_int int PRIMARY KEY, " - + "l_int list, " - + "l_bigint list, " - + "s_float set, " - + "s_double set, " - + "m_varint map, " - + "m_decimal map" - + ")"); - } - - @BeforeMethod(groups = "short") - public void createBuiltStatements() throws Exception { - insertStmt = insertInto("\"myTable2\"") - .value("c_int", bindMarker()) - .value("l_int", bindMarker()) - .value("l_bigint", bindMarker()) - .value("s_float", bindMarker()) - .value("s_double", bindMarker()) - .value("m_varint", bindMarker()) - .value("m_decimal", bindMarker()); - selectStmt = select("c_int", "l_int", "l_bigint", "s_float", "s_double", "m_varint", "m_decimal") - .from("\"myTable2\"") - .where(eq("c_int", bindMarker())); - } - - @Test(groups = "short") - public void should_use_collection_codecs_with_simple_statements() { - session().execute(insertQuery, n_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal); - ResultSet rows = session().execute(selectQuery, n_int); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_collection_codecs_with_prepared_statements_1() { - session().execute(session().prepare(insertQuery).bind(n_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal)); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind(n_int)); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_collection_codecs_with_prepared_statements_2() { - session().execute(session().prepare(insertQuery).bind() + private final String insertQuery = + "INSERT INTO \"myTable2\" (c_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal) VALUES (?, ?, ?, ?, ?, ?, ?)"; + + private final String selectQuery = + "SELECT c_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal FROM \"myTable2\" WHERE c_int = ?"; + + private BuiltStatement insertStmt; + private BuiltStatement selectStmt; + + private int n_int = 42; + private List l_int = newArrayList(42, 43); + private List l_bigint = newArrayList(42L, 43L); + private Set s_float = newHashSet(42.42f, 43.43f); + private Set s_double = newHashSet(42.42d, 43.43d); + private Map m_varint = + ImmutableMap.of(42, new BigInteger("424242"), 43, new BigInteger("434343")); + private Map m_decimal = + ImmutableMap.of(42, new BigDecimal("424242.42"), 43, new BigDecimal("434343.43")); + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE IF NOT EXISTS \"myTable2\" (" + + "c_int int PRIMARY KEY, " + + "l_int list, " + + "l_bigint list, " + + "s_float set, " + + "s_double set, " + + "m_varint map, " + + "m_decimal map" + + ")"); + } + + @BeforeMethod(groups = "short") + public void createBuiltStatements() throws Exception { + insertStmt = + insertInto("\"myTable2\"") + .value("c_int", bindMarker()) + .value("l_int", bindMarker()) + .value("l_bigint", bindMarker()) + .value("s_float", bindMarker()) + .value("s_double", bindMarker()) + .value("m_varint", bindMarker()) + .value("m_decimal", bindMarker()); + selectStmt = + select("c_int", "l_int", "l_bigint", "s_float", 
"s_double", "m_varint", "m_decimal") + .from("\"myTable2\"") + .where(eq("c_int", bindMarker())); + } + + @Test(groups = "short") + public void should_use_collection_codecs_with_simple_statements() { + session().execute(insertQuery, n_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal); + ResultSet rows = session().execute(selectQuery, n_int); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_collection_codecs_with_prepared_statements_1() { + session() + .execute( + session() + .prepare(insertQuery) + .bind(n_int, l_int, l_bigint, s_float, s_double, m_varint, m_decimal)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind(n_int)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_collection_codecs_with_prepared_statements_2() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() .setInt(0, n_int) .setList(1, l_int) .setList(2, l_bigint, Long.class) // variant with element type explicitly set .setSet(3, s_float) - .setSet(4, s_double, TypeToken.of(Double.class)) // variant with element type explicitly set + .setSet( + 4, + s_double, + TypeToken.of(Double.class)) // variant with element type explicitly set .setMap(5, m_varint) - .setMap(6, m_decimal, Integer.class, BigDecimal.class) // variant with element type explicitly set - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, n_int) - ); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_collection_codecs_with_prepared_statements_3() { - session().execute(session().prepare(insertQuery).bind() - .setInt(0, n_int) - .set(1, l_int, TypeTokens.listOf(Integer.class)) - .set(2, l_bigint, TypeTokens.listOf(Long.class)) - .set(3, s_float, TypeTokens.setOf(Float.class)) - .set(4, s_double, TypeTokens.setOf(Double.class)) - .set(5, m_varint, TypeTokens.mapOf(Integer.class, BigInteger.class)) - .set(6, m_decimal, TypeTokens.mapOf(Integer.class, BigDecimal.class)) - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, n_int) - ); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_collection_codecs_with_built_statements() { - session().execute(session().prepare(insertStmt).bind() - .setInt(0, n_int) - .set(1, l_int, TypeTokens.listOf(Integer.class)) - .set(2, l_bigint, TypeTokens.listOf(Long.class)) - .set(3, s_float, TypeTokens.setOf(Float.class)) - .set(4, s_double, TypeTokens.setOf(Double.class)) - .set(5, m_varint, TypeTokens.mapOf(Integer.class, BigInteger.class)) - .set(6, m_decimal, TypeTokens.mapOf(Integer.class, BigDecimal.class)) - ); - PreparedStatement ps = session().prepare(selectStmt); - ResultSet rows = session().execute(ps.bind() - .setInt(0, n_int) - ); - Row row = rows.one(); - assertRow(row); - } - - private void assertRow(Row row) { - assertThat(row.getInt(0)).isEqualTo(n_int); - assertThat(row.getList(1, Integer.class)).isEqualTo(l_int); - assertThat(row.getList(2, Long.class)).isEqualTo(l_bigint); - assertThat(row.getSet(3, Float.class)).isEqualTo(s_float); - assertThat(row.getSet(4, Double.class)).isEqualTo(s_double); - assertThat(row.getMap(5, Integer.class, BigInteger.class)).isEqualTo(m_varint); - assertThat(row.getMap(6, Integer.class, BigDecimal.class)).isEqualTo(m_decimal); - // with get + type - assertThat(row.get(1, 
TypeTokens.listOf(Integer.class))).isEqualTo(l_int); - assertThat(row.get(2, TypeTokens.listOf(Long.class))).isEqualTo(l_bigint); - assertThat(row.get(3, TypeTokens.setOf(Float.class))).isEqualTo(s_float); - assertThat(row.get(4, TypeTokens.setOf(Double.class))).isEqualTo(s_double); - assertThat(row.get(5, TypeTokens.mapOf(Integer.class, BigInteger.class))).isEqualTo(m_varint); - assertThat(row.get(6, TypeTokens.mapOf(Integer.class, BigDecimal.class))).isEqualTo(m_decimal); - // with getObject - assertThat(row.getObject(1)).isEqualTo(l_int); - assertThat(row.getObject(2)).isEqualTo(l_bigint); - assertThat(row.getObject(3)).isEqualTo(s_float); - assertThat(row.getObject(4)).isEqualTo(s_double); - assertThat(row.getObject(5)).isEqualTo(m_varint); - assertThat(row.getObject(6)).isEqualTo(m_decimal); - } + .setMap( + 6, + m_decimal, + Integer.class, + BigDecimal.class) // variant with element type explicitly set + ); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, n_int)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_collection_codecs_with_prepared_statements_3() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() + .setInt(0, n_int) + .set(1, l_int, TypeTokens.listOf(Integer.class)) + .set(2, l_bigint, TypeTokens.listOf(Long.class)) + .set(3, s_float, TypeTokens.setOf(Float.class)) + .set(4, s_double, TypeTokens.setOf(Double.class)) + .set(5, m_varint, TypeTokens.mapOf(Integer.class, BigInteger.class)) + .set(6, m_decimal, TypeTokens.mapOf(Integer.class, BigDecimal.class))); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, n_int)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_collection_codecs_with_built_statements() { + session() + .execute( + session() + .prepare(insertStmt) + .bind() + .setInt(0, n_int) + .set(1, l_int, TypeTokens.listOf(Integer.class)) + .set(2, l_bigint, TypeTokens.listOf(Long.class)) + .set(3, s_float, TypeTokens.setOf(Float.class)) + .set(4, s_double, TypeTokens.setOf(Double.class)) + .set(5, m_varint, TypeTokens.mapOf(Integer.class, BigInteger.class)) + .set(6, m_decimal, TypeTokens.mapOf(Integer.class, BigDecimal.class))); + PreparedStatement ps = session().prepare(selectStmt); + ResultSet rows = session().execute(ps.bind().setInt(0, n_int)); + Row row = rows.one(); + assertRow(row); + } + + private void assertRow(Row row) { + assertThat(row.getInt(0)).isEqualTo(n_int); + assertThat(row.getList(1, Integer.class)).isEqualTo(l_int); + assertThat(row.getList(2, Long.class)).isEqualTo(l_bigint); + assertThat(row.getSet(3, Float.class)).isEqualTo(s_float); + assertThat(row.getSet(4, Double.class)).isEqualTo(s_double); + assertThat(row.getMap(5, Integer.class, BigInteger.class)).isEqualTo(m_varint); + assertThat(row.getMap(6, Integer.class, BigDecimal.class)).isEqualTo(m_decimal); + // with get + type + assertThat(row.get(1, TypeTokens.listOf(Integer.class))).isEqualTo(l_int); + assertThat(row.get(2, TypeTokens.listOf(Long.class))).isEqualTo(l_bigint); + assertThat(row.get(3, TypeTokens.setOf(Float.class))).isEqualTo(s_float); + assertThat(row.get(4, TypeTokens.setOf(Double.class))).isEqualTo(s_double); + assertThat(row.get(5, TypeTokens.mapOf(Integer.class, BigInteger.class))).isEqualTo(m_varint); + assertThat(row.get(6, TypeTokens.mapOf(Integer.class, BigDecimal.class))).isEqualTo(m_decimal); + // with getObject + 
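+ // getObject falls back to the codec registered for each column's CQL type (the default codec),
+ // so the assertions below expect the same Java collections that were inserted.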
assertThat(row.getObject(1)).isEqualTo(l_int); + assertThat(row.getObject(2)).isEqualTo(l_bigint); + assertThat(row.getObject(3)).isEqualTo(s_float); + assertThat(row.getObject(4)).isEqualTo(s_double); + assertThat(row.getObject(5)).isEqualTo(m_varint); + assertThat(row.getObject(6)).isEqualTo(m_decimal); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecEncapsulationIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecEncapsulationIntegrationTest.java index c1dba310ff6..5bfad36f6f4 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecEncapsulationIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecEncapsulationIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,271 +17,306 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.querybuilder.BuiltStatement; import com.datastax.driver.core.utils.CassandraVersion; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.reflect.TypeParameter; import com.google.common.reflect.TypeToken; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; public class TypeCodecEncapsulationIntegrationTest extends CCMTestsSupport { - // @formatter:off - private static final TypeToken> NUMBERBOX_OF_INTEGER_TOKEN = new TypeToken>() {}; - private static final TypeToken> NUMBERBOX_OF_LONG_TOKEN = new TypeToken>() {}; - private static final TypeToken> NUMBERBOX_OF_FLOAT_TOKEN = new TypeToken>() {}; - private static final TypeToken> NUMBERBOX_OF_DOUBLE_TOKEN = new TypeToken>() {}; - private static final TypeToken> NUMBERBOX_OF_BIGINTEGER_TOKEN = new TypeToken>() {}; - private static final TypeToken> NUMBERBOX_OF_BIGDECIMAL_TOKEN = new TypeToken>() {}; - // @formatter:on - - private final String insertQuery = "INSERT INTO \"myTable\" (c_int, c_bigint, c_float, c_double, c_varint, c_decimal) VALUES (?, ?, ?, ?, 
?, ?)"; - - private final String selectQuery = "SELECT c_int, c_bigint, c_float, c_double, c_varint, c_decimal FROM \"myTable\" WHERE c_int = ? and c_bigint = ?"; - - private BuiltStatement insertStmt; - private BuiltStatement selectStmt; - - private int n_int = 42; - private long n_bigint = 4242; - private float n_float = 42.42f; - private double n_double = 4242.42d; - private BigInteger n_varint = new BigInteger("424242"); - private BigDecimal n_decimal = new BigDecimal("424242.42"); - - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE \"myTable\" (" - + "c_int int, " - + "c_bigint bigint, " - + "c_float float, " - + "c_double double, " - + "c_varint varint, " - + "c_decimal decimal, " - + "PRIMARY KEY (c_int, c_bigint)" - + ")" - ); - } - - @Override - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withCodecRegistry( - new CodecRegistry() - .register( - new NumberBoxCodec(TypeCodec.cint()), - new NumberBoxCodec(TypeCodec.bigint()), - new NumberBoxCodec(TypeCodec.cfloat()), - new NumberBoxCodec(TypeCodec.cdouble()), - new NumberBoxCodec(TypeCodec.varint()), - new NumberBoxCodec(TypeCodec.decimal()) - ) - ); - } - - @BeforeMethod(groups = "short") - public void createBuiltStatements() throws Exception { - insertStmt = insertInto("\"myTable\"") - .value("c_int", bindMarker()) - .value("c_bigint", bindMarker()) - .value("c_float", bindMarker()) - .value("c_double", bindMarker()) - .value("c_varint", bindMarker()) - .value("c_decimal", bindMarker()); - selectStmt = select("c_int", "c_bigint", "c_float", "c_double", "c_varint", "c_decimal") - .from("\"myTable\"") - .where(eq("c_int", bindMarker())) - .and(eq("c_bigint", bindMarker())); - } - - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_use_custom_codecs_with_simple_statements() { - session().execute(insertQuery, - n_int, - new NumberBox(n_bigint), - new NumberBox(n_float), - new NumberBox(n_double), - new NumberBox(n_varint), - new NumberBox(n_decimal)); - ResultSet rows = session().execute(selectQuery, n_int, new NumberBox(n_bigint)); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_custom_codecs_with_prepared_statements_1() { - session().execute(session().prepare(insertQuery).bind( - n_int, - new NumberBox(n_bigint), - new NumberBox(n_float), - new NumberBox(n_double), - new NumberBox(n_varint), - new NumberBox(n_decimal))); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind(n_int, new NumberBox(n_bigint))); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_custom_codecs_with_prepared_statements_2() { - session().execute(session().prepare(insertQuery).bind() - .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) - .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN) - .set(2, new NumberBox(n_float), NUMBERBOX_OF_FLOAT_TOKEN) - .set(3, new NumberBox(n_double), NUMBERBOX_OF_DOUBLE_TOKEN) - .set(4, new NumberBox(n_varint), NUMBERBOX_OF_BIGINTEGER_TOKEN) - .set(5, new NumberBox(n_decimal), NUMBERBOX_OF_BIGDECIMAL_TOKEN) - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) - .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN) - ); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_custom_codecs_with_built_statements_1() { - 
session().execute(session().prepare(insertStmt).bind( - n_int, - new NumberBox(n_bigint), - new NumberBox(n_float), - new NumberBox(n_double), - new NumberBox(n_varint), - new NumberBox(n_decimal))); - PreparedStatement ps = session().prepare(selectStmt); - ResultSet rows = session().execute(ps.bind(n_int, new NumberBox(n_bigint))); - Row row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_use_custom_codecs_with_built_statements_2() { - session().execute(session().prepare(insertStmt).bind() + // @formatter:off + private static final TypeToken> NUMBERBOX_OF_INTEGER_TOKEN = + new TypeToken>() {}; + private static final TypeToken> NUMBERBOX_OF_LONG_TOKEN = + new TypeToken>() {}; + private static final TypeToken> NUMBERBOX_OF_FLOAT_TOKEN = + new TypeToken>() {}; + private static final TypeToken> NUMBERBOX_OF_DOUBLE_TOKEN = + new TypeToken>() {}; + private static final TypeToken> NUMBERBOX_OF_BIGINTEGER_TOKEN = + new TypeToken>() {}; + private static final TypeToken> NUMBERBOX_OF_BIGDECIMAL_TOKEN = + new TypeToken>() {}; + // @formatter:on + + private final String insertQuery = + "INSERT INTO \"myTable\" (c_int, c_bigint, c_float, c_double, c_varint, c_decimal) VALUES (?, ?, ?, ?, ?, ?)"; + + private final String selectQuery = + "SELECT c_int, c_bigint, c_float, c_double, c_varint, c_decimal FROM \"myTable\" WHERE c_int = ? and c_bigint = ?"; + + private BuiltStatement insertStmt; + private BuiltStatement selectStmt; + + private int n_int = 42; + private long n_bigint = 4242; + private float n_float = 42.42f; + private double n_double = 4242.42d; + private BigInteger n_varint = new BigInteger("424242"); + private BigDecimal n_decimal = new BigDecimal("424242.42"); + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE \"myTable\" (" + + "c_int int, " + + "c_bigint bigint, " + + "c_float float, " + + "c_double double, " + + "c_varint varint, " + + "c_decimal decimal, " + + "PRIMARY KEY (c_int, c_bigint)" + + ")"); + } + + @Override + public Cluster.Builder createClusterBuilder() { + return Cluster.builder() + .withCodecRegistry( + new CodecRegistry() + .register( + new NumberBoxCodec(TypeCodec.cint()), + new NumberBoxCodec(TypeCodec.bigint()), + new NumberBoxCodec(TypeCodec.cfloat()), + new NumberBoxCodec(TypeCodec.cdouble()), + new NumberBoxCodec(TypeCodec.varint()), + new NumberBoxCodec(TypeCodec.decimal()))); + } + + @BeforeMethod(groups = "short") + public void createBuiltStatements() throws Exception { + insertStmt = + insertInto("\"myTable\"") + .value("c_int", bindMarker()) + .value("c_bigint", bindMarker()) + .value("c_float", bindMarker()) + .value("c_double", bindMarker()) + .value("c_varint", bindMarker()) + .value("c_decimal", bindMarker()); + selectStmt = + select("c_int", "c_bigint", "c_float", "c_double", "c_varint", "c_decimal") + .from("\"myTable\"") + .where(eq("c_int", bindMarker())) + .and(eq("c_bigint", bindMarker())); + } + + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_use_custom_codecs_with_simple_statements() { + session() + .execute( + insertQuery, + n_int, + new NumberBox(n_bigint), + new NumberBox(n_float), + new NumberBox(n_double), + new NumberBox(n_varint), + new NumberBox(n_decimal)); + ResultSet rows = session().execute(selectQuery, n_int, new NumberBox(n_bigint)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_custom_codecs_with_prepared_statements_1() { + session() + .execute( + session() + .prepare(insertQuery) + 
.bind( + n_int, + new NumberBox(n_bigint), + new NumberBox(n_float), + new NumberBox(n_double), + new NumberBox(n_varint), + new NumberBox(n_decimal))); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind(n_int, new NumberBox(n_bigint))); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_custom_codecs_with_prepared_statements_2() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN) .set(2, new NumberBox(n_float), NUMBERBOX_OF_FLOAT_TOKEN) .set(3, new NumberBox(n_double), NUMBERBOX_OF_DOUBLE_TOKEN) .set(4, new NumberBox(n_varint), NUMBERBOX_OF_BIGINTEGER_TOKEN) .set(5, new NumberBox(n_decimal), NUMBERBOX_OF_BIGDECIMAL_TOKEN)); - PreparedStatement ps = session().prepare(selectStmt); - ResultSet rows = session().execute(ps.bind() + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = + session() + .execute( + ps.bind() + .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) + .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_custom_codecs_with_built_statements_1() { + session() + .execute( + session() + .prepare(insertStmt) + .bind( + n_int, + new NumberBox(n_bigint), + new NumberBox(n_float), + new NumberBox(n_double), + new NumberBox(n_varint), + new NumberBox(n_decimal))); + PreparedStatement ps = session().prepare(selectStmt); + ResultSet rows = session().execute(ps.bind(n_int, new NumberBox(n_bigint))); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_use_custom_codecs_with_built_statements_2() { + session() + .execute( + session() + .prepare(insertStmt) + .bind() .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) - .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN)); - - Row row = rows.one(); - assertRow(row); + .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN) + .set(2, new NumberBox(n_float), NUMBERBOX_OF_FLOAT_TOKEN) + .set(3, new NumberBox(n_double), NUMBERBOX_OF_DOUBLE_TOKEN) + .set(4, new NumberBox(n_varint), NUMBERBOX_OF_BIGINTEGER_TOKEN) + .set(5, new NumberBox(n_decimal), NUMBERBOX_OF_BIGDECIMAL_TOKEN)); + PreparedStatement ps = session().prepare(selectStmt); + ResultSet rows = + session() + .execute( + ps.bind() + .set(0, new NumberBox(n_int), NUMBERBOX_OF_INTEGER_TOKEN) + .set(1, new NumberBox(n_bigint), NUMBERBOX_OF_LONG_TOKEN)); + + Row row = rows.one(); + assertRow(row); + } + + private void assertRow(Row row) { + // using getInt, etc: the default codecs are used + // and values are deserialized the traditional way + assertThat(row.getInt(0)).isEqualTo(n_int); + assertThat(row.getLong(1)).isEqualTo(n_bigint); + assertThat(row.getFloat(2)).isEqualTo(n_float); + assertThat(row.getDouble(3)).isEqualTo(n_double); + assertThat(row.getVarint(4)).isEqualTo(n_varint); + assertThat(row.getDecimal(5)).isEqualTo(n_decimal); + // with getObject, the first matching codec is the default one + assertThat(row.getObject(0)).isEqualTo(n_int); + assertThat(row.getObject(1)).isEqualTo(n_bigint); + assertThat(row.getObject(2)).isEqualTo(n_float); + assertThat(row.getObject(3)).isEqualTo(n_double); + assertThat(row.getObject(4)).isEqualTo(n_varint); + assertThat(row.getObject(5)).isEqualTo(n_decimal); + // with get + type + // we go back to the default codecs + 
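+ // (codec lookup uses both the CQL type and the requested Java type, so asking for Integer,
+ // Long, etc. resolves to the built-in codecs rather than the NumberBox codecs)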
assertThat(row.get(0, Integer.class)).isEqualTo(n_int); + assertThat(row.get(1, Long.class)).isEqualTo(n_bigint); + assertThat(row.get(2, Float.class)).isEqualTo(n_float); + assertThat(row.get(3, Double.class)).isEqualTo(n_double); + assertThat(row.get(4, BigInteger.class)).isEqualTo(n_varint); + assertThat(row.get(5, BigDecimal.class)).isEqualTo(n_decimal); + // with get + type, but enforcing NumberBox types + // we get the NumberBox codecs instead + assertThat(row.get(0, NUMBERBOX_OF_INTEGER_TOKEN)).isEqualTo(new NumberBox(n_int)); + assertThat(row.get(1, NUMBERBOX_OF_LONG_TOKEN)).isEqualTo(new NumberBox(n_bigint)); + assertThat(row.get(2, NUMBERBOX_OF_FLOAT_TOKEN)).isEqualTo(new NumberBox(n_float)); + assertThat(row.get(3, NUMBERBOX_OF_DOUBLE_TOKEN)).isEqualTo(new NumberBox(n_double)); + assertThat(row.get(4, NUMBERBOX_OF_BIGINTEGER_TOKEN)) + .isEqualTo(new NumberBox(n_varint)); + assertThat(row.get(5, NUMBERBOX_OF_BIGDECIMAL_TOKEN)) + .isEqualTo(new NumberBox(n_decimal)); + } + + private class NumberBoxCodec extends TypeCodec> { + + private final TypeCodec numberCodec; + + protected NumberBoxCodec(TypeCodec numberCodec) { + // @formatter:off + super( + numberCodec.getCqlType(), + new TypeToken>() {}.where( + new TypeParameter() {}, numberCodec.getJavaType())); + // @formatter:on + this.numberCodec = numberCodec; } - private void assertRow(Row row) { - // using getInt, etc: the default codecs are used - // and values are deserialized the traditional way - assertThat(row.getInt(0)).isEqualTo(n_int); - assertThat(row.getLong(1)).isEqualTo(n_bigint); - assertThat(row.getFloat(2)).isEqualTo(n_float); - assertThat(row.getDouble(3)).isEqualTo(n_double); - assertThat(row.getVarint(4)).isEqualTo(n_varint); - assertThat(row.getDecimal(5)).isEqualTo(n_decimal); - // with getObject, the first matching codec is the default one - assertThat(row.getObject(0)).isEqualTo(n_int); - assertThat(row.getObject(1)).isEqualTo(n_bigint); - assertThat(row.getObject(2)).isEqualTo(n_float); - assertThat(row.getObject(3)).isEqualTo(n_double); - assertThat(row.getObject(4)).isEqualTo(n_varint); - assertThat(row.getObject(5)).isEqualTo(n_decimal); - // with get + type - // we go back to the default codecs - assertThat(row.get(0, Integer.class)).isEqualTo(n_int); - assertThat(row.get(1, Long.class)).isEqualTo(n_bigint); - assertThat(row.get(2, Float.class)).isEqualTo(n_float); - assertThat(row.get(3, Double.class)).isEqualTo(n_double); - assertThat(row.get(4, BigInteger.class)).isEqualTo(n_varint); - assertThat(row.get(5, BigDecimal.class)).isEqualTo(n_decimal); - // with get + type, but enforcing NumberBox types - // we get the NumberBox codecs instead - assertThat(row.get(0, NUMBERBOX_OF_INTEGER_TOKEN)).isEqualTo(new NumberBox(n_int)); - assertThat(row.get(1, NUMBERBOX_OF_LONG_TOKEN)).isEqualTo(new NumberBox(n_bigint)); - assertThat(row.get(2, NUMBERBOX_OF_FLOAT_TOKEN)).isEqualTo(new NumberBox(n_float)); - assertThat(row.get(3, NUMBERBOX_OF_DOUBLE_TOKEN)).isEqualTo(new NumberBox(n_double)); - assertThat(row.get(4, NUMBERBOX_OF_BIGINTEGER_TOKEN)).isEqualTo(new NumberBox(n_varint)); - assertThat(row.get(5, NUMBERBOX_OF_BIGDECIMAL_TOKEN)).isEqualTo(new NumberBox(n_decimal)); + public boolean accepts(Object value) { + return value instanceof NumberBox && numberCodec.accepts(((NumberBox) value).getNumber()); } - private class NumberBoxCodec extends TypeCodec> { - - private final TypeCodec numberCodec; - - protected NumberBoxCodec(TypeCodec numberCodec) { - // @formatter:off - super(numberCodec.getCqlType(), - new 
TypeToken>() {}.where(new TypeParameter() {}, numberCodec.getJavaType())); - // @formatter:on - this.numberCodec = numberCodec; - } - - public boolean accepts(Object value) { - return value instanceof NumberBox && numberCodec.accepts(((NumberBox) value).getNumber()); - } - - @Override - public ByteBuffer serialize(NumberBox value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return numberCodec.serialize(value.getNumber(), protocolVersion); - } - - @Override - public NumberBox deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - return new NumberBox(numberCodec.deserialize(bytes, protocolVersion)); - } + @Override + public ByteBuffer serialize(NumberBox value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return numberCodec.serialize(value.getNumber(), protocolVersion); + } - @Override - public NumberBox parse(String value) throws InvalidTypeException { - return new NumberBox(numberCodec.parse(value)); - } + @Override + public NumberBox deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return new NumberBox(numberCodec.deserialize(bytes, protocolVersion)); + } - @Override - public String format(NumberBox value) throws InvalidTypeException { - return numberCodec.format(value.getNumber()); - } + @Override + public NumberBox parse(String value) throws InvalidTypeException { + return new NumberBox(numberCodec.parse(value)); + } + @Override + public String format(NumberBox value) throws InvalidTypeException { + return numberCodec.format(value.getNumber()); } + } - private class NumberBox { + private class NumberBox { - private final T number; + private final T number; - private NumberBox(T number) { - this.number = number; - } + private NumberBox(T number) { + this.number = number; + } - public T getNumber() { - return number; - } + public T getNumber() { + return number; + } - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - NumberBox numberBox = (NumberBox) o; - return MoreObjects.equal(number, numberBox.number); - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NumberBox numberBox = (NumberBox) o; + return MoreObjects.equal(number, numberBox.number); + } - @Override - public int hashCode() { - return MoreObjects.hashCode(number); - } + @Override + public int hashCode() { + return MoreObjects.hashCode(number); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedCollectionsIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedCollectionsIntegrationTest.java index fa2a9b05069..4982b4e8772 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedCollectionsIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedCollectionsIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,192 +17,187 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.exceptions.InvalidTypeException; import com.datastax.driver.core.querybuilder.BuiltStatement; import com.datastax.driver.core.utils.CassandraVersion; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.collect.ImmutableMap; import com.google.common.reflect.TypeToken; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; - /** - * Validates that nested collections are properly encoded, - * even if some inner type requires a custom codec. + * Validates that nested collections are properly encoded, even if some inner type requires a custom + * codec. 
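+ * (Here the map keys use the custom MyInt type, whose codec maps it to the CQL int type.)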
*/ @CassandraVersion("2.1.0") public class TypeCodecNestedCollectionsIntegrationTest extends CCMTestsSupport { - private final String insertQuery = "INSERT INTO \"myTable\" (pk, v) VALUES (?, ?)"; - - private final String selectQuery = "SELECT pk, v FROM \"myTable\" WHERE pk = ?"; - - private BuiltStatement insertStmt; - private BuiltStatement selectStmt; - - private int pk = 42; - private List>> v; - - // @formatter:off - private TypeToken>>> listType = new TypeToken>>>() {}; - private TypeToken>> elementsType = new TypeToken>>() {}; - // @formatter:on - - - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE IF NOT EXISTS \"myTable\" (" - + "pk int PRIMARY KEY, " - + "v frozen>>>>>" - + ")" - ); - } - - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withCodecRegistry( - new CodecRegistry().register(new MyIntCodec()) // global User <-> varchar codec - ); - } - - @BeforeClass(groups = "short") - public void setupData() { - Map map = ImmutableMap.of(new MyInt(42), "foo", new MyInt(43), "bar"); - Set> set = new HashSet>(); - set.add(map); - v = new ArrayList>>(); - v.add(set); - } - - @BeforeMethod(groups = "short") - public void createBuiltStatements() throws Exception { - insertStmt = insertInto("\"myTable\"") - .value("pk", bindMarker()) - .value("v", bindMarker()); - selectStmt = select("pk", "v") - .from("\"myTable\"") - .where(eq("pk", bindMarker())); + private final String insertQuery = "INSERT INTO \"myTable\" (pk, v) VALUES (?, ?)"; + + private final String selectQuery = "SELECT pk, v FROM \"myTable\" WHERE pk = ?"; + + private BuiltStatement insertStmt; + private BuiltStatement selectStmt; + + private int pk = 42; + private List>> v; + + // @formatter:off + private TypeToken>>> listType = + new TypeToken>>>() {}; + private TypeToken>> elementsType = + new TypeToken>>() {}; + // @formatter:on + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE IF NOT EXISTS \"myTable\" (" + + "pk int PRIMARY KEY, " + + "v frozen>>>>>" + + ")"); + } + + public Cluster.Builder createClusterBuilder() { + return Cluster.builder() + .withCodecRegistry( + new CodecRegistry().register(new MyIntCodec()) // global User <-> varchar codec + ); + } + + @BeforeClass(groups = "short") + public void setupData() { + Map map = ImmutableMap.of(new MyInt(42), "foo", new MyInt(43), "bar"); + Set> set = new HashSet>(); + set.add(map); + v = new ArrayList>>(); + v.add(set); + } + + @BeforeMethod(groups = "short") + public void createBuiltStatements() throws Exception { + insertStmt = insertInto("\"myTable\"").value("pk", bindMarker()).value("v", bindMarker()); + selectStmt = select("pk", "v").from("\"myTable\"").where(eq("pk", bindMarker())); + } + + @Test(groups = "short") + public void should_work_with_simple_statements() { + session().execute(insertQuery, pk, v); + ResultSet rows = session().execute(selectQuery, pk); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_work_with_prepared_statements_1() { + session().execute(session().prepare(insertQuery).bind(pk, v)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind(pk)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_work_with_prepared_statements_2() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() + .setInt(0, pk) + .setList(1, v, elementsType) // variant with element type explicitly set + ); + PreparedStatement ps = 
session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, pk)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_work_with_prepared_statements_3() { + session().execute(session().prepare(insertQuery).bind().setInt(0, pk).set(1, v, listType)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, pk)); + Row row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_work_with_built_statements() { + session().execute(session().prepare(insertStmt).bind().setInt(0, pk).set(1, v, listType)); + PreparedStatement ps = session().prepare(selectStmt); + ResultSet rows = session().execute(ps.bind().setInt(0, pk)); + Row row = rows.one(); + assertRow(row); + } + + private void assertRow(Row row) { + assertThat(row.getList(1, elementsType)).isEqualTo(v); + assertThat(row.get(1, listType)).isEqualTo(v); + } + + private class MyInt { + + private final int i; + + private MyInt(int i) { + this.i = i; } - @Test(groups = "short") - public void should_work_with_simple_statements() { - session().execute(insertQuery, pk, v); - ResultSet rows = session().execute(selectQuery, pk); - Row row = rows.one(); - assertRow(row); + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MyInt myInt = (MyInt) o; + return MoreObjects.equal(i, myInt.i); } - @Test(groups = "short") - public void should_work_with_prepared_statements_1() { - session().execute(session().prepare(insertQuery).bind(pk, v)); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind(pk)); - Row row = rows.one(); - assertRow(row); + @Override + public int hashCode() { + return MoreObjects.hashCode(i); } + } - @Test(groups = "short") - public void should_work_with_prepared_statements_2() { - session().execute(session().prepare(insertQuery).bind() - .setInt(0, pk) - .setList(1, v, elementsType) // variant with element type explicitly set - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, pk) - ); - Row row = rows.one(); - assertRow(row); - } + private class MyIntCodec extends TypeCodec { - @Test(groups = "short") - public void should_work_with_prepared_statements_3() { - session().execute(session().prepare(insertQuery).bind() - .setInt(0, pk) - .set(1, v, listType) - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, pk) - ); - Row row = rows.one(); - assertRow(row); + MyIntCodec() { + super(DataType.cint(), MyInt.class); } - @Test(groups = "short") - public void should_work_with_built_statements() { - session().execute(session().prepare(insertStmt).bind() - .setInt(0, pk) - .set(1, v, listType) - ); - PreparedStatement ps = session().prepare(selectStmt); - ResultSet rows = session().execute(ps.bind() - .setInt(0, pk) - ); - Row row = rows.one(); - assertRow(row); + @Override + public ByteBuffer serialize(MyInt value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return TypeCodec.cint().serialize(value.i, protocolVersion); } - private void assertRow(Row row) { - assertThat(row.getList(1, elementsType)).isEqualTo(v); - assertThat(row.get(1, listType)).isEqualTo(v); + @Override + public MyInt deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return new 
MyInt(TypeCodec.cint().deserialize(bytes, protocolVersion)); } - private class MyInt { - - private final int i; - - private MyInt(int i) { - this.i = i; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - MyInt myInt = (MyInt) o; - return MoreObjects.equal(i, myInt.i); - } - - @Override - public int hashCode() { - return MoreObjects.hashCode(i); - } + @Override + public MyInt parse(String value) throws InvalidTypeException { + return null; // not tested } - private class MyIntCodec extends TypeCodec { - - MyIntCodec() { - super(DataType.cint(), MyInt.class); - } - - @Override - public ByteBuffer serialize(MyInt value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return TypeCodec.cint().serialize(value.i, protocolVersion); - } - - @Override - public MyInt deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - return new MyInt(TypeCodec.cint().deserialize(bytes, protocolVersion)); - } - - @Override - public MyInt parse(String value) throws InvalidTypeException { - return null; // not tested - } - - @Override - public String format(MyInt value) throws InvalidTypeException { - return null; // not tested - } + @Override + public String format(MyInt value) throws InvalidTypeException { + return null; // not tested } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedUDTAndTupleIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedUDTAndTupleIntegrationTest.java index b9ed227e58e..05e7e42961e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedUDTAndTupleIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNestedUDTAndTupleIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,92 +17,91 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - /** - * This tests that the CodecRegistry is able to create codecs on the fly - * for nested UDTs and tuples received from a ResultSet, i.e. the first time - * the registry encounters the UDT or tuple, it's coming from a ResultSet, - * not from the Cluster's metadata. + * This tests that the CodecRegistry is able to create codecs on the fly for nested UDTs and tuples + * received from a ResultSet, i.e. 
the first time the registry encounters the UDT or tuple, it's + * coming from a ResultSet, not from the Cluster's metadata. * * @jira_ticket JAVA-847 */ @CassandraVersion("2.1.0") public class TypeCodecNestedUDTAndTupleIntegrationTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute( - "CREATE TYPE IF NOT EXISTS \"udt3\" (f3 text)", - "CREATE TYPE IF NOT EXISTS \"udt2\" (f2 frozen)", - "CREATE TYPE IF NOT EXISTS \"udt1\" (f1 frozen)", - "CREATE TABLE IF NOT EXISTS \"t1\" (pk int PRIMARY KEY, " - + "c1 frozen, " - + "c2 frozen>>>, " - + "c3 frozen>>>" - + ")", - // it's important to insert values using CQL literals - // so that the CodecRegistry will not be required until - // we receive a ResultSet - "INSERT INTO t1 (pk, c1) VALUES (1, {f1:{f2:{f3:'foo'}}})", - "INSERT INTO t1 (pk, c2) VALUES (2, ((('foo'))))", - "INSERT INTO t1 (pk, c3) VALUES (3, ((({f1:{f2:{f3:'foo'}}}))))" - ); - } - - @Test(groups = "short") - public void should_set_registry_on_nested_udts() { - ResultSet rows = session().execute("SELECT c1 FROM t1 WHERE pk = 1"); - Row row = rows.one(); - // here the CodecRegistry will create a codec on-the-fly using the UserType received from the resultset metadata - UDTValue udt1 = row.getUDTValue("c1"); - assertThat(udt1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - UDTValue udt2 = udt1.getUDTValue("f1"); - assertThat(udt2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - UDTValue udt3 = udt2.getUDTValue("f2"); - assertThat(udt3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - String f3 = udt3.getString("f3"); - assertThat(f3).isEqualTo("foo"); - } - - @Test(groups = "short") - public void should_set_registry_on_nested_tuples() { - ResultSet rows = session().execute("SELECT c2 FROM t1 WHERE pk = 2"); - Row row = rows.one(); - // here the CodecRegistry will create a codec on-the-fly using the TupleType received from the resultset metadata - TupleValue tuple1 = row.getTupleValue("c2"); - assertThat(tuple1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - TupleValue tuple2 = tuple1.getTupleValue(0); - assertThat(tuple2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - TupleValue tuple3 = tuple2.getTupleValue(0); - assertThat(tuple3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - String s = tuple3.getString(0); - assertThat(s).isEqualTo("foo"); - } + @Override + public void onTestContextInitialized() { + execute( + "CREATE TYPE IF NOT EXISTS \"udt3\" (f3 text)", + "CREATE TYPE IF NOT EXISTS \"udt2\" (f2 frozen)", + "CREATE TYPE IF NOT EXISTS \"udt1\" (f1 frozen)", + "CREATE TABLE IF NOT EXISTS \"t1\" (pk int PRIMARY KEY, " + + "c1 frozen, " + + "c2 frozen>>>, " + + "c3 frozen>>>" + + ")", + // it's important to insert values using CQL literals + // so that the CodecRegistry will not be required until + // we receive a ResultSet + "INSERT INTO t1 (pk, c1) VALUES (1, {f1:{f2:{f3:'foo'}}})", + "INSERT INTO t1 (pk, c2) VALUES (2, ((('foo'))))", + "INSERT INTO t1 (pk, c3) VALUES (3, ((({f1:{f2:{f3:'foo'}}}))))"); + } - @Test(groups = "short") - public void should_set_registry_on_nested_tuples_and_udts() { - ResultSet rows = session().execute("SELECT c3 FROM t1 WHERE pk = 3"); - Row row = rows.one(); - // here the CodecRegistry will create a codec on-the-fly using the TupleType received from the resultset metadata - TupleValue tuple1 = row.getTupleValue("c3"); 
- assertThat(tuple1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - TupleValue tuple2 = tuple1.getTupleValue(0); - assertThat(tuple2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - TupleValue tuple3 = tuple2.getTupleValue(0); - assertThat(tuple3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - UDTValue udt1 = tuple3.getUDTValue(0); - assertThat(udt1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - UDTValue udt2 = udt1.getUDTValue("f1"); - assertThat(udt2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - UDTValue udt3 = udt2.getUDTValue("f2"); - assertThat(udt3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); - String f3 = udt3.getString("f3"); - assertThat(f3).isEqualTo("foo"); - } + @Test(groups = "short") + public void should_set_registry_on_nested_udts() { + ResultSet rows = session().execute("SELECT c1 FROM t1 WHERE pk = 1"); + Row row = rows.one(); + // here the CodecRegistry will create a codec on-the-fly using the UserType received from the + // resultset metadata + UDTValue udt1 = row.getUDTValue("c1"); + assertThat(udt1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + UDTValue udt2 = udt1.getUDTValue("f1"); + assertThat(udt2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + UDTValue udt3 = udt2.getUDTValue("f2"); + assertThat(udt3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + String f3 = udt3.getString("f3"); + assertThat(f3).isEqualTo("foo"); + } + @Test(groups = "short") + public void should_set_registry_on_nested_tuples() { + ResultSet rows = session().execute("SELECT c2 FROM t1 WHERE pk = 2"); + Row row = rows.one(); + // here the CodecRegistry will create a codec on-the-fly using the TupleType received from the + // resultset metadata + TupleValue tuple1 = row.getTupleValue("c2"); + assertThat(tuple1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + TupleValue tuple2 = tuple1.getTupleValue(0); + assertThat(tuple2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + TupleValue tuple3 = tuple2.getTupleValue(0); + assertThat(tuple3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + String s = tuple3.getString(0); + assertThat(s).isEqualTo("foo"); + } + @Test(groups = "short") + public void should_set_registry_on_nested_tuples_and_udts() { + ResultSet rows = session().execute("SELECT c3 FROM t1 WHERE pk = 3"); + Row row = rows.one(); + // here the CodecRegistry will create a codec on-the-fly using the TupleType received from the + // resultset metadata + TupleValue tuple1 = row.getTupleValue("c3"); + assertThat(tuple1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + TupleValue tuple2 = tuple1.getTupleValue(0); + assertThat(tuple2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + TupleValue tuple3 = tuple2.getTupleValue(0); + assertThat(tuple3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + UDTValue udt1 = tuple3.getUDTValue(0); + assertThat(udt1.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + UDTValue udt2 = udt1.getUDTValue("f1"); + assertThat(udt2.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + UDTValue udt3 = udt2.getUDTValue("f2"); + 
assertThat(udt3.getCodecRegistry()).isSameAs(cluster().getConfiguration().getCodecRegistry()); + String f3 = udt3.getString("f3"); + assertThat(f3).isEqualTo("foo"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNumbersIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNumbersIntegrationTest.java index 5f2851bd593..27161f030f2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNumbersIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecNumbersIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,118 +17,122 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.utils.CassandraVersion; import java.math.BigDecimal; import java.math.BigInteger; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class TypeCodecNumbersIntegrationTest extends CCMTestsSupport { - private final String insertQuery = "INSERT INTO \"myTable\" (c_int, c_bigint, c_float, c_double, c_varint, c_decimal) VALUES (?, ?, ?, ?, ?, ?)"; + private final String insertQuery = + "INSERT INTO \"myTable\" (c_int, c_bigint, c_float, c_double, c_varint, c_decimal) VALUES (?, ?, ?, ?, ?, ?)"; - private final String selectQuery = "SELECT c_int, c_bigint, c_float, c_double, c_varint, c_decimal FROM \"myTable\" WHERE c_int = ? and c_bigint = ?"; + private final String selectQuery = + "SELECT c_int, c_bigint, c_float, c_double, c_varint, c_decimal FROM \"myTable\" WHERE c_int = ? 
and c_bigint = ?"; - private int n_int = 42; - private long n_bigint = 4242; - private float n_float = 42.42f; - private double n_double = 4242.42d; - private BigInteger n_varint = new BigInteger("424242"); - private BigDecimal n_decimal = new BigDecimal("424242.42"); + private int n_int = 42; + private long n_bigint = 4242; + private float n_float = 42.42f; + private double n_double = 4242.42d; + private BigInteger n_varint = new BigInteger("424242"); + private BigDecimal n_decimal = new BigDecimal("424242.42"); - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE \"myTable\" (" - + "c_int int, " - + "c_bigint bigint, " - + "c_float float, " - + "c_double double, " - + "c_varint varint, " - + "c_decimal decimal, " - + "PRIMARY KEY (c_int, c_bigint)" - + ")" - ); - } + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE \"myTable\" (" + + "c_int int, " + + "c_bigint bigint, " + + "c_float float, " + + "c_double double, " + + "c_varint varint, " + + "c_decimal decimal, " + + "PRIMARY KEY (c_int, c_bigint)" + + ")"); + } - @Test(groups = "short") - @CassandraVersion("2.0.0") - public void should_use_defaut_codecs_with_simple_statements() { - session().execute(insertQuery, n_int, n_bigint, n_float, n_double, n_varint, n_decimal); - ResultSet rows = session().execute(selectQuery, n_int, n_bigint); - Row row = rows.one(); - assertRow(row); - } + @Test(groups = "short") + @CassandraVersion("2.0.0") + public void should_use_defaut_codecs_with_simple_statements() { + session().execute(insertQuery, n_int, n_bigint, n_float, n_double, n_varint, n_decimal); + ResultSet rows = session().execute(selectQuery, n_int, n_bigint); + Row row = rows.one(); + assertRow(row); + } - @Test(groups = "short") - public void should_use_defaut_codecs_with_prepared_statements_1() { - session().execute(session().prepare(insertQuery).bind(n_int, n_bigint, n_float, n_double, n_varint, n_decimal)); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind(n_int, n_bigint)); - Row row = rows.one(); - assertRow(row); - } + @Test(groups = "short") + public void should_use_defaut_codecs_with_prepared_statements_1() { + session() + .execute( + session() + .prepare(insertQuery) + .bind(n_int, n_bigint, n_float, n_double, n_varint, n_decimal)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind(n_int, n_bigint)); + Row row = rows.one(); + assertRow(row); + } - @Test(groups = "short") - public void should_use_default_codecs_with_prepared_statements_2() { - session().execute(session().prepare(insertQuery).bind() - .setInt(0, n_int) - .setLong(1, n_bigint) - .setFloat(2, n_float) - .setDouble(3, n_double) - .setVarint(4, n_varint) - .setDecimal(5, n_decimal) - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, n_int) - .setLong(1, n_bigint) - ); - Row row = rows.one(); - assertRow(row); - } + @Test(groups = "short") + public void should_use_default_codecs_with_prepared_statements_2() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() + .setInt(0, n_int) + .setLong(1, n_bigint) + .setFloat(2, n_float) + .setDouble(3, n_double) + .setVarint(4, n_varint) + .setDecimal(5, n_decimal)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, n_int).setLong(1, n_bigint)); + Row row = rows.one(); + assertRow(row); + } - @Test(groups = "short") - 
public void should_use_default_codecs_with_prepared_statements_3() { - session().execute(session().prepare(insertQuery).bind() - .set(0, n_int, Integer.class) - .set(1, n_bigint, Long.class) - .set(2, n_float, Float.class) - .set(3, n_double, Double.class) - .set(4, n_varint, BigInteger.class) - .set(5, n_decimal, BigDecimal.class) - ); - PreparedStatement ps = session().prepare(selectQuery); - ResultSet rows = session().execute(ps.bind() - .setInt(0, n_int) - .setLong(1, n_bigint) - ); - Row row = rows.one(); - assertRow(row); - } + @Test(groups = "short") + public void should_use_default_codecs_with_prepared_statements_3() { + session() + .execute( + session() + .prepare(insertQuery) + .bind() + .set(0, n_int, Integer.class) + .set(1, n_bigint, Long.class) + .set(2, n_float, Float.class) + .set(3, n_double, Double.class) + .set(4, n_varint, BigInteger.class) + .set(5, n_decimal, BigDecimal.class)); + PreparedStatement ps = session().prepare(selectQuery); + ResultSet rows = session().execute(ps.bind().setInt(0, n_int).setLong(1, n_bigint)); + Row row = rows.one(); + assertRow(row); + } - private void assertRow(Row row) { - assertThat(row.getInt(0)).isEqualTo(n_int); - assertThat(row.getLong(1)).isEqualTo(n_bigint); - assertThat(row.getFloat(2)).isEqualTo(n_float); - assertThat(row.getDouble(3)).isEqualTo(n_double); - assertThat(row.getVarint(4)).isEqualTo(n_varint); - assertThat(row.getDecimal(5)).isEqualTo(n_decimal); - // with getObject - assertThat(row.getObject(0)).isEqualTo(n_int); - assertThat(row.getObject(1)).isEqualTo(n_bigint); - assertThat(row.getObject(2)).isEqualTo(n_float); - assertThat(row.getObject(3)).isEqualTo(n_double); - assertThat(row.getObject(4)).isEqualTo(n_varint); - assertThat(row.getObject(5)).isEqualTo(n_decimal); - // with get + type - assertThat(row.get(0, Integer.class)).isEqualTo(n_int); - assertThat(row.get(1, Long.class)).isEqualTo(n_bigint); - assertThat(row.get(2, Float.class)).isEqualTo(n_float); - assertThat(row.get(3, Double.class)).isEqualTo(n_double); - assertThat(row.get(4, BigInteger.class)).isEqualTo(n_varint); - assertThat(row.get(5, BigDecimal.class)).isEqualTo(n_decimal); - } + private void assertRow(Row row) { + assertThat(row.getInt(0)).isEqualTo(n_int); + assertThat(row.getLong(1)).isEqualTo(n_bigint); + assertThat(row.getFloat(2)).isEqualTo(n_float); + assertThat(row.getDouble(3)).isEqualTo(n_double); + assertThat(row.getVarint(4)).isEqualTo(n_varint); + assertThat(row.getDecimal(5)).isEqualTo(n_decimal); + // with getObject + assertThat(row.getObject(0)).isEqualTo(n_int); + assertThat(row.getObject(1)).isEqualTo(n_bigint); + assertThat(row.getObject(2)).isEqualTo(n_float); + assertThat(row.getObject(3)).isEqualTo(n_double); + assertThat(row.getObject(4)).isEqualTo(n_varint); + assertThat(row.getObject(5)).isEqualTo(n_decimal); + // with get + type + assertThat(row.get(0, Integer.class)).isEqualTo(n_int); + assertThat(row.get(1, Long.class)).isEqualTo(n_bigint); + assertThat(row.get(2, Float.class)).isEqualTo(n_float); + assertThat(row.get(3, Double.class)).isEqualTo(n_double); + assertThat(row.get(4, BigInteger.class)).isEqualTo(n_varint); + assertThat(row.get(5, BigDecimal.class)).isEqualTo(n_decimal); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java index 3e3a623f23f..c67492ffa1d 100644 --- 
a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,105 +17,113 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.exceptions.InvalidTypeException; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; - import static com.google.common.collect.Lists.newArrayList; import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.exceptions.InvalidTypeException; +import java.nio.ByteBuffer; +import org.testng.annotations.Test; + /** * Test an edge case where the user registers 2 codecs for the same Java type (here, String). - * SimpleStatements become almost unusable in these cases, but we are adding tests - * to check that at least it is possible to use prepared statements in these situations. + * SimpleStatements become almost unusable in these cases, but we are adding tests to check that at + * least it is possible to use prepared statements in these situations.
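To make the edge case concrete, here is a minimal sketch, assuming a `Session` named `session` whose `Cluster` registered the `IntToStringCodec` defined below and the `"myTable"` schema created in `onTestContextInitialized()`: a prepared statement carries the CQL column types, so each bound value is routed to the codec registered for that exact CQL type, whereas a simple statement would have to pick a codec from the Java value alone, which is exactly the ambiguity this class describes.

```java
// Sketch only: `session` and the registered IntToStringCodec are assumed from the test below.
PreparedStatement ps =
    session.prepare("INSERT INTO \"myTable\" (c_int, l_int, c_text) VALUES (?, ?, ?)");
session.execute(
    ps.bind()
        .setString(0, "42") // CQL int column: IntToStringCodec turns "42" into an int
        .setList(1, java.util.Arrays.asList("42"), String.class)
        .setString(2, "42")); // CQL text column: the built-in varchar codec applies
```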
*/ public class TypeCodecOverlappingJavaTypeIntegrationTest extends CCMTestsSupport { - private static final String insertQuery = "INSERT INTO \"myTable\" (c_int, l_int, c_text) VALUES (?, ?, ?)"; - - private static final String selectQuery = "SELECT c_int, l_int, c_text FROM \"myTable\" WHERE c_int = ?"; + private static final String insertQuery = + "INSERT INTO \"myTable\" (c_int, l_int, c_text) VALUES (?, ?, ?)"; + + private static final String selectQuery = + "SELECT c_int, l_int, c_text FROM \"myTable\" WHERE c_int = ?"; + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE \"myTable\" (" + + "c_int int PRIMARY KEY, " + + "l_int list, " + + "c_text text " + + ")"); + } + + public Cluster.Builder createClusterBuilder() { + return super.createClusterBuilder() + .withCodecRegistry(new CodecRegistry().register(new IntToStringCodec())); + } + + @Test(groups = "short") + public void should_use_custom_codecs_with_prepared_statements() { + PreparedStatement ps = session().prepare(insertQuery); + session() + .execute( + ps.bind() + .setInt(0, 42) + .setList(1, newArrayList(42)) + .setString( + 2, "42") // here we have the CQL type so VarcharCodec will be used even if + // IntToStringCodec accepts it + ); + session() + .execute( + ps.bind() + .setString(0, "42") + .setList(1, newArrayList("42"), String.class) + .setString( + 2, "42") // here we have the CQL type so VarcharCodec will be used even if + // IntToStringCodec accepts it + ); + ps = session().prepare(selectQuery); + assertRow(session().execute(ps.bind().setInt(0, 42)).one()); + assertRow(session().execute(ps.bind().setString(0, "42")).one()); + } + + private void assertRow(Row row) { + assertThat(row.getInt(0)).isEqualTo(42); + assertThat(row.getObject(0)).isEqualTo(42); // uses the default codec + assertThat(row.get(0, Integer.class)).isEqualTo(42); + assertThat(row.get(0, String.class)).isEqualTo("42"); + + assertThat(row.getList(1, Integer.class)).isEqualTo(newArrayList(42)); + assertThat(row.getList(1, String.class)).isEqualTo(newArrayList("42")); + assertThat(row.getObject(1)).isEqualTo(newArrayList(42)); // uses the default codec + assertThat(row.get(1, TypeTokens.listOf(Integer.class))).isEqualTo(newArrayList(42)); + assertThat(row.get(1, TypeTokens.listOf(String.class))).isEqualTo(newArrayList("42")); + } + + private class IntToStringCodec extends TypeCodec { + + protected IntToStringCodec() { + super(DataType.cint(), String.class); + } @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE \"myTable\" (" - + "c_int int PRIMARY KEY, " - + "l_int list, " - + "c_text text " - + ")" - ); + public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return TypeCodec.cint() + .serialize(value == null ? null : Integer.parseInt(value), protocolVersion); } - public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withCodecRegistry( - new CodecRegistry().register(new IntToStringCodec()) - ); + @Override + public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + Integer i = TypeCodec.cint().deserialize(bytes, protocolVersion); + return i == null ? 
null : Integer.toString(i); } - @Test(groups = "short") - public void should_use_custom_codecs_with_prepared_statements() { - PreparedStatement ps = session().prepare(insertQuery); - session().execute( - ps.bind() - .setInt(0, 42) - .setList(1, newArrayList(42)) - .setString(2, "42") // here we have the CQL type so VarcharCodec will be used even if IntToStringCodec accepts it - ); - session().execute( - ps.bind() - .setString(0, "42") - .setList(1, newArrayList("42"), String.class) - .setString(2, "42") // here we have the CQL type so VarcharCodec will be used even if IntToStringCodec accepts it - ); - ps = session().prepare(selectQuery); - assertRow(session().execute(ps.bind().setInt(0, 42)).one()); - assertRow(session().execute(ps.bind().setString(0, "42")).one()); + @Override + public String parse(String value) throws InvalidTypeException { + return value; } - private void assertRow(Row row) { - assertThat(row.getInt(0)).isEqualTo(42); - assertThat(row.getObject(0)).isEqualTo(42); // uses the default codec - assertThat(row.get(0, Integer.class)).isEqualTo(42); - assertThat(row.get(0, String.class)).isEqualTo("42"); - - assertThat(row.getList(1, Integer.class)).isEqualTo(newArrayList(42)); - assertThat(row.getList(1, String.class)).isEqualTo(newArrayList("42")); - assertThat(row.getObject(1)).isEqualTo(newArrayList(42)); // uses the default codec - assertThat(row.get(1, TypeTokens.listOf(Integer.class))).isEqualTo(newArrayList(42)); - assertThat(row.get(1, TypeTokens.listOf(String.class))).isEqualTo(newArrayList("42")); + @Override + public String format(String value) throws InvalidTypeException { + return value; } - private class IntToStringCodec extends TypeCodec { - - protected IntToStringCodec() { - super(DataType.cint(), String.class); - } - - @Override - public ByteBuffer serialize(String value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return TypeCodec.cint().serialize(value == null ? null : Integer.parseInt(value), protocolVersion); - } - - @Override - public String deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - Integer i = TypeCodec.cint().deserialize(bytes, protocolVersion); - return i == null ? null : Integer.toString(i); - } - - @Override - public String parse(String value) throws InvalidTypeException { - return value; - } - - @Override - public String format(String value) throws InvalidTypeException { - return value; - } - - @Override - public boolean accepts(Object value) { - return value instanceof String && ((String) value).matches("\\d+"); - } + @Override + public boolean accepts(Object value) { + return value instanceof String && ((String) value).matches("\\d+"); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTest.java index 7fc81fa4470..68a172f44d8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
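The `TypeCodecTest` unit tests that follow exercise codec lookup in `CodecRegistry` by CQL type, Java class and `TypeToken`. A minimal sketch of that lookup API (the registry, buffers and values here are assumptions for illustration):

```java
// Sketch only: a fresh registry, queried by CQL type plus Java type.
CodecRegistry registry = new CodecRegistry();

// Parameterized Java types are resolved with a TypeToken:
TypeCodec<List<String>> listCodec =
    registry.codecFor(DataType.list(DataType.varchar()), TypeTokens.listOf(String.class));
ByteBuffer bytes =
    listCodec.serialize(Arrays.asList("a", "b"), ProtocolVersion.NEWEST_SUPPORTED);
List<String> roundTrip = listCodec.deserialize(bytes, ProtocolVersion.NEWEST_SUPPORTED);

// Simple types are resolved with a plain Class:
TypeCodec<String> textCodec = registry.codecFor(DataType.text(), String.class);
```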
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,18 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.DataType.ascii; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.set; +import static com.datastax.driver.core.DataType.text; +import static com.datastax.driver.core.DataType.varchar; +import static com.datastax.driver.core.ProtocolVersion.V3; +import static com.google.common.collect.Lists.newArrayList; +import static org.testng.Assert.fail; + import com.datastax.driver.core.UserType.Field; import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidTypeException; @@ -23,370 +37,397 @@ import com.google.common.base.Strings; import com.google.common.collect.Lists; import com.google.common.reflect.TypeToken; -import org.testng.annotations.Test; - import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.DataType.*; -import static com.datastax.driver.core.ProtocolVersion.V3; -import static com.google.common.collect.Lists.newArrayList; -import static org.testng.Assert.fail; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.testng.annotations.Test; public class TypeCodecTest { - public static final DataType CUSTOM_FOO = DataType.custom("com.example.FooBar"); - - // @formatter:off - public static final TypeToken> LIST_OF_A_TOKEN = new TypeToken>() {}; - public static final TypeToken> LIST_OF_B_TOKEN = new TypeToken>() {}; - // @formatter:on - - private CodecRegistry codecRegistry = new CodecRegistry(); - - @Test(groups = "unit") - public void testCustomList() throws Exception { - DataType cqlType = list(CUSTOM_FOO); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - assertThat(codec).isNotNull().accepts(cqlType); + public static final DataType CUSTOM_FOO = DataType.custom("com.example.FooBar"); + + // @formatter:off + public static final TypeToken> LIST_OF_A_TOKEN = new TypeToken>() {}; + public static final TypeToken> LIST_OF_B_TOKEN = new TypeToken>() {}; + // @formatter:on + + private CodecRegistry codecRegistry = new CodecRegistry(); + + @Test(groups = "unit") + public void testCustomList() throws Exception { + DataType cqlType = list(CUSTOM_FOO); + TypeCodec> codec = codecRegistry.codecFor(cqlType); + assertThat(codec).isNotNull().accepts(cqlType); + } + + @Test(groups = "unit") + public void testCustomSet() throws Exception { + DataType cqlType = set(CUSTOM_FOO); + TypeCodec> codec = codecRegistry.codecFor(cqlType); + assertThat(codec).isNotNull().accepts(cqlType); + } + + @Test(groups = "unit") + public void testCustomKeyMap() throws Exception { + DataType cqlType = map(CUSTOM_FOO, text()); + TypeCodec> codec = 
codecRegistry.codecFor(cqlType); + assertThat(codec).isNotNull().accepts(cqlType); + } + + @Test(groups = "unit") + public void testCustomValueMap() throws Exception { + DataType cqlType = map(text(), CUSTOM_FOO); + TypeCodec> codec = codecRegistry.codecFor(cqlType); + assertThat(codec).isNotNull().accepts(cqlType); + } + + @Test( + groups = "unit", + expectedExceptions = {IllegalArgumentException.class}) + public void collectionTooLargeTest() throws Exception { + DataType cqlType = DataType.list(DataType.cint()); + List list = Collections.nCopies(65536, 1); + TypeCodec> codec = codecRegistry.codecFor(cqlType); + codec.serialize(list, ProtocolVersion.V2); + } + + @Test( + groups = "unit", + expectedExceptions = {IllegalArgumentException.class}) + public void collectionElementTooLargeTest() throws Exception { + DataType cqlType = DataType.list(DataType.text()); + List list = newArrayList(Strings.repeat("a", 65536)); + TypeCodec> codec = codecRegistry.codecFor(cqlType); + codec.serialize(list, ProtocolVersion.V2); + } + + @Test(groups = "unit") + public void test_cql_list_varchar_to_list_list_integer() { + ListVarcharToListListInteger codec = new ListVarcharToListListInteger(); + List> list = new ArrayList>(); + list.add(newArrayList(1, 2, 3)); + list.add(newArrayList(4, 5, 6)); + assertThat(codec).canSerialize(list); + } + + @Test(groups = "unit") + public void test_ascii_vs_utf8() { + TypeCodec asciiCodec = TypeCodec.ascii(); + TypeCodec utf8Codec = TypeCodec.varchar(); + String ascii = "The quick brown fox jumps over the lazy dog!"; + String utf8 = + "Dès Noël, où un zéphyr haï me vêt de glaçons würmiens, je dîne d’exquis rôtis de bœuf au kir à l’aÿ d’âge mûr & cætera!"; + assertThat(asciiCodec) + .accepts(String.class) + .accepts(ascii()) + .doesNotAccept(varchar()) + .doesNotAccept(text()) + .accepts(ascii) + .canSerialize(ascii) + .cannotSerialize(utf8); + assertThat(utf8Codec) + .accepts(String.class) + .doesNotAccept(ascii()) + .accepts(varchar()) + .accepts(text()) + .accepts(ascii) + .accepts(utf8) + .canSerialize(ascii) + .canSerialize(utf8); + } + + @Test(groups = "unit") + public void test_varchar_vs_text() { + assertThat(TypeCodec.varchar()).accepts(String.class).accepts(varchar()).accepts(text()); + assertThat(TypeCodec.list(TypeCodec.varchar())).accepts(list(varchar())).accepts(list(text())); + assertThat(TypeCodec.set(TypeCodec.varchar())).accepts(set(varchar())).accepts(set(text())); + assertThat(TypeCodec.map(TypeCodec.varchar(), TypeCodec.varchar())) + .accepts(map(varchar(), varchar())) + .accepts(map(varchar(), text())) + .accepts(map(text(), varchar())) + .accepts(map(text(), text())); + TupleType t1 = new TupleType(newArrayList(varchar(), varchar()), V3, new CodecRegistry()); + TupleType t2 = new TupleType(newArrayList(text(), varchar()), V3, new CodecRegistry()); + TupleType t3 = new TupleType(newArrayList(varchar(), text()), V3, new CodecRegistry()); + TupleType t4 = new TupleType(newArrayList(text(), text()), V3, new CodecRegistry()); + assertThat(TypeCodec.tuple(t1)).accepts(t2).accepts(t3).accepts(t4); + UserType u1 = + new UserType( + "ks", + "table", + false, + newArrayList(new Field("f1", varchar()), new Field("f2", varchar())), + V3, + new CodecRegistry()); + UserType u2 = + new UserType( + "ks", + "table", + false, + newArrayList(new Field("f1", text()), new Field("f2", varchar())), + V3, + new CodecRegistry()); + UserType u3 = + new UserType( + "ks", + "table", + false, + newArrayList(new Field("f1", varchar()), new Field("f2", text())), + V3, + new 
CodecRegistry()); + UserType u4 = + new UserType( + "ks", + "table", + false, + newArrayList(new Field("f1", text()), new Field("f2", text())), + V3, + new CodecRegistry()); + assertThat(TypeCodec.userType(u1)).accepts(u2).accepts(u3).accepts(u4); + } + + @Test(groups = "unit") + public void test_inheritance() { + CodecRegistry codecRegistry = new CodecRegistry(); + ACodec aCodec = new ACodec(); + codecRegistry.register(aCodec); + assertThat(codecRegistry.codecFor(cint(), A.class)).isNotNull().isSameAs(aCodec); + try { + // covariance not accepted: no codec handles B exactly + codecRegistry.codecFor(cint(), B.class); + fail(); + } catch (CodecNotFoundException e) { + // ok } - - @Test(groups = "unit") - public void testCustomSet() throws Exception { - DataType cqlType = set(CUSTOM_FOO); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - assertThat(codec).isNotNull().accepts(cqlType); + TypeCodec> expected = TypeCodec.list(aCodec); + TypeCodec> actual = codecRegistry.codecFor(list(cint()), LIST_OF_A_TOKEN); + assertThat(actual.getCqlType()).isEqualTo(expected.getCqlType()); + assertThat(actual.getJavaType()).isEqualTo(expected.getJavaType()); + // cannot work: List is not assignable to List + try { + codecRegistry.codecFor(list(cint()), LIST_OF_B_TOKEN); + fail(); + } catch (CodecNotFoundException e) { + // ok } - - @Test(groups = "unit") - public void testCustomKeyMap() throws Exception { - DataType cqlType = map(CUSTOM_FOO, text()); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - assertThat(codec).isNotNull().accepts(cqlType); + codecRegistry = new CodecRegistry(); + BCodec bCodec = new BCodec(); + codecRegistry.register(bCodec); + try { + assertThat(codecRegistry.codecFor(cint(), A.class)); + fail(); + } catch (CodecNotFoundException e) { + // ok } - - @Test(groups = "unit") - public void testCustomValueMap() throws Exception { - DataType cqlType = map(text(), CUSTOM_FOO); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - assertThat(codec).isNotNull().accepts(cqlType); + assertThat(codecRegistry.codecFor(cint(), B.class)).isNotNull().isSameAs(bCodec); + try { + codecRegistry.codecFor(list(cint()), LIST_OF_A_TOKEN); + fail(); + } catch (CodecNotFoundException e) { + // ok } - - @Test(groups = "unit", expectedExceptions = {IllegalArgumentException.class}) - public void collectionTooLargeTest() throws Exception { - DataType cqlType = DataType.list(DataType.cint()); - List list = Collections.nCopies(65536, 1); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - codec.serialize(list, ProtocolVersion.V2); - } - - @Test(groups = "unit", expectedExceptions = {IllegalArgumentException.class}) - public void collectionElementTooLargeTest() throws Exception { - DataType cqlType = DataType.list(DataType.text()); - List list = newArrayList(Strings.repeat("a", 65536)); - TypeCodec> codec = codecRegistry.codecFor(cqlType); - codec.serialize(list, ProtocolVersion.V2); + TypeCodec> expectedB = TypeCodec.list(bCodec); + TypeCodec> actualB = codecRegistry.codecFor(list(cint()), LIST_OF_B_TOKEN); + assertThat(actualB.getCqlType()).isEqualTo(expectedB.getCqlType()); + assertThat(actualB.getJavaType()).isEqualTo(expectedB.getJavaType()); + } + + @Test(groups = "unit") + public void should_deserialize_empty_buffer_as_tuple_with_null_values() { + CodecRegistry codecRegistry = new CodecRegistry(); + TupleType tupleType = + new TupleType( + newArrayList(DataType.cint(), DataType.varchar(), DataType.cfloat()), + ProtocolVersion.NEWEST_SUPPORTED, + codecRegistry); + TupleValue expected = 
tupleType.newValue(null, null, null); + + TupleValue actual = + codecRegistry + .codecFor(tupleType, TupleValue.class) + .deserialize(ByteBuffer.allocate(0), ProtocolVersion.NEWEST_SUPPORTED); + assertThat(actual).isNotNull(); + assertThat(actual).isEqualTo(expected); + } + + @Test(groups = "unit") + public void should_deserialize_empty_buffer_as_udt_with_null_values() { + CodecRegistry codecRegistry = new CodecRegistry(); + UserType udt = + new UserType( + "ks", + "t", + false, + Arrays.asList( + new UserType.Field("t", DataType.text()), + new UserType.Field("i", DataType.cint()), + new UserType.Field("l", DataType.list(DataType.text()))), + ProtocolVersion.NEWEST_SUPPORTED, + codecRegistry); + UDTValue expected = udt.newValue(); + expected.setString("t", null); + expected.setToNull("i"); + expected.setList("l", null); + + UDTValue actual = + codecRegistry + .codecFor(udt, UDTValue.class) + .deserialize(ByteBuffer.allocate(0), ProtocolVersion.NEWEST_SUPPORTED); + assertThat(actual).isNotNull(); + assertThat(actual).isEqualTo(expected); + } + + /** + * Ensures that {@link TypeCodec#timeUUID()} is resolved for all UUIDs and throws an {@link + * InvalidTypeException} when attempting to serialize or format a non-type 1 UUID. + * + * @jira_ticket JAVA-965 + */ + @Test(groups = "unit") + public void should_resolve_timeuuid_codec_for_all_uuids_and_fail_to_serialize_non_type1_uuid() { + UUID type4UUID = UUID.randomUUID(); + TypeCodec codec = codecRegistry.codecFor(DataType.timeuuid(), type4UUID); + // Should resolve the TimeUUIDCodec, but not serialize/format a type4 uuid with it. + assertThat(codec) + .isSameAs(TypeCodec.timeUUID()) + .accepts(UUID.class) + .cannotSerialize(type4UUID) + .cannotFormat(type4UUID); + } + + /** Ensures that primitive types are correctly handled and wrapped when necessary. 
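A minimal sketch of the JAVA-965 behavior verified just above: the timeuuid codec is resolved for any `UUID`, but refuses to encode one that is not time-based (the registry and the try/catch are assumptions for illustration).

```java
CodecRegistry registry = new CodecRegistry();
UUID type4 = UUID.randomUUID(); // a version 4 (random) UUID
TypeCodec<UUID> codec = registry.codecFor(DataType.timeuuid(), type4);
try {
  codec.serialize(type4, ProtocolVersion.NEWEST_SUPPORTED); // rejected: not a version 1 UUID
} catch (InvalidTypeException e) {
  // expected, mirroring cannotSerialize(type4UUID) in the test above
}
```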
*/ + @Test(groups = "unit") + public void should_wrap_primitive_types() { + assertThat(TypeCodec.cboolean()).accepts(Boolean.class).accepts(Boolean.TYPE).accepts(true); + assertThat(TypeCodec.cint()).accepts(Integer.class).accepts(Integer.TYPE).accepts(42); + assertThat(TypeCodec.bigint()).accepts(Long.class).accepts(Long.TYPE).accepts(42L); + assertThat(TypeCodec.cfloat()).accepts(Float.class).accepts(Float.TYPE).accepts(42.0F); + assertThat(TypeCodec.cdouble()).accepts(Double.class).accepts(Double.TYPE).accepts(42.0D); + } + + private class ListVarcharToListListInteger extends TypeCodec>> { + + private final TypeCodec> codec = TypeCodec.list(TypeCodec.varchar()); + + protected ListVarcharToListListInteger() { + super(DataType.list(DataType.varchar()), TypeTokens.listOf(TypeTokens.listOf(Integer.class))); } - @Test(groups = "unit") - public void test_cql_list_varchar_to_list_list_integer() { - ListVarcharToListListInteger codec = new ListVarcharToListListInteger(); - List> list = new ArrayList>(); - list.add(newArrayList(1, 2, 3)); - list.add(newArrayList(4, 5, 6)); - assertThat(codec).canSerialize(list); + @Override + public ByteBuffer serialize(List> value, ProtocolVersion protocolVersion) { + return codec.serialize( + Lists.transform( + value, + new Function, String>() { + @Override + public String apply(List input) { + return Joiner.on(",").join(input); + } + }), + protocolVersion); } - @Test(groups = "unit") - public void test_ascii_vs_utf8() { - TypeCodec asciiCodec = TypeCodec.ascii(); - TypeCodec utf8Codec = TypeCodec.varchar(); - String ascii = "The quick brown fox jumps over the lazy dog!"; - String utf8 = "Dès Noël, où un zéphyr haï me vêt de glaçons würmiens, je dîne d’exquis rôtis de bœuf au kir à l’aÿ d’âge mûr & cætera!"; - assertThat(asciiCodec) - .accepts(String.class) - .accepts(ascii()) - .doesNotAccept(varchar()) - .doesNotAccept(text()) - .accepts(ascii) - .canSerialize(ascii) - .cannotSerialize(utf8); - assertThat(utf8Codec) - .accepts(String.class) - .doesNotAccept(ascii()) - .accepts(varchar()) - .accepts(text()) - .accepts(ascii) - .accepts(utf8) - .canSerialize(ascii) - .canSerialize(utf8); + @Override + public List> deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { + return Lists.transform( + codec.deserialize(bytes, protocolVersion), + new Function>() { + @Override + public List apply(String input) { + return Lists.transform( + Arrays.asList(input.split(",")), + new Function() { + @Override + public Integer apply(String input) { + return Integer.parseInt(input); + } + }); + } + }); } - @Test(groups = "unit") - public void test_varchar_vs_text() { - assertThat(TypeCodec.varchar()) - .accepts(String.class) - .accepts(varchar()) - .accepts(text()); - assertThat(TypeCodec.list(TypeCodec.varchar())) - .accepts(list(varchar())) - .accepts(list(text())); - assertThat(TypeCodec.set(TypeCodec.varchar())) - .accepts(set(varchar())) - .accepts(set(text())); - assertThat(TypeCodec.map(TypeCodec.varchar(), TypeCodec.varchar())) - .accepts(map(varchar(), varchar())) - .accepts(map(varchar(), text())) - .accepts(map(text(), varchar())) - .accepts(map(text(), text())); - TupleType t1 = new TupleType(newArrayList(varchar(), varchar()), V3, new CodecRegistry()); - TupleType t2 = new TupleType(newArrayList(text(), varchar()), V3, new CodecRegistry()); - TupleType t3 = new TupleType(newArrayList(varchar(), text()), V3, new CodecRegistry()); - TupleType t4 = new TupleType(newArrayList(text(), text()), V3, new CodecRegistry()); - assertThat(TypeCodec.tuple(t1)) - 
.accepts(t2) - .accepts(t3) - .accepts(t4); - UserType u1 = new UserType("ks", "table", false, newArrayList(new Field("f1", varchar()), new Field("f2", varchar())), V3, new CodecRegistry()); - UserType u2 = new UserType("ks", "table", false, newArrayList(new Field("f1", text()), new Field("f2", varchar())), V3, new CodecRegistry()); - UserType u3 = new UserType("ks", "table", false, newArrayList(new Field("f1", varchar()), new Field("f2", text())), V3, new CodecRegistry()); - UserType u4 = new UserType("ks", "table", false, newArrayList(new Field("f1", text()), new Field("f2", text())), V3, new CodecRegistry()); - assertThat(TypeCodec.userType(u1)) - .accepts(u2) - .accepts(u3) - .accepts(u4); + @Override + public List> parse(String value) { + throw new UnsupportedOperationException(); } - @Test(groups = "unit") - public void test_inheritance() { - CodecRegistry codecRegistry = new CodecRegistry(); - ACodec aCodec = new ACodec(); - codecRegistry.register(aCodec); - assertThat(codecRegistry.codecFor(cint(), A.class)).isNotNull().isSameAs(aCodec); - try { - // covariance not accepted: no codec handles B exactly - codecRegistry.codecFor(cint(), B.class); - fail(); - } catch (CodecNotFoundException e) { - //ok - } - TypeCodec> expected = TypeCodec.list(aCodec); - TypeCodec> actual = codecRegistry.codecFor(list(cint()), LIST_OF_A_TOKEN); - assertThat(actual.getCqlType()).isEqualTo(expected.getCqlType()); - assertThat(actual.getJavaType()).isEqualTo(expected.getJavaType()); - // cannot work: List is not assignable to List - try { - codecRegistry.codecFor(list(cint()), LIST_OF_B_TOKEN); - fail(); - } catch (CodecNotFoundException e) { - //ok - } - codecRegistry = new CodecRegistry(); - BCodec bCodec = new BCodec(); - codecRegistry.register(bCodec); - try { - assertThat(codecRegistry.codecFor(cint(), A.class)); - fail(); - } catch (CodecNotFoundException e) { - // ok - } - assertThat(codecRegistry.codecFor(cint(), B.class)).isNotNull().isSameAs(bCodec); - try { - codecRegistry.codecFor(list(cint()), LIST_OF_A_TOKEN); - fail(); - } catch (CodecNotFoundException e) { - // ok - } - TypeCodec> expectedB = TypeCodec.list(bCodec); - TypeCodec> actualB = codecRegistry.codecFor(list(cint()), LIST_OF_B_TOKEN); - assertThat(actualB.getCqlType()).isEqualTo(expectedB.getCqlType()); - assertThat(actualB.getJavaType()).isEqualTo(expectedB.getJavaType()); + @Override + public String format(List> value) { + throw new UnsupportedOperationException(); } + } + class A { - @Test(groups = "unit") - public void should_deserialize_empty_buffer_as_tuple_with_null_values() { - CodecRegistry codecRegistry = new CodecRegistry(); - TupleType tupleType = new TupleType(newArrayList(DataType.cint(), DataType.varchar(), DataType.cfloat()), ProtocolVersion.NEWEST_SUPPORTED, codecRegistry); - TupleValue expected = tupleType.newValue(null, null, null); + int i = 0; + } - TupleValue actual = codecRegistry.codecFor(tupleType, TupleValue.class).deserialize(ByteBuffer.allocate(0), ProtocolVersion.NEWEST_SUPPORTED); - assertThat(actual).isNotNull(); - assertThat(actual).isEqualTo(expected); + class B extends A { + { + i = 1; } + } - @Test(groups = "unit") - public void should_deserialize_empty_buffer_as_udt_with_null_values() { - CodecRegistry codecRegistry = new CodecRegistry(); - UserType udt = new UserType("ks", "t", false, Arrays.asList( - new UserType.Field("t", DataType.text()), - new UserType.Field("i", DataType.cint()), - new UserType.Field("l", DataType.list(DataType.text())) - ), ProtocolVersion.NEWEST_SUPPORTED, 
codecRegistry); - UDTValue expected = udt.newValue(); - expected.setString("t", null); - expected.setToNull("i"); - expected.setList("l", null); - - UDTValue actual = codecRegistry.codecFor(udt, UDTValue.class).deserialize(ByteBuffer.allocate(0), ProtocolVersion.NEWEST_SUPPORTED); - assertThat(actual).isNotNull(); - assertThat(actual).isEqualTo(expected); - } + class ACodec extends TypeCodec { - /** - * Ensures that {@link TypeCodec#timeUUID()} is resolved for all UUIDs and throws an - * {@link InvalidTypeException} when attempting to serialize or format a non-type 1 - * UUID. - * - * @jira_ticket JAVA-965 - */ - @Test(groups = "unit") - public void should_resolve_timeuuid_codec_for_all_uuids_and_fail_to_serialize_non_type1_uuid() { - UUID type4UUID = UUID.randomUUID(); - TypeCodec codec = codecRegistry.codecFor(DataType.timeuuid(), type4UUID); - // Should resolve the TimeUUIDCodec, but not serialize/format a type4 uuid with it. - assertThat(codec).isSameAs(TypeCodec.timeUUID()) - .accepts(UUID.class) - .cannotSerialize(type4UUID) - .cannotFormat(type4UUID); + protected ACodec() { + super(DataType.cint(), A.class); } - - /** - * Ensures that primitive types are correctly handled and wrapped when necessary. - */ - @Test(groups = "unit") - public void should_wrap_primitive_types() { - assertThat(TypeCodec.cboolean()) - .accepts(Boolean.class) - .accepts(Boolean.TYPE) - .accepts(true); - assertThat(TypeCodec.cint()) - .accepts(Integer.class) - .accepts(Integer.TYPE) - .accepts(42); - assertThat(TypeCodec.bigint()) - .accepts(Long.class) - .accepts(Long.TYPE) - .accepts(42L); - assertThat(TypeCodec.cfloat()) - .accepts(Float.class) - .accepts(Float.TYPE) - .accepts(42.0F); - assertThat(TypeCodec.cdouble()) - .accepts(Double.class) - .accepts(Double.TYPE) - .accepts(42.0D); + @Override + public ByteBuffer serialize(A value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return null; // not tested } - private class ListVarcharToListListInteger extends TypeCodec>> { - - private final TypeCodec> codec = TypeCodec.list(TypeCodec.varchar()); - - protected ListVarcharToListListInteger() { - super(DataType.list(DataType.varchar()), TypeTokens.listOf(TypeTokens.listOf(Integer.class))); - } - - @Override - public ByteBuffer serialize(List> value, ProtocolVersion protocolVersion) { - return codec.serialize(Lists.transform(value, new Function, String>() { - @Override - public String apply(List input) { - return Joiner.on(",").join(input); - } - }), protocolVersion); - } - - @Override - public List> deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) { - return Lists.transform(codec.deserialize(bytes, protocolVersion), new Function>() { - @Override - public List apply(String input) { - return Lists.transform(Arrays.asList(input.split(",")), new Function() { - @Override - public Integer apply(String input) { - return Integer.parseInt(input); - } - }); - } - }); - } - - @Override - public List> parse(String value) { - throw new UnsupportedOperationException(); - } - - @Override - public String format(List> value) { - throw new UnsupportedOperationException(); - } + @Override + public A deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return null; // not tested } - class A { - - int i = 0; + @Override + public A parse(String value) throws InvalidTypeException { + return null; // not tested } - class B extends A { - { - i = 1; - } + @Override + public String format(A value) throws InvalidTypeException { + return null; // not tested 
} + } - class ACodec extends TypeCodec { - - protected ACodec() { - super(DataType.cint(), A.class); - } - - @Override - public ByteBuffer serialize(A value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return null; // not tested - } - - @Override - public A deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - return null; // not tested - } - - @Override - public A parse(String value) throws InvalidTypeException { - return null; // not tested - } + class BCodec extends TypeCodec { - @Override - public String format(A value) throws InvalidTypeException { - return null; // not tested - } + protected BCodec() { + super(DataType.cint(), B.class); } + @Override + public ByteBuffer serialize(B value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return null; // not tested + } - class BCodec extends TypeCodec { - - protected BCodec() { - super(DataType.cint(), B.class); - } - - @Override - public ByteBuffer serialize(B value, ProtocolVersion protocolVersion) throws InvalidTypeException { - return null; // not tested - } - - @Override - public B deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - return null; // not tested - } + @Override + public B deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + return null; // not tested + } - @Override - public B parse(String value) throws InvalidTypeException { - return null; // not tested - } + @Override + public B parse(String value) throws InvalidTypeException { + return null; // not tested + } - @Override - public String format(B value) throws InvalidTypeException { - return null; // not tested - } + @Override + public String format(B value) throws InvalidTypeException { + return null; // not tested } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java index 1d3aff4b16d..79bee457fe2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
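The tuple integration test introduced here maps a CQL `tuple<float, float>` column onto the `Location` class through `LocationCodec`. A minimal usage sketch, assuming the `cluster`, `session` and `id` variables and the `users` table that mirror the test below:

```java
// Sketch only: register the mapping codec, then read the column either way.
TupleType locationType =
    cluster.getMetadata().newTupleType(DataType.cfloat(), DataType.cfloat());
cluster.getConfiguration().getCodecRegistry()
    .register(new LocationCodec(TypeCodec.tuple(locationType)));

Row row = session.execute("SELECT location FROM users WHERE id = ?", id).one();
TupleValue raw = row.getTupleValue("location");         // always served by the tuple codec
Location mapped = row.get("location", Location.class);  // routed through LocationCodec
```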
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,230 +17,232 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - -import java.util.UUID; - import static com.datastax.driver.core.DataType.cfloat; import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.utils.CassandraVersion; +import java.util.UUID; +import org.testng.annotations.Test; + @CassandraVersion("2.1.0") public class TypeCodecTupleIntegrationTest extends CCMTestsSupport { - private final String insertQuery = "INSERT INTO users (id, name, location) VALUES (?, ?, ?)"; - private final String selectQuery = "SELECT id, name, location FROM users WHERE id = ?"; - - private final UUID uuid = UUID.randomUUID(); - - private TupleType locationType; - private TupleValue locationValue; - private TupleValue partialLocationValueInserted; - private Location location; - private Location partialLocation; - private TupleValue partialLocationValueRetrieved; - - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE IF NOT EXISTS \"users\" (id uuid PRIMARY KEY, name text, location frozen>)" - ); + private final String insertQuery = "INSERT INTO users (id, name, location) VALUES (?, ?, ?)"; + private final String selectQuery = "SELECT id, name, location FROM users WHERE id = ?"; + + private final UUID uuid = UUID.randomUUID(); + + private TupleType locationType; + private TupleValue locationValue; + private TupleValue partialLocationValueInserted; + private Location location; + private Location partialLocation; + private TupleValue partialLocationValueRetrieved; + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE IF NOT EXISTS \"users\" (id uuid PRIMARY KEY, name text, location frozen>)"); + } + + @Test(groups = "short") + public void should_handle_tuples_with_default_codecs() { + setUpTupleTypes(cluster()); + // simple statement + session().execute(insertQuery, uuid, "John Doe", locationValue); + ResultSet rows = session().execute(selectQuery, uuid); + Row row = rows.one(); + assertRow(row); + // prepared + values + PreparedStatement ps = session().prepare(insertQuery); + session().execute(ps.bind(uuid, "John Doe", locationValue)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertRow(row); + // bound with setTupleValue + session() + .execute( + ps.bind() + .setUUID(0, uuid) + .setString(1, "John Doe") + .setTupleValue("location", locationValue)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_handle_partial_tuples_with_default_codecs() { + setUpTupleTypes(cluster()); + // simple statement + session().execute(insertQuery, uuid, "John Doe", partialLocationValueInserted); + ResultSet rows = session().execute(selectQuery, uuid); + Row row = rows.one(); + assertPartialRow(row); + // prepared + values + PreparedStatement ps = session().prepare(insertQuery); + session().execute(ps.bind(uuid, "John Doe", partialLocationValueInserted)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertPartialRow(row); + // bound with setTupleValue + session() + .execute( + ps.bind() + .setUUID(0, uuid) + .setString(1, "John Doe") 
+ .setTupleValue("location", partialLocationValueInserted)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertPartialRow(row); + } + + @Test(groups = "short") + public void should_handle_tuples_with_custom_codecs() { + Cluster cluster = register(createClusterBuilder().build()); + Session session = cluster.connect(keyspace); + setUpTupleTypes(cluster); + cluster + .getConfiguration() + .getCodecRegistry() + .register(new LocationCodec(TypeCodec.tuple(locationType))); + session.execute(insertQuery, uuid, "John Doe", locationValue); + ResultSet rows = session.execute(selectQuery, uuid); + Row row = rows.one(); + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getTupleValue(2)).isEqualTo(locationValue); + // edge case: getObject should use default codecs; + // but tuple and udt codecs are registered on the fly; + // so if we have another manually-registered codec + // that one will be picked up + assertThat(row.getObject(2)).isEqualTo(location); + assertThat(row.get(2, TupleValue.class)).isEqualTo(locationValue); + assertThat(row.get(2, Location.class)).isEqualTo(location); + } + + @Test(groups = "short") + public void should_handle_partial_tuples_with_custom_codecs() { + Cluster cluster = register(createClusterBuilder().build()); + Session session = cluster.connect(keyspace); + setUpTupleTypes(cluster); + cluster + .getConfiguration() + .getCodecRegistry() + .register(new LocationCodec(TypeCodec.tuple(locationType))); + session.execute(insertQuery, uuid, "John Doe", partialLocationValueInserted); + ResultSet rows = session.execute(selectQuery, uuid); + Row row = rows.one(); + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getTupleValue(2)).isEqualTo(locationType.newValue(37.387224f, null)); + // corner case: getObject should use default codecs; + // but tuple and udt codecs are registered on the fly; + // so if we have another manually-registered codec + // that one will be picked up :( + assertThat(row.getObject(2)).isEqualTo(partialLocation); + assertThat(row.get(2, TupleValue.class)).isEqualTo(locationType.newValue(37.387224f, null)); + assertThat(row.get(2, Location.class)).isEqualTo(partialLocation); + } + + private void assertRow(Row row) { + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getTupleValue(2)).isEqualTo(locationValue); + assertThat(row.getObject(2)).isEqualTo(locationValue); + assertThat(row.get(2, TupleValue.class)).isEqualTo(locationValue); + } + + private void assertPartialRow(Row row) { + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + 
assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getTupleValue(2)).isEqualTo(partialLocationValueRetrieved); + assertThat(row.getObject(2)).isEqualTo(partialLocationValueRetrieved); + assertThat(row.get(2, TupleValue.class)).isEqualTo(partialLocationValueRetrieved); + } + + private void setUpTupleTypes(Cluster cluster) { + locationType = cluster.getMetadata().newTupleType(cfloat(), cfloat()); + locationValue = locationType.newValue().setFloat(0, 37.387224f).setFloat(1, -121.9733837f); + // insert a tuple of a different dimension + partialLocationValueInserted = + cluster.getMetadata().newTupleType(cfloat()).newValue().setFloat(0, 37.387224f); + // retrieve the partial tuple with null missing values + partialLocationValueRetrieved = locationType.newValue(37.387224f, null); + location = new Location(37.387224f, -121.9733837f); + partialLocation = new Location(37.387224f, 0.0f); + } + + static class LocationCodec extends MappingCodec { + + private final TupleType tupleType; + + public LocationCodec(TypeCodec innerCodec) { + super(innerCodec, Location.class); + tupleType = (TupleType) innerCodec.getCqlType(); } - @Test(groups = "short") - public void should_handle_tuples_with_default_codecs() { - setUpTupleTypes(cluster()); - // simple statement - session().execute(insertQuery, uuid, "John Doe", locationValue); - ResultSet rows = session().execute(selectQuery, uuid); - Row row = rows.one(); - assertRow(row); - // prepared + values - PreparedStatement ps = session().prepare(insertQuery); - session().execute(ps.bind(uuid, "John Doe", locationValue)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertRow(row); - // bound with setTupleValue - session().execute(ps.bind().setUUID(0, uuid).setString(1, "John Doe").setTupleValue("location", locationValue)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertRow(row); - } - - @Test(groups = "short") - public void should_handle_partial_tuples_with_default_codecs() { - setUpTupleTypes(cluster()); - // simple statement - session().execute(insertQuery, uuid, "John Doe", partialLocationValueInserted); - ResultSet rows = session().execute(selectQuery, uuid); - Row row = rows.one(); - assertPartialRow(row); - // prepared + values - PreparedStatement ps = session().prepare(insertQuery); - session().execute(ps.bind(uuid, "John Doe", partialLocationValueInserted)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertPartialRow(row); - // bound with setTupleValue - session().execute(ps.bind().setUUID(0, uuid).setString(1, "John Doe").setTupleValue("location", partialLocationValueInserted)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertPartialRow(row); + @Override + protected Location deserialize(TupleValue value) { + return value == null ? 
null : new Location(value.getFloat(0), value.getFloat(1)); } - @Test(groups = "short") - public void should_handle_tuples_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); - Session session = cluster.connect(keyspace); - setUpTupleTypes(cluster); - codecRegistry.register(new LocationCodec(TypeCodec.tuple(locationType))); - session.execute(insertQuery, uuid, "John Doe", locationValue); - ResultSet rows = session.execute(selectQuery, uuid); - Row row = rows.one(); - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getTupleValue(2)).isEqualTo(locationValue); - // edge case: getObject should use default codecs; - // but tuple and udt codecs are registered on the fly; - // so if we have another manually-registered codec - // that one will be picked up - assertThat(row.getObject(2)).isEqualTo(location); - assertThat(row.get(2, TupleValue.class)).isEqualTo(locationValue); - assertThat(row.get(2, Location.class)).isEqualTo(location); + @Override + protected TupleValue serialize(Location value) { + return value == null + ? null + : tupleType.newValue().setFloat(0, value.latitude).setFloat(1, value.longitude); } + } - @Test(groups = "short") - public void should_handle_partial_tuples_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); - Session session = cluster.connect(keyspace); - setUpTupleTypes(cluster); - codecRegistry.register(new LocationCodec(TypeCodec.tuple(locationType))); - session.execute(insertQuery, uuid, "John Doe", partialLocationValueInserted); - ResultSet rows = session.execute(selectQuery, uuid); - Row row = rows.one(); - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getTupleValue(2)).isEqualTo(locationType.newValue(37.387224f, null)); - // corner case: getObject should use default codecs; - // but tuple and udt codecs are registered on the fly; - // so if we have another manually-registered codec - // that one will be picked up :( - assertThat(row.getObject(2)).isEqualTo(partialLocation); - assertThat(row.get(2, TupleValue.class)).isEqualTo(locationType.newValue(37.387224f, null)); - assertThat(row.get(2, Location.class)).isEqualTo(partialLocation); - } + static class Location { - private void assertRow(Row row) { - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getTupleValue(2)).isEqualTo(locationValue); - 
assertThat(row.getObject(2)).isEqualTo(locationValue); - assertThat(row.get(2, TupleValue.class)).isEqualTo(locationValue); - } + float latitude; - private void assertPartialRow(Row row) { - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getTupleValue(2)).isEqualTo(partialLocationValueRetrieved); - assertThat(row.getObject(2)).isEqualTo(partialLocationValueRetrieved); - assertThat(row.get(2, TupleValue.class)).isEqualTo(partialLocationValueRetrieved); - } + float longitude; - private void setUpTupleTypes(Cluster cluster) { - locationType = cluster.getMetadata().newTupleType(cfloat(), cfloat()); - locationValue = locationType.newValue() - .setFloat(0, 37.387224f) - .setFloat(1, -121.9733837f); - // insert a tuple of a different dimension - partialLocationValueInserted = cluster.getMetadata().newTupleType(cfloat()).newValue().setFloat(0, 37.387224f); - // retrieve the partial tuple with null missing values - partialLocationValueRetrieved = locationType.newValue(37.387224f, null); - location = new Location(37.387224f, -121.9733837f); - partialLocation = new Location(37.387224f, 0.0f); + public Location(float latitude, float longitude) { + this.latitude = latitude; + this.longitude = longitude; } - static class LocationCodec extends MappingCodec { - - private final TupleType tupleType; - - public LocationCodec(TypeCodec innerCodec) { - super(innerCodec, Location.class); - tupleType = (TupleType) innerCodec.getCqlType(); - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - @Override - protected Location deserialize(TupleValue value) { - return value == null ? null : new Location(value.getFloat(0), value.getFloat(1)); - } + Location location = (Location) o; - @Override - protected TupleValue serialize(Location value) { - return value == null ? null : tupleType.newValue().setFloat(0, value.latitude).setFloat(1, value.longitude); - } + return Float.compare(location.latitude, latitude) == 0 + && Float.compare(location.longitude, longitude) == 0; } - static class Location { - - float latitude; - - float longitude; - - public Location(float latitude, float longitude) { - this.latitude = latitude; - this.longitude = longitude; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - Location location = (Location) o; - - return Float.compare(location.latitude, latitude) == 0 && Float.compare(location.longitude, longitude) == 0; - - } - - @Override - public int hashCode() { - int result = (latitude != +0.0f ? Float.floatToIntBits(latitude) : 0); - result = 31 * result + (longitude != +0.0f ? Float.floatToIntBits(longitude) : 0); - return result; - } - - @Override - public String toString() { - return "[" + latitude + ", " + longitude + "]"; - } + @Override + public int hashCode() { + int result = (latitude != +0.0f ? Float.floatToIntBits(latitude) : 0); + result = 31 * result + (longitude != +0.0f ? 
Float.floatToIntBits(longitude) : 0); + return result; } + @Override + public String toString() { + return "[" + latitude + ", " + longitude + "]"; + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java index ce36e5c823b..7c78bb8e33b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,225 +17,233 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.testng.annotations.Test; - import java.util.List; import java.util.Set; import java.util.UUID; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; @CassandraVersion("2.1.0") public class TypeCodecUDTIntegrationTest extends CCMTestsSupport { - private final String insertQuery = "INSERT INTO users (id, name, address) VALUES (?, ?, ?)"; - private final String selectQuery = "SELECT id, name, address FROM users WHERE id = ?"; - - private final UUID uuid = UUID.randomUUID(); - - private final Phone phone1 = new Phone("1234567", Sets.newHashSet("home", "iphone")); - private final Phone phone2 = new Phone("2345678", Sets.newHashSet("work")); - private final Address address = new Address("blah", 75010, Lists.newArrayList(phone1, phone2)); - - private UserType addressType; - private UserType phoneType; - - private UDTValue addressValue; + private final String insertQuery = "INSERT INTO users (id, name, address) VALUES (?, ?, ?)"; + private final String selectQuery = "SELECT id, name, address FROM users WHERE id = ?"; + + private final UUID uuid = UUID.randomUUID(); + + private final Phone phone1 = new Phone("1234567", Sets.newHashSet("home", "iphone")); + private final Phone phone2 = new Phone("2345678", Sets.newHashSet("work")); + private final Address address = new Address("blah", 75010, Lists.newArrayList(phone1, phone2)); + + private UserType addressType; + private UserType phoneType; + + private UDTValue addressValue; + + @Override + public void onTestContextInitialized() { + execute( + "CREATE TYPE IF NOT EXISTS \"phone\" (number text, tags set)", + "CREATE TYPE IF NOT EXISTS \"address\" (street text, zipcode int, phones list>)", + "CREATE TABLE IF NOT EXISTS \"users\" (id uuid PRIMARY KEY, name text, address frozen
    )"); + } + + @Test(groups = "short") + public void should_handle_udts_with_default_codecs() { + setUpUserTypes(cluster()); + // simple statement + session().execute(insertQuery, uuid, "John Doe", addressValue); + ResultSet rows = session().execute(selectQuery, uuid); + Row row = rows.one(); + assertRow(row); + // prepared + values + PreparedStatement ps = session().prepare(insertQuery); + session().execute(ps.bind(uuid, "John Doe", addressValue)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertRow(row); + // bound with setUDTValue + session() + .execute(ps.bind().setUUID(0, uuid).setString(1, "John Doe").setUDTValue(2, addressValue)); + rows = session().execute(selectQuery, uuid); + row = rows.one(); + assertRow(row); + } + + @Test(groups = "short") + public void should_handle_udts_with_custom_codecs() { + Cluster cluster = register(createClusterBuilder().build()); + Session session = cluster.connect(keyspace); + setUpUserTypes(cluster); + TypeCodec addressTypeCodec = TypeCodec.userType(addressType); + TypeCodec phoneTypeCodec = TypeCodec.userType(phoneType); + cluster + .getConfiguration() + .getCodecRegistry() + .register(new AddressCodec(addressTypeCodec, Address.class)) + .register(new PhoneCodec(phoneTypeCodec, Phone.class)); + session.execute(insertQuery, uuid, "John Doe", address); + ResultSet rows = session.execute(selectQuery, uuid); + Row row = rows.one(); + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getUDTValue(2)).isEqualTo(addressValue); + // corner case: getObject should use default codecs; + // but tuple and udt codecs are registered on the fly; + // so if we have another manually-registered codec + // that one will be picked up :( + assertThat(row.getObject(2)).isEqualTo(address); + assertThat(row.get(2, UDTValue.class)).isEqualTo(addressValue); + assertThat(row.get(2, Address.class)).isEqualTo(address); + } + + private void assertRow(Row row) { + assertThat(row.getUUID(0)).isEqualTo(uuid); + assertThat(row.getObject(0)).isEqualTo(uuid); + assertThat(row.get(0, UUID.class)).isEqualTo(uuid); + assertThat(row.getString(1)).isEqualTo("John Doe"); + assertThat(row.getObject(1)).isEqualTo("John Doe"); + assertThat(row.get(1, String.class)).isEqualTo("John Doe"); + assertThat(row.getUDTValue(2)).isEqualTo(addressValue); + assertThat(row.getObject(2)).isEqualTo(addressValue); + assertThat(row.get(2, UDTValue.class)).isEqualTo(addressValue); + } + + private void setUpUserTypes(Cluster cluster) { + addressType = cluster.getMetadata().getKeyspace(keyspace).getUserType("address"); + phoneType = cluster.getMetadata().getKeyspace(keyspace).getUserType("phone"); + UDTValue phone1Value = + phoneType.newValue().setString("number", phone1.number).setSet("tags", phone1.tags); + UDTValue phone2Value = + phoneType.newValue().setString("number", phone2.number).setSet("tags", phone2.tags); + addressValue = + addressType + .newValue() + .setString("street", address.street) + .setInt(1, address.zipcode) + .setList("phones", Lists.newArrayList(phone1Value, phone2Value)); + } + + static class AddressCodec extends MappingCodec { + + private final UserType userType; + + public AddressCodec(TypeCodec innerCodec, Class
    javaType) { + super(innerCodec, javaType); + userType = (UserType) innerCodec.getCqlType(); + } @Override - public void onTestContextInitialized() { - execute( - "CREATE TYPE IF NOT EXISTS \"phone\" (number text, tags set)", - "CREATE TYPE IF NOT EXISTS \"address\" (street text, zipcode int, phones list>)", - "CREATE TABLE IF NOT EXISTS \"users\" (id uuid PRIMARY KEY, name text, address frozen
    )" - ); + protected Address deserialize(UDTValue value) { + return value == null + ? null + : new Address( + value.getString("street"), + value.getInt("zipcode"), + value.getList("phones", Phone.class)); } - @Test(groups = "short") - public void should_handle_udts_with_default_codecs() { - setUpUserTypes(cluster()); - // simple statement - session().execute(insertQuery, uuid, "John Doe", addressValue); - ResultSet rows = session().execute(selectQuery, uuid); - Row row = rows.one(); - assertRow(row); - // prepared + values - PreparedStatement ps = session().prepare(insertQuery); - session().execute(ps.bind(uuid, "John Doe", addressValue)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertRow(row); - // bound with setUDTValue - session().execute(ps.bind().setUUID(0, uuid).setString(1, "John Doe").setUDTValue(2, addressValue)); - rows = session().execute(selectQuery, uuid); - row = rows.one(); - assertRow(row); + @Override + protected UDTValue serialize(Address value) { + return value == null + ? null + : userType + .newValue() + .setString("street", value.street) + .setInt("zipcode", value.zipcode) + .setList("phones", value.phones, Phone.class); } + } + + static class PhoneCodec extends MappingCodec { + + private final UserType userType; - @Test(groups = "short") - public void should_handle_udts_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); - Session session = cluster.connect(keyspace); - setUpUserTypes(cluster); - TypeCodec addressTypeCodec = TypeCodec.userType(addressType); - TypeCodec phoneTypeCodec = TypeCodec.userType(phoneType); - codecRegistry - .register(new AddressCodec(addressTypeCodec, Address.class)) - .register(new PhoneCodec(phoneTypeCodec, Phone.class)) - ; - session.execute(insertQuery, uuid, "John Doe", address); - ResultSet rows = session.execute(selectQuery, uuid); - Row row = rows.one(); - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getUDTValue(2)).isEqualTo(addressValue); - // corner case: getObject should use default codecs; - // but tuple and udt codecs are registered on the fly; - // so if we have another manually-registered codec - // that one will be picked up :( - assertThat(row.getObject(2)).isEqualTo(address); - assertThat(row.get(2, UDTValue.class)).isEqualTo(addressValue); - assertThat(row.get(2, Address.class)).isEqualTo(address); + public PhoneCodec(TypeCodec innerCodec, Class javaType) { + super(innerCodec, javaType); + userType = (UserType) innerCodec.getCqlType(); } - private void assertRow(Row row) { - assertThat(row.getUUID(0)).isEqualTo(uuid); - assertThat(row.getObject(0)).isEqualTo(uuid); - assertThat(row.get(0, UUID.class)).isEqualTo(uuid); - assertThat(row.getString(1)).isEqualTo("John Doe"); - assertThat(row.getObject(1)).isEqualTo("John Doe"); - assertThat(row.get(1, String.class)).isEqualTo("John Doe"); - assertThat(row.getUDTValue(2)).isEqualTo(addressValue); - assertThat(row.getObject(2)).isEqualTo(addressValue); - assertThat(row.get(2, UDTValue.class)).isEqualTo(addressValue); + @Override + protected Phone deserialize(UDTValue 
value) { + return value == null + ? null + : new Phone(value.getString("number"), value.getSet("tags", String.class)); } - private void setUpUserTypes(Cluster cluster) { - addressType = cluster.getMetadata().getKeyspace(keyspace).getUserType("address"); - phoneType = cluster.getMetadata().getKeyspace(keyspace).getUserType("phone"); - UDTValue phone1Value = phoneType.newValue() - .setString("number", phone1.number) - .setSet("tags", phone1.tags); - UDTValue phone2Value = phoneType.newValue() - .setString("number", phone2.number) - .setSet("tags", phone2.tags); - addressValue = addressType.newValue() - .setString("street", address.street) - .setInt(1, address.zipcode) - .setList("phones", Lists.newArrayList(phone1Value, phone2Value)); + @Override + protected UDTValue serialize(Phone value) { + return value == null + ? null + : userType.newValue().setString("number", value.number).setSet("tags", value.tags); } + } + + static class Address { - static class AddressCodec extends MappingCodec { + String street; - private final UserType userType; + int zipcode; - public AddressCodec(TypeCodec innerCodec, Class
    javaType) { - super(innerCodec, javaType); - userType = (UserType) innerCodec.getCqlType(); - } + List phones; - @Override - protected Address deserialize(UDTValue value) { - return value == null ? null : new Address(value.getString("street"), value.getInt("zipcode"), value.getList("phones", Phone.class)); - } + public Address(String street, int zipcode, List phones) { + this.street = street; + this.zipcode = zipcode; + this.phones = phones; + } - @Override - protected UDTValue serialize(Address value) { - return value == null ? null : userType.newValue().setString("street", value.street).setInt("zipcode", value.zipcode).setList("phones", value.phones, Phone.class); - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Address address = (Address) o; + return zipcode == address.zipcode + && street.equals(address.street) + && phones.equals(address.phones); } - static class PhoneCodec extends MappingCodec { + @Override + public int hashCode() { + int result = street.hashCode(); + result = 31 * result + zipcode; + result = 31 * result + phones.hashCode(); + return result; + } + } - private final UserType userType; + static class Phone { - public PhoneCodec(TypeCodec innerCodec, Class javaType) { - super(innerCodec, javaType); - userType = (UserType) innerCodec.getCqlType(); - } + String number; - @Override - protected Phone deserialize(UDTValue value) { - return value == null ? null : new Phone(value.getString("number"), value.getSet("tags", String.class)); - } + Set tags; - @Override - protected UDTValue serialize(Phone value) { - return value == null ? null : userType.newValue().setString("number", value.number).setSet("tags", value.tags); - } + public Phone(String number, Set tags) { + this.number = number; + this.tags = tags; } - static class Address { - - String street; - - int zipcode; - - List phones; - - public Address(String street, int zipcode, List phones) { - this.street = street; - this.zipcode = zipcode; - this.phones = phones; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - Address address = (Address) o; - return zipcode == address.zipcode && street.equals(address.street) && phones.equals(address.phones); - } - - @Override - public int hashCode() { - int result = street.hashCode(); - result = 31 * result + zipcode; - result = 31 * result + phones.hashCode(); - return result; - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Phone phone = (Phone) o; + return number.equals(phone.number) && tags.equals(phone.tags); } - static class Phone { - - String number; - - Set tags; - - public Phone(String number, Set tags) { - this.number = number; - this.tags = tags; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - Phone phone = (Phone) o; - return number.equals(phone.number) && tags.equals(phone.tags); - } - - @Override - public int hashCode() { - int result = number.hashCode(); - result = 31 * result + tags.hashCode(); - return result; - } + @Override + public int hashCode() { + int result = number.hashCode(); + result = 31 * result + tags.hashCode(); + return result; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java index d721d61c9fd..32e7a454e30 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,103 +17,137 @@ */ package com.datastax.driver.core; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.DataType.list; +import static com.datastax.driver.core.DataType.map; +import static com.datastax.driver.core.DataType.set; +import com.datastax.driver.core.utils.CassandraVersion; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.DataType.*; +import org.testng.annotations.Test; @CassandraVersion("3.0") public class UnresolvedUserTypeTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute( - /* - Creates the following acyclic graph (edges directed upwards - meaning "depends on"): - - H G - / \ /\ - F | E - \ / / - D / - / \/ - B C - | - A - - Topological sort order should be : GH,FE,D,CB,A - */ - String.format("CREATE TYPE %s.h (f1 int)", keyspace), - String.format("CREATE TYPE %s.g (f1 int)", keyspace), - String.format("CREATE TYPE %s.\"F\" (f1 frozen)", keyspace), - String.format("CREATE TYPE %s.\"E\" (f1 frozen>)", keyspace), - String.format("CREATE TYPE %s.\"D\" (f1 frozen>)", keyspace), - String.format("CREATE TYPE %s.\"C\" (f1 frozen>)", keyspace), - String.format("CREATE TYPE %s.\"B\" (f1 frozen>)", keyspace), - String.format("CREATE TYPE %s.\"A\" (f1 frozen<\"C\">)", keyspace) - ); - } - - @Test(groups = "short") - public void should_resolve_nested_user_types() throws ExecutionException, InterruptedException { - - // Each CREATE TYPE statement in getTableDefinitions() has triggered a partial schema refresh that - // should have used previous UDT definitions for dependencies. - checkUserTypes(cluster().getMetadata()); - - // Create a different Cluster instance to force a full refresh where all UDTs are loaded at once. - // The parsing logic should sort them to make sure they are loaded in the right order. 
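// Illustrative sketch of why the load order matters (hypothetical keyspace/type names, not part of
// this test): a UDT definition can only be parsed once every type it references is already known,
// so a full metadata refresh has to process definitions bottom-up, e.g.
//   CREATE TYPE ks.leaf   (f1 int);
//   CREATE TYPE ks.branch (f1 frozen<leaf>);    -- depends on leaf
//   CREATE TYPE ks.root   (f1 frozen<branch>);  -- depends on branch
// Resolving root before branch (or branch before leaf) would leave unresolved field types;
// checkUserTypes below asserts that the driver performs this topological sort when it reloads
// all UDTs at once.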
- Cluster newCluster = register(Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); - checkUserTypes(newCluster.getMetadata()); - } - - private void checkUserTypes(Metadata metadata) { - KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(keyspace); - - UserType a = keyspaceMetadata.getUserType("\"A\""); - UserType b = keyspaceMetadata.getUserType("\"B\""); - UserType c = keyspaceMetadata.getUserType("\"C\""); - UserType d = keyspaceMetadata.getUserType("\"D\""); - UserType e = keyspaceMetadata.getUserType("\"E\""); - UserType f = keyspaceMetadata.getUserType("\"F\""); - UserType g = keyspaceMetadata.getUserType("g"); - UserType h = keyspaceMetadata.getUserType("h"); - - assertThat(a).hasField("f1", c); - assertThat(b).hasField("f1", set(d)); - assertThat(c).hasField("f1", map(e, d)); - assertThat(d).hasField("f1", metadata.newTupleType(f, g, h)); - assertThat(e).hasField("f1", list(g)); - assertThat(f).hasField("f1", h); - assertThat(g).hasField("f1", cint()); - assertThat(h).hasField("f1", cint()); - - // JAVA-1407: ensure udts are listed in topological order - List userTypes = new ArrayList(keyspaceMetadata.getUserTypes()); - - assertThat(userTypes.subList(0, 2)).containsOnly(g, h); - assertThat(userTypes.subList(2, 4)).containsOnly(e, f); - assertThat(userTypes.subList(4, 5)).containsOnly(d); - assertThat(userTypes.subList(5, 7)).containsOnly(b, c); - assertThat(userTypes.subList(7, 8)).containsOnly(a); - - String script = keyspaceMetadata.exportAsString(); - - assertThat(script.indexOf(a.exportAsString())).isGreaterThan(script.indexOf(b.exportAsString())).isGreaterThan(script.indexOf(c.exportAsString())); - assertThat(script.indexOf(b.exportAsString())).isGreaterThan(script.indexOf(d.exportAsString())); - assertThat(script.indexOf(c.exportAsString())).isGreaterThan(script.indexOf(d.exportAsString())); - assertThat(script.indexOf(d.exportAsString())).isGreaterThan(script.indexOf(e.exportAsString())).isGreaterThan(script.indexOf(f.exportAsString())); - assertThat(script.indexOf(e.exportAsString())).isGreaterThan(script.indexOf(g.exportAsString())).isGreaterThan(script.indexOf(h.exportAsString())); - assertThat(script.indexOf(f.exportAsString())).isGreaterThan(script.indexOf(g.exportAsString())).isGreaterThan(script.indexOf(h.exportAsString())); - - } + private static final String KEYSPACE = "unresolved_user_type_test"; + + private static final String EXPECTED_SCHEMA = + String.format( + "CREATE KEYSPACE %s WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true;\n" + + "\n" + + "CREATE TYPE %s.g (\n" + + " f1 int\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.h (\n" + + " f1 int\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"E\" (\n" + + " f1 frozen>>\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"F\" (\n" + + " f1 frozen<%s.h>\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"D\" (\n" + + " f1 frozen>\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"B\" (\n" + + " f1 frozen>>\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"C\" (\n" + + " f1 frozen, frozen<%s.\"D\">>>\n" + + ");\n" + + "\n" + + "CREATE TYPE %s.\"A\" (\n" + + " f1 frozen<%s.\"C\">\n" + + ");\n", + KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, + KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE, KEYSPACE); + + @Override + public void onTestContextInitialized() { + execute( + /* + Creates the following acyclic graph (edges directed upwards + meaning "depends on"): + + 
H G + / \ /\ + F | E + \ / / + D / + / \/ + B C + | + A + + Topological sort order should be : gh,FE,D,CB,A + */ + "CREATE KEYSPACE unresolved_user_type_test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}", + String.format("CREATE TYPE %s.h (f1 int)", KEYSPACE), + String.format("CREATE TYPE %s.g (f1 int)", KEYSPACE), + String.format("CREATE TYPE %s.\"F\" (f1 frozen)", KEYSPACE), + String.format("CREATE TYPE %s.\"E\" (f1 frozen>)", KEYSPACE), + String.format("CREATE TYPE %s.\"D\" (f1 frozen>)", KEYSPACE), + String.format("CREATE TYPE %s.\"C\" (f1 frozen>)", KEYSPACE), + String.format("CREATE TYPE %s.\"B\" (f1 frozen>)", KEYSPACE), + String.format("CREATE TYPE %s.\"A\" (f1 frozen<\"C\">)", KEYSPACE)); + } + + @Test(groups = "short") + public void should_resolve_nested_user_types() throws ExecutionException, InterruptedException { + + // Each CREATE TYPE statement in getTableDefinitions() has triggered a partial schema refresh + // that should have used previous UDT definitions for dependencies. + checkUserTypes(cluster().getMetadata()); + + // Create a different Cluster instance to force a full refresh where all UDTs are loaded at + // once. The parsing logic should sort them to make sure they are loaded in the right order. + Cluster newCluster = register(createClusterBuilder().build()); + checkUserTypes(newCluster.getMetadata()); + } + + private void checkUserTypes(Metadata metadata) { + KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(KEYSPACE); + + UserType a = keyspaceMetadata.getUserType("\"A\""); + UserType b = keyspaceMetadata.getUserType("\"B\""); + UserType c = keyspaceMetadata.getUserType("\"C\""); + UserType d = keyspaceMetadata.getUserType("\"D\""); + UserType e = keyspaceMetadata.getUserType("\"E\""); + UserType f = keyspaceMetadata.getUserType("\"F\""); + UserType g = keyspaceMetadata.getUserType("g"); + UserType h = keyspaceMetadata.getUserType("h"); + + assertThat(a).hasField("f1", c); + assertThat(b).hasField("f1", set(d)); + assertThat(c).hasField("f1", map(e, d)); + assertThat(d).hasField("f1", metadata.newTupleType(f, g, h)); + assertThat(e).hasField("f1", list(g)); + assertThat(f).hasField("f1", h); + assertThat(g).hasField("f1", cint()); + assertThat(h).hasField("f1", cint()); + + // JAVA-1407: ensure udts are listed in topological order + List userTypes = new ArrayList(keyspaceMetadata.getUserTypes()); + + assertThat(userTypes.subList(0, 2)).containsOnly(g, h); + assertThat(userTypes.subList(2, 4)).containsOnly(e, f); + assertThat(userTypes.subList(4, 5)).containsOnly(d); + assertThat(userTypes.subList(5, 7)).containsOnly(b, c); + assertThat(userTypes.subList(7, 8)).containsOnly(a); + + String script = keyspaceMetadata.exportAsString(); + + // validate against a strict expectation that the schema is exactly as defined. + assertThat(script).isEqualTo(EXPECTED_SCHEMA); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/UserTypesTest.java b/driver-core/src/test/java/com/datastax/driver/core/UserTypesTest.java index 6751fe1150f..a911253c187 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/UserTypesTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/UserTypesTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,507 +17,608 @@ */ package com.datastax.driver.core; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ConditionChecker.check; +import static com.datastax.driver.core.Metadata.quote; +import static java.util.concurrent.TimeUnit.MINUTES; + import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.testng.annotations.Test; - import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; import java.util.concurrent.Callable; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ConditionChecker.check; -import static com.datastax.driver.core.Metadata.quote; -import static java.util.concurrent.TimeUnit.MINUTES; +import org.testng.annotations.Test; @CassandraVersion("2.1.0") public class UserTypesTest extends CCMTestsSupport { - private List DATA_TYPE_PRIMITIVES; - private Map samples; + private List DATA_TYPE_PRIMITIVES; + private Map samples; - private final static List DATA_TYPE_NON_PRIMITIVE_NAMES = - new ArrayList(EnumSet.of(DataType.Name.LIST, DataType.Name.SET, DataType.Name.MAP, DataType.Name.TUPLE)); + private static final List DATA_TYPE_NON_PRIMITIVE_NAMES = + new ArrayList( + EnumSet.of( + DataType.Name.LIST, DataType.Name.SET, DataType.Name.MAP, DataType.Name.TUPLE)); - private final Callable userTableExists = new Callable() { + private final Callable userTableExists = + new Callable() { @Override public Boolean call() throws Exception { - return cluster().getMetadata().getKeyspace(keyspace).getTable("user") != null; + return cluster().getMetadata().getKeyspace(keyspace).getTable("user") != null; } - }; - - @Override - public void onTestContextInitialized() { - ProtocolVersion protocolVersion = ccm().getProtocolVersion(); - DATA_TYPE_PRIMITIVES = new ArrayList(TestUtils.allPrimitiveTypes(protocolVersion)); - DATA_TYPE_PRIMITIVES.remove(DataType.counter()); - samples = PrimitiveTypeSamples.samples(protocolVersion); - String type1 = "CREATE TYPE phone (alias text, number text)"; - String type2 = "CREATE TYPE \"\"\"User Address\"\"\" (street text, \"ZIP\"\"\" int, phones set>)"; - String type3 = "CREATE TYPE type_for_frozen_test(i int)"; - String table = "CREATE TABLE user (id int PRIMARY KEY, addr frozen<\"\"\"User Address\"\"\">)"; - execute(type1, type2, type3, table); - // Ci tests fail with "unconfigured columnfamily user" - check().that(userTableExists).before(5, MINUTES).becomesTrue(); + }; + + 
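  // Note on the quoting convention used below (a sketch of the rule, not additional test code):
  // inside a double-quoted CQL identifier, a doubled quote stands for a literal quote, so
  //   CREATE TYPE """User Address""" ...
  // creates a type whose actual name is "User Address", quotes included. Metadata.quote then
  // re-applies that escaping on the driver side, which is why the lookups in these tests use
  // getUserType(quote("\"User Address\"")) to find the type created above.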
@Override + public void onTestContextInitialized() { + ProtocolVersion protocolVersion = ccm().getProtocolVersion(); + DATA_TYPE_PRIMITIVES = new ArrayList(TestUtils.allPrimitiveTypes(protocolVersion)); + DATA_TYPE_PRIMITIVES.remove(DataType.counter()); + samples = PrimitiveTypeSamples.samples(protocolVersion); + String type1 = "CREATE TYPE phone (alias text, number text)"; + String type2 = + "CREATE TYPE \"\"\"User Address\"\"\" (street text, \"ZIP\"\"\" int, phones set>)"; + String type3 = "CREATE TYPE type_for_frozen_test(i int)"; + String table = "CREATE TABLE user (id int PRIMARY KEY, addr frozen<\"\"\"User Address\"\"\">)"; + execute(type1, type2, type3, table); + // Ci tests fail with "unconfigured columnfamily user" + check().that(userTableExists).before(5, MINUTES).becomesTrue(); + } + + @Test(groups = "short") + public void should_store_and_retrieve_with_prepared_statements() throws Exception { + int userId = 0; + PreparedStatement ins = session().prepare("INSERT INTO user(id, addr) VALUES (?, ?)"); + PreparedStatement sel = session().prepare("SELECT * FROM user WHERE id=?"); + + UserType addrDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType(quote("\"User Address\"")); + UserType phoneDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("phone"); + + UDTValue phone1 = + phoneDef.newValue().setString("alias", "home").setString("number", "0123548790"); + UDTValue phone2 = + phoneDef.newValue().setString("alias", "work").setString("number", "0698265251"); + + UDTValue addr = + addrDef + .newValue() + .setString("street", "1600 Pennsylvania Ave NW") + .setInt(quote("ZIP\""), 20500) + .setSet("phones", ImmutableSet.of(phone1, phone2)); + + session().execute(ins.bind(userId, addr)); + + Row r = session().execute(sel.bind(userId)).one(); + + assertThat(r.getInt("id")).isEqualTo(0); + } + + @Test(groups = "short") + public void should_store_and_retrieve_with_simple_statements() throws Exception { + int userId = 1; + UserType addrDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType(quote("\"User Address\"")); + UserType phoneDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("phone"); + + UDTValue phone1 = + phoneDef.newValue().setString("alias", "home").setString("number", "0123548790"); + UDTValue phone2 = + phoneDef.newValue().setString("alias", "work").setString("number", "0698265251"); + + UDTValue addr = + addrDef + .newValue() + .setString("street", "1600 Pennsylvania Ave NW") + .setInt(quote("ZIP\""), 20500) + .setSet("phones", ImmutableSet.of(phone1, phone2)); + + session().execute("INSERT INTO user(id, addr) VALUES (?, ?)", userId, addr); + + Row r = session().execute("SELECT * FROM user WHERE id=?", userId).one(); + + assertThat(r.getInt("id")).isEqualTo(userId); + assertThat(r.getUDTValue("addr")).isEqualTo(addr); + } + + @Test(groups = "short") + public void should_store_type_definitions_in_their_keyspace() throws Exception { + KeyspaceMetadata thisKeyspace = cluster().getMetadata().getKeyspace(this.keyspace); + + // Types that don't exist don't have definitions + assertThat(thisKeyspace.getUserType("address1")).isNull(); + assertThat(thisKeyspace.getUserType("phone1")).isNull(); + + // Types created by this test have definitions + assertThat(thisKeyspace.getUserType(quote("\"User Address\""))).isNotNull(); + assertThat(thisKeyspace.getUserType("phone")).isNotNull(); + + // If we create another keyspace, it doesn't have the definitions of this keyspace + String otherKeyspaceName = this.keyspace + "_nonEx"; + session() + 
.execute( + "CREATE KEYSPACE " + + otherKeyspaceName + + " " + + "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); + + KeyspaceMetadata otherKeyspace = cluster().getMetadata().getKeyspace(otherKeyspaceName); + assertThat(otherKeyspace.getUserType(quote("\"User Address\""))).isNull(); + assertThat(otherKeyspace.getUserType("phone")).isNull(); + } + + @Test(groups = "short") + public void should_handle_UDT_with_many_fields() throws Exception { + int MAX_TEST_LENGTH = 1024; + + // create the seed udt + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < MAX_TEST_LENGTH; ++i) { + sb.append(String.format("v_%s int", i)); + + if (i + 1 < MAX_TEST_LENGTH) sb.append(","); } + session().execute(String.format("CREATE TYPE lengthy_udt (%s)", sb.toString())); - @Test(groups = "short") - public void should_store_and_retrieve_with_prepared_statements() throws Exception { - int userId = 0; - PreparedStatement ins = session().prepare("INSERT INTO user(id, addr) VALUES (?, ?)"); - PreparedStatement sel = session().prepare("SELECT * FROM user WHERE id=?"); - - UserType addrDef = cluster().getMetadata().getKeyspace(keyspace).getUserType(quote("\"User Address\"")); - UserType phoneDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("phone"); - - UDTValue phone1 = phoneDef.newValue().setString("alias", "home").setString("number", "0123548790"); - UDTValue phone2 = phoneDef.newValue().setString("alias", "work").setString("number", "0698265251"); + // create a table with multiple sizes of udts + session().execute("CREATE TABLE lengthy_udt_table (k int PRIMARY KEY, v frozen)"); - UDTValue addr = addrDef.newValue().setString("street", "1600 Pennsylvania Ave NW").setInt(quote("ZIP\""), 20500).setSet("phones", ImmutableSet.of(phone1, phone2)); + // hold onto the UserType for future use + UserType udtDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("lengthy_udt"); - session().execute(ins.bind(userId, addr)); + // verify inserts and reads + for (int i : Arrays.asList(0, 1, 2, 3, MAX_TEST_LENGTH)) { + // create udt + UDTValue createdUDT = udtDef.newValue(); + for (int j = 0; j < i; ++j) { + createdUDT.setInt(j, j); + } - Row r = session().execute(sel.bind(userId)).one(); + // write udt + session().execute("INSERT INTO lengthy_udt_table (k, v) VALUES (0, ?)", createdUDT); - assertThat(r.getInt("id")).isEqualTo(0); + // verify udt was written and read correctly + UDTValue r = + session().execute("SELECT v FROM lengthy_udt_table WHERE k=0").one().getUDTValue("v"); + assertThat(r.toString()).isEqualTo(createdUDT.toString()); } - - @Test(groups = "short") - public void should_store_and_retrieve_with_simple_statements() throws Exception { - int userId = 1; - UserType addrDef = cluster().getMetadata().getKeyspace(keyspace).getUserType(quote("\"User Address\"")); - UserType phoneDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("phone"); - - UDTValue phone1 = phoneDef.newValue().setString("alias", "home").setString("number", "0123548790"); - UDTValue phone2 = phoneDef.newValue().setString("alias", "work").setString("number", "0698265251"); - - UDTValue addr = addrDef.newValue().setString("street", "1600 Pennsylvania Ave NW").setInt(quote("ZIP\""), 20500).setSet("phones", ImmutableSet.of(phone1, phone2)); - - session().execute("INSERT INTO user(id, addr) VALUES (?, ?)", userId, addr); - - Row r = session().execute("SELECT * FROM user WHERE id=?", userId).one(); - - assertThat(r.getInt("id")).isEqualTo(userId); - 
assertThat(r.getUDTValue("addr")).isEqualTo(addr); - } - - @Test(groups = "short") - public void should_store_type_definitions_in_their_keyspace() throws Exception { - KeyspaceMetadata thisKeyspace = cluster().getMetadata().getKeyspace(this.keyspace); - - // Types that don't exist don't have definitions - assertThat(thisKeyspace.getUserType("address1")) - .isNull(); - assertThat(thisKeyspace.getUserType("phone1")) - .isNull(); - - // Types created by this test have definitions - assertThat(thisKeyspace.getUserType(quote("\"User Address\""))) - .isNotNull(); - assertThat(thisKeyspace.getUserType("phone")) - .isNotNull(); - - // If we create another keyspace, it doesn't have the definitions of this keyspace - String otherKeyspaceName = this.keyspace + "_nonEx"; - session().execute("CREATE KEYSPACE " + otherKeyspaceName + " " + - "WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}"); - - KeyspaceMetadata otherKeyspace = cluster().getMetadata().getKeyspace(otherKeyspaceName); - assertThat(otherKeyspace.getUserType(quote("\"User Address\""))) - .isNull(); - assertThat(otherKeyspace.getUserType("phone")) - .isNull(); + } + + @Test(groups = "short") + public void should_store_and_retrieve_UDT_containing_any_primitive_type() throws Exception { + // create UDT + List alpha_type_list = new ArrayList(); + int startIndex = (int) 'a'; + for (int i = 0; i < DATA_TYPE_PRIMITIVES.size(); i++) { + alpha_type_list.add( + String.format( + "%s %s", + Character.toString((char) (startIndex + i)), DATA_TYPE_PRIMITIVES.get(i).getName())); } - @Test(groups = "short") - public void should_handle_UDT_with_many_fields() throws Exception { - int MAX_TEST_LENGTH = 1024; - - // create the seed udt - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < MAX_TEST_LENGTH; ++i) { - sb.append(String.format("v_%s int", i)); - - if (i + 1 < MAX_TEST_LENGTH) - sb.append(","); - } - session().execute(String.format("CREATE TYPE lengthy_udt (%s)", sb.toString())); - - // create a table with multiple sizes of udts - session().execute("CREATE TABLE lengthy_udt_table (k int PRIMARY KEY, v frozen)"); - - // hold onto the UserType for future use - UserType udtDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("lengthy_udt"); - - // verify inserts and reads - for (int i : Arrays.asList(0, 1, 2, 3, MAX_TEST_LENGTH)) { - // create udt - UDTValue createdUDT = udtDef.newValue(); - for (int j = 0; j < i; ++j) { - createdUDT.setInt(j, j); - } - - // write udt - session().execute("INSERT INTO lengthy_udt_table (k, v) VALUES (0, ?)", createdUDT); - - // verify udt was written and read correctly - UDTValue r = session().execute("SELECT v FROM lengthy_udt_table WHERE k=0") - .one().getUDTValue("v"); - assertThat(r.toString()).isEqualTo(createdUDT.toString()); - } + session() + .execute( + String.format("CREATE TYPE alldatatypes (%s)", Joiner.on(',').join(alpha_type_list))); + session() + .execute("CREATE TABLE alldatatypes_table (a int PRIMARY KEY, b frozen)"); + + // insert UDT data + UserType alldatatypesDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType("alldatatypes"); + UDTValue alldatatypes = alldatatypesDef.newValue(); + + for (int i = 0; i < DATA_TYPE_PRIMITIVES.size(); i++) { + DataType dataType = DATA_TYPE_PRIMITIVES.get(i); + String index = Character.toString((char) (startIndex + i)); + Object sampleData = samples.get(dataType); + + switch (dataType.getName()) { + case ASCII: + alldatatypes.setString(index, (String) sampleData); + break; + case BIGINT: + 
alldatatypes.setLong(index, (Long) sampleData); + break; + case BLOB: + alldatatypes.setBytes(index, (ByteBuffer) sampleData); + break; + case BOOLEAN: + alldatatypes.setBool(index, (Boolean) sampleData); + break; + case DECIMAL: + alldatatypes.setDecimal(index, (BigDecimal) sampleData); + break; + case DOUBLE: + alldatatypes.setDouble(index, (Double) sampleData); + break; + case DURATION: + alldatatypes.set(index, Duration.from(sampleData.toString()), Duration.class); + break; + case FLOAT: + alldatatypes.setFloat(index, (Float) sampleData); + break; + case INET: + alldatatypes.setInet(index, (InetAddress) sampleData); + break; + case TINYINT: + alldatatypes.setByte(index, (Byte) sampleData); + break; + case SMALLINT: + alldatatypes.setShort(index, (Short) sampleData); + break; + case INT: + alldatatypes.setInt(index, (Integer) sampleData); + break; + case TEXT: + alldatatypes.setString(index, (String) sampleData); + break; + case TIMESTAMP: + alldatatypes.setTimestamp(index, ((Date) sampleData)); + break; + case DATE: + alldatatypes.setDate(index, ((LocalDate) sampleData)); + break; + case TIME: + alldatatypes.setTime(index, ((Long) sampleData)); + break; + case TIMEUUID: + alldatatypes.setUUID(index, (UUID) sampleData); + break; + case UUID: + alldatatypes.setUUID(index, (UUID) sampleData); + break; + case VARCHAR: + alldatatypes.setString(index, (String) sampleData); + break; + case VARINT: + alldatatypes.setVarint(index, (BigInteger) sampleData); + break; + } } - @Test(groups = "short") - public void should_store_and_retrieve_UDT_containing_any_primitive_type() throws Exception { - // create UDT - List alpha_type_list = new ArrayList(); - int startIndex = (int) 'a'; - for (int i = 0; i < DATA_TYPE_PRIMITIVES.size(); i++) { - alpha_type_list.add(String.format("%s %s", Character.toString((char) (startIndex + i)), - DATA_TYPE_PRIMITIVES.get(i).getName())); + PreparedStatement ins = + session().prepare("INSERT INTO alldatatypes_table (a, b) VALUES (?, ?)"); + session().execute(ins.bind(0, alldatatypes)); + + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM alldatatypes_table"); + List rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); + + Row row = rows.get(0); + + assertThat(row.getInt("a")).isEqualTo(0); + assertThat(row.getUDTValue("b")).isEqualTo(alldatatypes); + } + + @Test(groups = "short") + public void should_store_and_retrieve_UDT_containing_collections_and_tuples() throws Exception { + // counters and durations are not allowed inside collections + DATA_TYPE_PRIMITIVES.remove(DataType.counter()); + DATA_TYPE_PRIMITIVES.remove(DataType.duration()); + + // create UDT + List alpha_type_list = new ArrayList(); + int startIndex = (int) 'a'; + for (int i = 0; i < DATA_TYPE_NON_PRIMITIVE_NAMES.size(); i++) + for (int j = 0; j < DATA_TYPE_PRIMITIVES.size(); j++) { + String typeString; + if (DATA_TYPE_NON_PRIMITIVE_NAMES.get(i) == DataType.Name.MAP) { + typeString = + (String.format( + "%s_%s %s<%s, %s>", + Character.toString((char) (startIndex + i)), + Character.toString((char) (startIndex + j)), + DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), + DATA_TYPE_PRIMITIVES.get(j).getName(), + DATA_TYPE_PRIMITIVES.get(j).getName())); + } else if (DATA_TYPE_NON_PRIMITIVE_NAMES.get(i) == DataType.Name.TUPLE) { + typeString = + (String.format( + "%s_%s frozen<%s<%s>>", + Character.toString((char) (startIndex + i)), + Character.toString((char) (startIndex + j)), + DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), + DATA_TYPE_PRIMITIVES.get(j).getName())); + } else { + typeString = + 
(String.format( + "%s_%s %s<%s>", + Character.toString((char) (startIndex + i)), + Character.toString((char) (startIndex + j)), + DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), + DATA_TYPE_PRIMITIVES.get(j).getName())); } - - session().execute(String.format("CREATE TYPE alldatatypes (%s)", Joiner.on(',').join(alpha_type_list))); - session().execute("CREATE TABLE alldatatypes_table (a int PRIMARY KEY, b frozen)"); - - // insert UDT data - UserType alldatatypesDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("alldatatypes"); - UDTValue alldatatypes = alldatatypesDef.newValue(); - - - for (int i = 0; i < DATA_TYPE_PRIMITIVES.size(); i++) { - DataType dataType = DATA_TYPE_PRIMITIVES.get(i); - String index = Character.toString((char) (startIndex + i)); - Object sampleData = samples.get(dataType); - - switch (dataType.getName()) { - case ASCII: - alldatatypes.setString(index, (String) sampleData); - break; - case BIGINT: - alldatatypes.setLong(index, (Long) sampleData); - break; - case BLOB: - alldatatypes.setBytes(index, (ByteBuffer) sampleData); - break; - case BOOLEAN: - alldatatypes.setBool(index, (Boolean) sampleData); - break; - case DECIMAL: - alldatatypes.setDecimal(index, (BigDecimal) sampleData); - break; - case DOUBLE: - alldatatypes.setDouble(index, (Double) sampleData); - break; - case DURATION: - alldatatypes.set(index, Duration.from(sampleData.toString()), Duration.class); - break; - case FLOAT: - alldatatypes.setFloat(index, (Float) sampleData); - break; - case INET: - alldatatypes.setInet(index, (InetAddress) sampleData); - break; - case TINYINT: - alldatatypes.setByte(index, (Byte) sampleData); - break; - case SMALLINT: - alldatatypes.setShort(index, (Short) sampleData); - break; - case INT: - alldatatypes.setInt(index, (Integer) sampleData); - break; - case TEXT: - alldatatypes.setString(index, (String) sampleData); - break; - case TIMESTAMP: - alldatatypes.setTimestamp(index, ((Date) sampleData)); - break; - case DATE: - alldatatypes.setDate(index, ((LocalDate) sampleData)); - break; - case TIME: - alldatatypes.setTime(index, ((Long) sampleData)); - break; - case TIMEUUID: - alldatatypes.setUUID(index, (UUID) sampleData); - break; - case UUID: - alldatatypes.setUUID(index, (UUID) sampleData); - break; - case VARCHAR: - alldatatypes.setString(index, (String) sampleData); - break; - case VARINT: - alldatatypes.setVarint(index, (BigInteger) sampleData); - break; - } - } - - PreparedStatement ins = session().prepare("INSERT INTO alldatatypes_table (a, b) VALUES (?, ?)"); - session().execute(ins.bind(0, alldatatypes)); - - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM alldatatypes_table"); - List rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); - - Row row = rows.get(0); - - assertThat(row.getInt("a")).isEqualTo(0); - assertThat(row.getUDTValue("b")).isEqualTo(alldatatypes); - } - - @Test(groups = "short") - public void should_store_and_retrieve_UDT_containing_collections_and_tuples() throws Exception { - // counters and durations are not allowed inside collections - DATA_TYPE_PRIMITIVES.remove(DataType.counter()); - DATA_TYPE_PRIMITIVES.remove(DataType.duration()); - - // create UDT - List alpha_type_list = new ArrayList(); - int startIndex = (int) 'a'; - for (int i = 0; i < DATA_TYPE_NON_PRIMITIVE_NAMES.size(); i++) - for (int j = 0; j < DATA_TYPE_PRIMITIVES.size(); j++) { - String typeString; - if (DATA_TYPE_NON_PRIMITIVE_NAMES.get(i) == DataType.Name.MAP) { - typeString = (String.format("%s_%s %s<%s, %s>", Character.toString((char) 
(startIndex + i)), - Character.toString((char) (startIndex + j)), DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), - DATA_TYPE_PRIMITIVES.get(j).getName(), DATA_TYPE_PRIMITIVES.get(j).getName())); - } else if (DATA_TYPE_NON_PRIMITIVE_NAMES.get(i) == DataType.Name.TUPLE) { - typeString = (String.format("%s_%s frozen<%s<%s>>", Character.toString((char) (startIndex + i)), - Character.toString((char) (startIndex + j)), DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), - DATA_TYPE_PRIMITIVES.get(j).getName())); - } else { - typeString = (String.format("%s_%s %s<%s>", Character.toString((char) (startIndex + i)), - Character.toString((char) (startIndex + j)), DATA_TYPE_NON_PRIMITIVE_NAMES.get(i), - DATA_TYPE_PRIMITIVES.get(j).getName())); - } - alpha_type_list.add(typeString); - } - - session().execute(String.format("CREATE TYPE allcollectiontypes (%s)", Joiner.on(',').join(alpha_type_list))); - session().execute("CREATE TABLE allcollectiontypes_table (a int PRIMARY KEY, b frozen)"); - - // insert UDT data - UserType allcollectiontypesDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("allcollectiontypes"); - UDTValue allcollectiontypes = allcollectiontypesDef.newValue(); - - for (int i = 0; i < DATA_TYPE_NON_PRIMITIVE_NAMES.size(); i++) - for (int j = 0; j < DATA_TYPE_PRIMITIVES.size(); j++) { - DataType.Name name = DATA_TYPE_NON_PRIMITIVE_NAMES.get(i); - DataType dataType = DATA_TYPE_PRIMITIVES.get(j); - - String index = Character.toString((char) (startIndex + i)) + "_" + Character.toString((char) (startIndex + j)); - Object sampleElement = samples.get(dataType); - switch (name) { - case LIST: - allcollectiontypes.setList(index, Lists.newArrayList(sampleElement)); - break; - case SET: - allcollectiontypes.setSet(index, Sets.newHashSet(sampleElement)); - break; - case MAP: - allcollectiontypes.setMap(index, ImmutableMap.of(sampleElement, sampleElement)); - break; - case TUPLE: - allcollectiontypes.setTupleValue(index, cluster().getMetadata().newTupleType(dataType).newValue(sampleElement)); - } - } - - PreparedStatement ins = session().prepare("INSERT INTO allcollectiontypes_table (a, b) VALUES (?, ?)"); - session().execute(ins.bind(0, allcollectiontypes)); - - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM allcollectiontypes_table"); - List rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); - - Row row = rows.get(0); - - assertThat(row.getInt("a")).isEqualTo(0); - assertThat(row.getUDTValue("b")).isEqualTo(allcollectiontypes); - } - - @Test(groups = "short") - public void should_save_and_retrieve_nested_UDTs() throws Exception { - final int MAX_NESTING_DEPTH = 4; - - // create UDT - session().execute("CREATE TYPE depth_0 (age int, name text)"); - - for (int i = 1; i <= MAX_NESTING_DEPTH; i++) { - session().execute(String.format("CREATE TYPE depth_%s (value frozen)", String.valueOf(i), String.valueOf(i - 1))); + alpha_type_list.add(typeString); + } + + session() + .execute( + String.format( + "CREATE TYPE allcollectiontypes (%s)", Joiner.on(',').join(alpha_type_list))); + session() + .execute( + "CREATE TABLE allcollectiontypes_table (a int PRIMARY KEY, b frozen)"); + + // insert UDT data + UserType allcollectiontypesDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType("allcollectiontypes"); + UDTValue allcollectiontypes = allcollectiontypesDef.newValue(); + + for (int i = 0; i < DATA_TYPE_NON_PRIMITIVE_NAMES.size(); i++) + for (int j = 0; j < DATA_TYPE_PRIMITIVES.size(); j++) { + DataType.Name name = DATA_TYPE_NON_PRIMITIVE_NAMES.get(i); + DataType 
dataType = DATA_TYPE_PRIMITIVES.get(j); + + String index = + Character.toString((char) (startIndex + i)) + + "_" + + Character.toString((char) (startIndex + j)); + Object sampleElement = samples.get(dataType); + switch (name) { + case LIST: + allcollectiontypes.setList(index, Lists.newArrayList(sampleElement)); + break; + case SET: + allcollectiontypes.setSet(index, Sets.newHashSet(sampleElement)); + break; + case MAP: + allcollectiontypes.setMap(index, ImmutableMap.of(sampleElement, sampleElement)); + break; + case TUPLE: + allcollectiontypes.setTupleValue( + index, cluster().getMetadata().newTupleType(dataType).newValue(sampleElement)); } + } - session().execute(String.format("CREATE TABLE nested_udt_table (a int PRIMARY KEY, b frozen, c frozen, d frozen, e frozen," + - "f frozen)", MAX_NESTING_DEPTH)); - - // insert UDT data - KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(keyspace); - UserType depthZeroDef = keyspaceMetadata.getUserType("depth_0"); - UDTValue depthZero = depthZeroDef.newValue().setInt("age", 42).setString("name", "Bob"); - - UserType depthOneDef = keyspaceMetadata.getUserType("depth_1"); - UDTValue depthOne = depthOneDef.newValue().setUDTValue("value", depthZero); + PreparedStatement ins = + session().prepare("INSERT INTO allcollectiontypes_table (a, b) VALUES (?, ?)"); + session().execute(ins.bind(0, allcollectiontypes)); - UserType depthTwoDef = keyspaceMetadata.getUserType("depth_2"); - UDTValue depthTwo = depthTwoDef.newValue().setUDTValue("value", depthOne); + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM allcollectiontypes_table"); + List rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); - UserType depthThreeDef = keyspaceMetadata.getUserType("depth_3"); - UDTValue depthThree = depthThreeDef.newValue().setUDTValue("value", depthTwo); + Row row = rows.get(0); - UserType depthFourDef = keyspaceMetadata.getUserType("depth_4"); - UDTValue depthFour = depthFourDef.newValue().setUDTValue("value", depthThree); + assertThat(row.getInt("a")).isEqualTo(0); + assertThat(row.getUDTValue("b")).isEqualTo(allcollectiontypes); + } - PreparedStatement ins = session().prepare("INSERT INTO nested_udt_table (a, b, c, d, e, f) VALUES (?, ?, ?, ?, ?, ?)"); - session().execute(ins.bind(0, depthZero, depthOne, depthTwo, depthThree, depthFour)); + @Test(groups = "short") + public void should_save_and_retrieve_nested_UDTs() throws Exception { + final int MAX_NESTING_DEPTH = 4; - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM nested_udt_table"); - List rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); + // create UDT + session().execute("CREATE TYPE depth_0 (age int, name text)"); - Row row = rows.get(0); - - assertThat(row.getInt("a")).isEqualTo(0); - assertThat(row.getUDTValue("b")).isEqualTo(depthZero); - assertThat(row.getUDTValue("c")).isEqualTo(depthOne); - assertThat(row.getUDTValue("d")).isEqualTo(depthTwo); - assertThat(row.getUDTValue("e")).isEqualTo(depthThree); - assertThat(row.getUDTValue("f")).isEqualTo(depthFour); + for (int i = 1; i <= MAX_NESTING_DEPTH; i++) { + session() + .execute( + String.format( + "CREATE TYPE depth_%s (value frozen)", + String.valueOf(i), String.valueOf(i - 1))); } - @Test(groups = "short") - public void should_save_and_retrieve_UDTs_with_null_values() throws Exception { - // create UDT - session().execute("CREATE TYPE user_null_values (a text, b int, c uuid, d blob)"); - session().execute("CREATE TABLE null_values_table (a int PRIMARY KEY, b 
frozen)"); - - // insert UDT data - UserType userTypeDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("user_null_values"); - UDTValue userType = userTypeDef.newValue().setString("a", null).setInt("b", 0).setUUID("c", null).setBytes("d", null); - - PreparedStatement ins = session().prepare("INSERT INTO null_values_table (a, b) VALUES (?, ?)"); - session().execute(ins.bind(0, userType)); - - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM null_values_table"); - List rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); - - Row row = rows.get(0); - - assertThat(row.getInt("a")).isEqualTo(0); - assertThat(row.getUDTValue("b")).isEqualTo(userType); - - // test empty strings - userType = userTypeDef.newValue().setString("a", "").setInt("b", 0).setUUID("c", null).setBytes("d", ByteBuffer.allocate(0)); - session().execute(ins.bind(0, userType)); - - // retrieve and verify data - rs = session().execute("SELECT * FROM null_values_table"); - rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); - - row = rows.get(0); - - assertThat(row.getInt("a")).isEqualTo(0); - assertThat(row.getUDTValue("b")).isEqualTo(userType); - } - - @Test(groups = "short") - public void should_save_and_retrieve_UDTs_with_null_collections() throws Exception { - // create UDT - session().execute("CREATE TYPE user_null_collections (a List, b Set, c Map, d frozen>)"); - session().execute("CREATE TABLE null_collections_table (a int PRIMARY KEY, b frozen)"); - - // insert null UDT data - PreparedStatement ins = session().prepare("INSERT INTO null_collections_table (a, b) " + - "VALUES (0, { a: ?, b: ?, c: ?, d: ? })"); - session().execute(ins.bind().setList(0, null).setSet(1, null).setMap(2, null).setTupleValue(3, null)); - - // retrieve and verify data - ResultSet rs = session().execute("SELECT * FROM null_collections_table"); - List rows = rs.all(); - assertThat(rows.size()).isEqualTo(1); - - Row row = rows.get(0); - assertThat(row.getInt("a")).isEqualTo(0); - - UserType userTypeDef = cluster().getMetadata().getKeyspace(keyspace).getUserType("user_null_collections"); - UDTValue userType = userTypeDef.newValue().setList("a", null).setSet("b", null).setMap("c", null).setTupleValue("d", null); - assertThat(row.getUDTValue("b")).isEqualTo(userType); - - // test missing UDT args - ins = session().prepare("INSERT INTO null_collections_table (a, b) " + - "VALUES (1, { a: ? 
})"); - session().execute(ins.bind().setList(0, new ArrayList())); - - // retrieve and verify data - rs = session().execute("SELECT * FROM null_collections_table"); - rows = rs.all(); - assertThat(rows.size()).isEqualTo(2); - - row = rows.get(0); - assertThat(row.getInt("a")).isEqualTo(1); - - userType = userTypeDef.newValue().setList(0, new ArrayList()); - assertThat(row.getUDTValue("b")).isEqualTo(userType); - } - - @Test(groups = "short") - public void should_indicate_user_type_is_frozen() { - session().execute("CREATE TABLE frozen_table(k int primary key, v frozen)"); - - KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(this.keyspace); - - assertThat(keyspaceMetadata.getUserType("type_for_frozen_test")) - .isNotFrozen(); - - DataType userType = keyspaceMetadata.getTable("frozen_table").getColumn("v").getType(); - assertThat(userType).isFrozen(); - assertThat(userType.toString()).isEqualTo("frozen<" + keyspace + ".type_for_frozen_test>"); - - // The frozen flag is not set for result set definitions (the protocol does not provide - // that information and it's not really useful in that situation). We always return false. - ResultSet rs = session().execute("SELECT v FROM frozen_table WHERE k = 1"); - assertThat(rs.getColumnDefinitions().getType(0)) - .isNotFrozen(); - - // Same thing for prepared statements - PreparedStatement pst = session().prepare("SELECT v FROM frozen_table WHERE k = ?"); - assertThat(pst.getVariables().getType(0)) - .isNotFrozen(); - } - - @Test(groups = "short") - @CassandraVersion(value = "3.6", description = "Non-frozen UDTs were introduced in C* 3.6") - public void should_indicate_user_type_is_not_frozen() { - session().execute("CREATE TABLE not_frozen_table(k int primary key, v type_for_frozen_test)"); - - KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(this.keyspace); - - assertThat(keyspaceMetadata.getUserType("type_for_frozen_test")) - .isNotFrozen(); - - DataType userType = keyspaceMetadata.getTable("not_frozen_table").getColumn("v").getType(); - assertThat(userType).isNotFrozen(); - assertThat(userType.toString()).isEqualTo(keyspace + ".type_for_frozen_test"); - - ResultSet rs = session().execute("SELECT v FROM not_frozen_table WHERE k = 1"); - assertThat(rs.getColumnDefinitions().getType(0)) - .isNotFrozen(); - - PreparedStatement pst = session().prepare("SELECT v FROM not_frozen_table WHERE k = ?"); - assertThat(pst.getVariables().getType(0)) - .isNotFrozen(); - } + session() + .execute( + String.format( + "CREATE TABLE nested_udt_table (a int PRIMARY KEY, b frozen, c frozen, d frozen, e frozen," + + "f frozen)", + MAX_NESTING_DEPTH)); + + // insert UDT data + KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(keyspace); + UserType depthZeroDef = keyspaceMetadata.getUserType("depth_0"); + UDTValue depthZero = depthZeroDef.newValue().setInt("age", 42).setString("name", "Bob"); + + UserType depthOneDef = keyspaceMetadata.getUserType("depth_1"); + UDTValue depthOne = depthOneDef.newValue().setUDTValue("value", depthZero); + + UserType depthTwoDef = keyspaceMetadata.getUserType("depth_2"); + UDTValue depthTwo = depthTwoDef.newValue().setUDTValue("value", depthOne); + + UserType depthThreeDef = keyspaceMetadata.getUserType("depth_3"); + UDTValue depthThree = depthThreeDef.newValue().setUDTValue("value", depthTwo); + + UserType depthFourDef = keyspaceMetadata.getUserType("depth_4"); + UDTValue depthFour = depthFourDef.newValue().setUDTValue("value", depthThree); + + PreparedStatement ins = + 
session() + .prepare("INSERT INTO nested_udt_table (a, b, c, d, e, f) VALUES (?, ?, ?, ?, ?, ?)"); + session().execute(ins.bind(0, depthZero, depthOne, depthTwo, depthThree, depthFour)); + + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM nested_udt_table"); + List rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); + + Row row = rows.get(0); + + assertThat(row.getInt("a")).isEqualTo(0); + assertThat(row.getUDTValue("b")).isEqualTo(depthZero); + assertThat(row.getUDTValue("c")).isEqualTo(depthOne); + assertThat(row.getUDTValue("d")).isEqualTo(depthTwo); + assertThat(row.getUDTValue("e")).isEqualTo(depthThree); + assertThat(row.getUDTValue("f")).isEqualTo(depthFour); + } + + @Test(groups = "short") + public void should_save_and_retrieve_UDTs_with_null_values() throws Exception { + // create UDT + session().execute("CREATE TYPE user_null_values (a text, b int, c uuid, d blob)"); + session() + .execute("CREATE TABLE null_values_table (a int PRIMARY KEY, b frozen)"); + + // insert UDT data + UserType userTypeDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType("user_null_values"); + UDTValue userType = + userTypeDef + .newValue() + .setString("a", null) + .setInt("b", 0) + .setUUID("c", null) + .setBytes("d", null); + + PreparedStatement ins = session().prepare("INSERT INTO null_values_table (a, b) VALUES (?, ?)"); + session().execute(ins.bind(0, userType)); + + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM null_values_table"); + List rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); + + Row row = rows.get(0); + + assertThat(row.getInt("a")).isEqualTo(0); + assertThat(row.getUDTValue("b")).isEqualTo(userType); + + // test empty strings + userType = + userTypeDef + .newValue() + .setString("a", "") + .setInt("b", 0) + .setUUID("c", null) + .setBytes("d", ByteBuffer.allocate(0)); + session().execute(ins.bind(0, userType)); + + // retrieve and verify data + rs = session().execute("SELECT * FROM null_values_table"); + rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); + + row = rows.get(0); + + assertThat(row.getInt("a")).isEqualTo(0); + assertThat(row.getUDTValue("b")).isEqualTo(userType); + } + + @Test(groups = "short") + public void should_save_and_retrieve_UDTs_with_null_collections() throws Exception { + // create UDT + session() + .execute( + "CREATE TYPE user_null_collections (a List, b Set, c Map, d frozen>)"); + session() + .execute( + "CREATE TABLE null_collections_table (a int PRIMARY KEY, b frozen)"); + + // insert null UDT data + PreparedStatement ins = + session() + .prepare( + "INSERT INTO null_collections_table (a, b) " + + "VALUES (0, { a: ?, b: ?, c: ?, d: ? })"); + session() + .execute( + ins.bind().setList(0, null).setSet(1, null).setMap(2, null).setTupleValue(3, null)); + + // retrieve and verify data + ResultSet rs = session().execute("SELECT * FROM null_collections_table"); + List rows = rs.all(); + assertThat(rows.size()).isEqualTo(1); + + Row row = rows.get(0); + assertThat(row.getInt("a")).isEqualTo(0); + + UserType userTypeDef = + cluster().getMetadata().getKeyspace(keyspace).getUserType("user_null_collections"); + UDTValue userType = + userTypeDef + .newValue() + .setList("a", null) + .setSet("b", null) + .setMap("c", null) + .setTupleValue("d", null); + assertThat(row.getUDTValue("b")).isEqualTo(userType); + + // test missing UDT args + ins = session().prepare("INSERT INTO null_collections_table (a, b) " + "VALUES (1, { a: ? 
})"); + session().execute(ins.bind().setList(0, new ArrayList())); + + // retrieve and verify data + rs = session().execute("SELECT * FROM null_collections_table"); + rows = rs.all(); + assertThat(rows.size()).isEqualTo(2); + + row = rows.get(0); + assertThat(row.getInt("a")).isEqualTo(1); + + userType = userTypeDef.newValue().setList(0, new ArrayList()); + assertThat(row.getUDTValue("b")).isEqualTo(userType); + } + + @Test(groups = "short") + public void should_indicate_user_type_is_frozen() { + session() + .execute("CREATE TABLE frozen_table(k int primary key, v frozen)"); + + KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(this.keyspace); + + assertThat(keyspaceMetadata.getUserType("type_for_frozen_test")).isNotFrozen(); + + DataType userType = keyspaceMetadata.getTable("frozen_table").getColumn("v").getType(); + assertThat(userType).isFrozen(); + assertThat(userType.toString()).isEqualTo("frozen<" + keyspace + ".type_for_frozen_test>"); + + // The frozen flag is not set for result set definitions (the protocol does not provide + // that information and it's not really useful in that situation). We always return false. + ResultSet rs = session().execute("SELECT v FROM frozen_table WHERE k = 1"); + assertThat(rs.getColumnDefinitions().getType(0)).isNotFrozen(); + + // Same thing for prepared statements + PreparedStatement pst = session().prepare("SELECT v FROM frozen_table WHERE k = ?"); + assertThat(pst.getVariables().getType(0)).isNotFrozen(); + } + + @Test(groups = "short") + @CassandraVersion(value = "3.6", description = "Non-frozen UDTs were introduced in C* 3.6") + public void should_indicate_user_type_is_not_frozen() { + session().execute("CREATE TABLE not_frozen_table(k int primary key, v type_for_frozen_test)"); + + KeyspaceMetadata keyspaceMetadata = cluster().getMetadata().getKeyspace(this.keyspace); + + assertThat(keyspaceMetadata.getUserType("type_for_frozen_test")).isNotFrozen(); + + DataType userType = keyspaceMetadata.getTable("not_frozen_table").getColumn("v").getType(); + assertThat(userType).isNotFrozen(); + assertThat(userType.toString()).isEqualTo(keyspace + ".type_for_frozen_test"); + + ResultSet rs = session().execute("SELECT v FROM not_frozen_table WHERE k = 1"); + assertThat(rs.getColumnDefinitions().getType(0)).isNotFrozen(); + + PreparedStatement pst = session().prepare("SELECT v FROM not_frozen_table WHERE k = ?"); + assertThat(pst.getVariables().getType(0)).isNotFrozen(); + } + + @Test(groups = "short") + public void should_handle_udt_named_like_a_collection() { + execute( + "CREATE TYPE tuple(a text)", + "CREATE TYPE list(a text)", + "CREATE TYPE frozen(a text)", + "CREATE TYPE udt(tuple frozen, frozen frozen, " + + "m map,frozen>)"); + UserType udt = cluster().getMetadata().getKeyspace(keyspace).getUserType("udt"); + assertThat(udt.getFieldType("tuple")).isInstanceOf(UserType.class); + assertThat(udt.getFieldType("frozen")).isInstanceOf(UserType.class); + assertThat((udt.getFieldType("m").getTypeArguments().get(0))).isInstanceOf(UserType.class); + assertThat((udt.getFieldType("m").getTypeArguments().get(1))).isInstanceOf(UserType.class); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberAssert.java b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberAssert.java index 9bfa3e7178d..d2c8fea5373 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberAssert.java @@ -1,11 +1,13 @@ /* - * 
Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,50 +17,51 @@ */ package com.datastax.driver.core; -import org.assertj.core.api.AbstractComparableAssert; - import static com.datastax.driver.core.Assertions.assertThat; -public class VersionNumberAssert extends AbstractComparableAssert<VersionNumberAssert, VersionNumber> { +import org.assertj.core.api.AbstractComparableAssert; + +public class VersionNumberAssert + extends AbstractComparableAssert<VersionNumberAssert, VersionNumber> { - public VersionNumberAssert(VersionNumber actual) { - super(actual, VersionNumberAssert.class); - } + public VersionNumberAssert(VersionNumber actual) { + super(actual, VersionNumberAssert.class); + } - public VersionNumberAssert hasMajorMinorPatch(int major, int minor, int patch) { - assertThat(actual.getMajor()).isEqualTo(major); - assertThat(actual.getMinor()).isEqualTo(minor); - assertThat(actual.getPatch()).isEqualTo(patch); - return this; - } + public VersionNumberAssert hasMajorMinorPatch(int major, int minor, int patch) { + assertThat(actual.getMajor()).isEqualTo(major); + assertThat(actual.getMinor()).isEqualTo(minor); + assertThat(actual.getPatch()).isEqualTo(patch); + return this; + } - public VersionNumberAssert hasDsePatch(int dsePatch) { - assertThat(actual.getDSEPatch()).isEqualTo(dsePatch); - return this; - } + public VersionNumberAssert hasDsePatch(int dsePatch) { + assertThat(actual.getDSEPatch()).isEqualTo(dsePatch); + return this; + } - public VersionNumberAssert hasPreReleaseLabels(String...
labels) { + assertThat(actual.getPreReleaseLabels()).containsExactly(labels); + return this; + } - public VersionNumberAssert hasNoPreReleaseLabels() { - assertThat(actual.getPreReleaseLabels()).isNull(); - return this; - } + public VersionNumberAssert hasNoPreReleaseLabels() { + assertThat(actual.getPreReleaseLabels()).isNull(); + return this; + } - public VersionNumberAssert hasBuildLabel(String label) { - assertThat(actual.getBuildLabel()).isEqualTo(label); - return this; - } + public VersionNumberAssert hasBuildLabel(String label) { + assertThat(actual.getBuildLabel()).isEqualTo(label); + return this; + } - public VersionNumberAssert hasNextStable(String version) { - assertThat(actual.nextStable()).isEqualTo(VersionNumber.parse(version)); - return this; - } + public VersionNumberAssert hasNextStable(String version) { + assertThat(actual.nextStable()).isEqualTo(VersionNumber.parse(version)); + return this; + } - public VersionNumberAssert hasToString(String string) { - assertThat(actual.toString()).isEqualTo(string); - return this; - } + public VersionNumberAssert hasToString(String string) { + assertThat(actual.toString()).isEqualTo(string); + return this; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java index df1601b48b6..1f0e0cead5c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,90 +17,99 @@ */ package com.datastax.driver.core; -import org.testng.annotations.Test; - import static com.datastax.driver.core.Assertions.assertThat; +import org.testng.annotations.Test; + public class VersionNumberTest { - @Test(groups = "unit") - public void should_parse_release_version() { - assertThat(VersionNumber.parse("1.2.19")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(-1) - .hasNoPreReleaseLabels() - .hasBuildLabel(null) - .hasNextStable("1.2.19") - .hasToString("1.2.19"); - } - - @Test(groups = "unit") - public void should_parse_release_without_patch() { - assertThat(VersionNumber.parse("1.2")) - .hasMajorMinorPatch(1, 2, 0); - } - - @Test(groups = "unit") - public void should_parse_pre_release_version() { - assertThat(VersionNumber.parse("1.2.0-beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test(groups = "unit") - public void should_allow_tilde_as_first_pre_release_delimiter() { - assertThat(VersionNumber.parse("1.2.0~beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test(groups = "unit") - public void should_parse_dse_patch() { - assertThat(VersionNumber.parse("1.2.19.2-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(2) - .hasToString("1.2.19.2-SNAPSHOT") - .hasNextStable("1.2.19.2"); - } - - @Test(groups = "unit") - public void should_order_versions() { - // by component - assertOrder("1.2.0", "2.0.0", -1); - assertOrder("2.0.0", "2.1.0", -1); - assertOrder("2.0.1", "2.0.2", -1); - assertOrder("2.0.1.1", "2.0.1.2", -1); - - // shortened vs. longer version - assertOrder("2.0", "2.0.0", 0); - assertOrder("2.0", "2.0.1", -1); - - // any DSE version is higher than no DSE version - assertOrder("2.0.0", "2.0.0.0", -1); - assertOrder("2.0.0", "2.0.0.1", -1); - - // pre-release vs. release - assertOrder("2.0.0-beta1", "2.0.0", -1); - assertOrder("2.0.0-SNAPSHOT", "2.0.0", -1); - assertOrder("2.0.0-beta1-SNAPSHOT", "2.0.0", -1); - - // pre-release vs. 
pre-release - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-d", -1); - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-c-d", -1); - - // build number ignored - assertOrder("2.0.0+build01", "2.0.0+build02", 0); - } - - private void assertOrder(String version1, String version2, int expected) { - assertThat(VersionNumber.parse(version1).compareTo(VersionNumber.parse(version2))).isEqualTo(expected); - } + @Test(groups = "unit") + public void should_parse_release_version() { + assertThat(VersionNumber.parse("1.2.19")) + .hasMajorMinorPatch(1, 2, 19) + .hasDsePatch(-1) + .hasNoPreReleaseLabels() + .hasBuildLabel(null) + .hasNextStable("1.2.19") + .hasToString("1.2.19"); + } + + @Test(groups = "unit") + public void should_parse_release_without_patch() { + assertThat(VersionNumber.parse("1.2")).hasMajorMinorPatch(1, 2, 0); + } + + @Test(groups = "unit") + public void should_parse_pre_release_version() { + assertThat(VersionNumber.parse("1.2.0-beta1-SNAPSHOT")) + .hasMajorMinorPatch(1, 2, 0) + .hasDsePatch(-1) + .hasPreReleaseLabels("beta1", "SNAPSHOT") + .hasBuildLabel(null) + .hasToString("1.2.0-beta1-SNAPSHOT") + .hasNextStable("1.2.0"); + } + + @Test(groups = "unit") + public void should_allow_tilde_as_first_pre_release_delimiter() { + assertThat(VersionNumber.parse("1.2.0~beta1-SNAPSHOT")) + .hasMajorMinorPatch(1, 2, 0) + .hasDsePatch(-1) + .hasPreReleaseLabels("beta1", "SNAPSHOT") + .hasBuildLabel(null) + .hasToString("1.2.0-beta1-SNAPSHOT") + .hasNextStable("1.2.0"); + } + + @Test(groups = "unit") + public void should_parse_dse_patch() { + assertThat(VersionNumber.parse("1.2.19.2-SNAPSHOT")) + .hasMajorMinorPatch(1, 2, 19) + .hasDsePatch(2) + .hasToString("1.2.19.2-SNAPSHOT") + .hasNextStable("1.2.19.2"); + } + + @Test(groups = "unit") + public void should_order_versions() { + // by component + assertOrder("1.2.0", "2.0.0", -1); + assertOrder("2.0.0", "2.1.0", -1); + assertOrder("2.0.1", "2.0.2", -1); + assertOrder("2.0.1.1", "2.0.1.2", -1); + + // shortened vs. longer version + assertOrder("2.0", "2.0.0", 0); + assertOrder("2.0", "2.0.1", -1); + + // any DSE version is higher than no DSE version + assertOrder("2.0.0", "2.0.0.0", -1); + assertOrder("2.0.0", "2.0.0.1", -1); + + // pre-release vs. release + assertOrder("2.0.0-beta1", "2.0.0", -1); + assertOrder("2.0.0-SNAPSHOT", "2.0.0", -1); + assertOrder("2.0.0-beta1-SNAPSHOT", "2.0.0", -1); + + // pre-release vs. 
pre-release + assertOrder("2.0.0-a-b-c", "2.0.0-a-b-d", -1); + assertOrder("2.0.0-a-b-c", "2.0.0-a-b-c-d", -1); + + // build number ignored + assertOrder("2.0.0+build01", "2.0.0+build02", 0); + } + + @Test(groups = "unit") + public void should_treat_same_prerelease_equal() { + VersionNumber version1 = VersionNumber.parse("3.0.15-SNAPSHOT"); + VersionNumber version2 = VersionNumber.parse("3.0.15-SNAPSHOT"); + + assertThat(version1).isEqualTo(version2); + assertThat(version1.hashCode()).isEqualTo(version2.hashCode()); + } + + private void assertOrder(String version1, String version2, int expected) { + assertThat(VersionNumber.parse(version1).compareTo(VersionNumber.parse(version2))) + .isEqualTo(expected); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/VirtualTableMetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/VirtualTableMetadataTest.java new file mode 100644 index 00000000000..55b99cdf93b --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/VirtualTableMetadataTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import static com.datastax.driver.core.Assertions.assertThat; + +import com.datastax.driver.core.utils.CassandraVersion; +import java.util.UUID; +import org.testng.annotations.Test; + +@CassandraVersion("4.0.0") +@CCMConfig() +public class VirtualTableMetadataTest extends CCMTestsSupport { + + @Test(groups = "short") + public void should_parse_virtual_metadata() { + KeyspaceMetadata km = session().getCluster().getMetadata().getKeyspace("system_views"); + // Keyspace name should be set, marked as virtual, and have a clients table. + // All other values should be defaulted since they are not defined in the virtual schema tables. + assertThat(km.getTables().size() >= 2); + assertThat(km.isVirtual()).isTrue(); + assertThat(km.isDurableWrites()).isFalse(); + assertThat(km.getName()).isEqualTo("system_views"); + assertThat(km.getUserTypes().size()).isEqualTo(0); + assertThat(km.getFunctions().size()).isEqualTo(0); + assertThat(km.getMaterializedViews().size()).isEqualTo(0); + assertThat(km.getAggregates().size()).isEqualTo(0); + assertThat(km.asCQLQuery()) + .isEqualTo( + "/* VIRTUAL KEYSPACE system_views WITH REPLICATION = { 'class' : 'null' } " + + "AND DURABLE_WRITES = false;*/"); + // Table name should be set, marked as virtual, and it should have columns set. + // indexes, views, clustering column, clustering order and id are not defined in the virtual + // schema tables. 
+ TableMetadata tm = km.getTable("clients"); + assertThat(tm).isNotNull(); + assertThat(tm.getName()).isEqualTo("clients"); + assertThat(tm.isVirtual()).isTrue(); + assertThat(tm.getColumns().size()).isEqualTo(12); + assertThat(tm.getPartitionKey().size()).isEqualTo(1); + assertThat(tm.getPartitionKey().get(0).getName()).isEqualTo("address"); + assertThat(tm.getClusteringColumns().size()).isEqualTo(1); + assertThat(tm.getClusteringColumns().get(0).getName()).isEqualTo("port"); + assertThat(tm.getIndexes().size()).isEqualTo(0); + assertThat(tm.getViews().size()).isEqualTo(0); + assertThat(tm.getClusteringColumns().size()).isEqualTo(1); + assertThat(tm.getClusteringOrder().size()).isEqualTo(1); + assertThat(tm.getId()).isEqualTo(new UUID(0L, 0L)); + assertThat(tm.getOptions()).isNull(); + assertThat(tm.getKeyspace()).isEqualTo(km); + assertThat(tm.asCQLQuery()) + .isEqualTo( + "/* VIRTUAL TABLE system_views.clients (address inet, port int, " + + "connection_stage text, driver_name text, driver_version text, hostname text, protocol_version int, " + + "request_count bigint, ssl_cipher_suite text, ssl_enabled boolean, ssl_protocol text, username text, " + + "PRIMARY KEY (address, port)) */"); + // ColumnMetadata is as expected + ColumnMetadata cm = tm.getColumn("driver_name"); + assertThat(cm).isNotNull(); + assertThat(cm.getParent()).isEqualTo(tm); + assertThat(cm.getType()).isEqualTo(DataType.text()); + assertThat(cm.getName()).isEqualTo("driver_name"); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/WarningsTest.java b/driver-core/src/test/java/com/datastax/driver/core/WarningsTest.java index 190b728a8c2..fbfea9afc80 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/WarningsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/WarningsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,35 +17,133 @@ */ package com.datastax.driver.core; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Strings; -import org.testng.annotations.Test; - import java.util.List; - -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; @CCMConfig(config = {"batch_size_warn_threshold_in_kb:5"}) -@CassandraVersion("2.2.0") public class WarningsTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TABLE foo(k int primary key, v text)"); + @Override + public void onTestContextInitialized() { + execute("CREATE TABLE foo(k int primary key, v text)"); + } + + @Test(groups = "short") + @CassandraVersion("2.2.0") + public void should_expose_warnings_on_execution_info() throws Exception { + // the default batch size warn threshold is 5 * 1024 bytes, but after CASSANDRA-10876 there must + // be multiple mutations in a batch to trigger this warning so the batch includes 2 different + // inserts. + final String query = + String.format( + "BEGIN UNLOGGED BATCH\n" + + "INSERT INTO foo (k, v) VALUES (1, '%s')\n" + + "INSERT INTO foo (k, v) VALUES (2, '%s')\n" + + "APPLY BATCH", + Strings.repeat("1", 2 * 1024), Strings.repeat("1", 3 * 1024)); + MemoryAppender logAppender = new MemoryAppender(); + logAppender.enableFor(RequestHandler.class); + try { + ResultSet rs = session().execute(query); + List warnings = rs.getExecutionInfo().getWarnings(); + // some versions of Cassandra will generate more than 1 log for this query + assertThat(warnings).isNotEmpty(); + // also assert that by default, the warning is logged and truncated to + // DEFAULT_MAX_QUERY_STRING_LENGTH + String log = logAppender.waitAndGet(2000); + assertThat(log).isNotEmpty(); + assertThat(log) + .startsWith("Query '") + // query will only be logged up to QueryLogger.DEFAULT_MAX_QUERY_STRING_LENGTH characters + .contains(query.substring(0, QueryLogger.DEFAULT_MAX_QUERY_STRING_LENGTH)) + .contains("' generated server side warning(s): ") + .contains("Batch") + .contains(keyspace + ".foo") + .contains(" is of size") + .contains(", exceeding specified threshold"); + } finally { + logAppender.disableFor(RequestHandler.class); + } + } + + @Test(groups = "short") + @CassandraVersion("3.0.0") + public void should_execute_query_and_log_server_side_warnings() throws Exception { + // Assert that logging of server-side query warnings is NOT disabled + assertThat(Boolean.getBoolean(RequestHandler.DISABLE_QUERY_WARNING_LOGS)).isFalse(); + + // Given a query that will produce server side warnings that will be embedded in the + // ExecutionInfo + final String query = "SELECT count(*) FROM foo;"; + SimpleStatement statement = new SimpleStatement(query); + // When the query is executed + MemoryAppender logAppender = new MemoryAppender(); + logAppender.enableFor(RequestHandler.class); + try { + ResultSet rs = session().execute(statement); + // Then the result has 1 Row + Row row = rs.one(); + assertThat(row).isNotNull(); + // And there is a server side warning captured in the ResultSet's ExecutionInfo + ExecutionInfo ei = rs.getExecutionInfo(); + List warnings = ei.getWarnings(); + 
assertThat(warnings).isNotEmpty(); + assertThat(warnings.size()).isEqualTo(1); + assertThat(warnings.get(0)).isEqualTo("Aggregation query used without partition key"); + // And the driver logged the server side warning + String log = logAppender.waitAndGet(2000); + assertThat(log).isNotEmpty(); + assertThat(log) + .startsWith( + "Query '[0 bound values] " + + query + + "' generated server side warning(s): Aggregation query used without partition key"); + } finally { + logAppender.disableFor(RequestHandler.class); } + } - @Test(groups = "short") - public void should_expose_warnings_on_execution_info() { - // the default batch size warn threshold is 5 * 1024 bytes, but after CASSANDRA-10876 there must be - // multiple mutations in a batch to trigger this warning so the batch includes 2 different inserts. - ResultSet rs = session().execute(String.format("BEGIN UNLOGGED BATCH\n" + - "INSERT INTO foo (k, v) VALUES (1, '%s')\n" + - "INSERT INTO foo (k, v) VALUES (2, '%s')\n" + - "APPLY BATCH", - Strings.repeat("1", 2 * 1024), - Strings.repeat("1", 3 * 1024))); - - List warnings = rs.getExecutionInfo().getWarnings(); - assertThat(warnings).hasSize(1); + @Test(groups = "isolated") + @CassandraVersion("3.0.0") + public void should_execute_query_and_not_log_server_side_warnings() throws Exception { + // Get the system property value for disabling logging server side warnings + final String disabledLogFlag = + System.getProperty(RequestHandler.DISABLE_QUERY_WARNING_LOGS, "false"); + // assert that logs are NOT disabled + assertThat(disabledLogFlag).isEqualTo("false"); + // Disable the logs + System.setProperty(RequestHandler.DISABLE_QUERY_WARNING_LOGS, "true"); + try { + // Given a query that will produce server side warnings that will be embedded in the + // ExecutionInfo + SimpleStatement statement = new SimpleStatement("SELECT count(*) FROM foo"); + // When the query is executed + MemoryAppender logAppender = new MemoryAppender(); + logAppender.enableFor(RequestHandler.class); + try { + ResultSet rs = session().execute(statement); + // Then the result has 1 Row + Row row = rs.one(); + assertThat(row).isNotNull(); + // And there is a server side warning captured in the ResultSet's ExecutionInfo + ExecutionInfo ei = rs.getExecutionInfo(); + List warnings = ei.getWarnings(); + assertThat(warnings).isNotEmpty(); + assertThat(warnings.size()).isEqualTo(1); + assertThat(warnings.get(0)).isEqualTo("Aggregation query used without partition key"); + // And the driver did NOT log the server side warning + String log = logAppender.waitAndGet(2000); + assertThat(log).isNullOrEmpty(); + } finally { + logAppender.disableFor(RequestHandler.class); + } + } finally { + // reset the logging flag + System.setProperty(RequestHandler.DISABLE_QUERY_WARNING_LOGS, disabledLogFlag); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java b/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java new file mode 100644 index 00000000000..7e91543861c --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.cloud; + +import static com.datastax.driver.core.cloud.SniProxyServer.CERTS_BUNDLE_SUFFIX; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.EndPoint; +import com.datastax.driver.core.PlainTextAuthProvider; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.github.tomakehurst.wiremock.WireMockServer; +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import org.parboiled.common.FileUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class CloudTest { + + private SniProxyServer proxy = new SniProxyServer(); + + private WireMockServer wireMockServer; + + @BeforeClass(groups = "short") + public void startProxy() { + proxy.startProxy(); + } + + @BeforeMethod(groups = "short") + public void startWireMock() { + wireMockServer = new WireMockServer(wireMockConfig().dynamicPort().dynamicHttpsPort()); + wireMockServer.start(); + } + + @AfterMethod(groups = "short") + public void stopWireMock() { + wireMockServer.stop(); + } + + @AfterClass(groups = "short", alwaysRun = true) + public void stopProxy() throws Exception { + proxy.stopProxy(); + } + + @Test(groups = "short") + public void should_connect_to_proxy_using_absolute_path() { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleFile()) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + + @Test(groups = "short") + public void should_connect_to_proxy_using_non_normalized_path() { + String path = + String.format("%s/%s", proxy.getProxyRootPath(), "certs/bundles/../bundles/creds-v1.zip"); + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(new File(path)) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + + @Test(groups = "short") + public void should_connect_to_proxy_using_file_provided_by_the_http_URL() throws IOException { + // given + wireMockServer.stubFor( + any(urlEqualTo(CERTS_BUNDLE_SUFFIX)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(FileUtils.readAllBytes(proxy.getSecureBundleFile())))); 
+ + URL configFile = + new URL(String.format("http://localhost:%d%s", wireMockServer.port(), CERTS_BUNDLE_SUFFIX)); + + // when + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(configFile) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + + // then + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + + @Test(groups = "short") + public void should_connect_to_proxy_using_file_provided_by_input_stream() throws IOException { + // given + wireMockServer.stubFor( + any(urlEqualTo(CERTS_BUNDLE_SUFFIX)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(FileUtils.readAllBytes(proxy.getSecureBundleFile())))); + + URL configFile = + new URL(String.format("http://localhost:%d%s", wireMockServer.port(), CERTS_BUNDLE_SUFFIX)); + + // when + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(configFile.openStream()) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + + // then + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + + @Test(groups = "short") + public void should_connect_to_proxy_using_auth_provider() { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .withAuthProvider(new PlainTextAuthProvider("cassandra", "cassandra")) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + + @Test(groups = "short") + public void should_not_connect_to_proxy_bad_creds() { + try { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + fail("Expected an AuthenticationException"); + } catch (AuthenticationException e) { + assertThat(e).hasMessageStartingWith("Authentication error on host"); + } + } + + @Test(groups = "short") + public void should_not_connect_to_proxy() { + try { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleUnreachable()) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e).hasMessageStartingWith("Cannot construct cloud config from the cloudConfigUrl"); + } + } + + @Test(groups = "short") + public void should_not_allow_contact_points_and_cloud() { + try { + Session session = + Cluster.builder() + .addContactPoint("127.0.0.1") + .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e) + .hasMessageStartingWith( + "Can't use withCloudSecureConnectBundle if you've already called addContactPoint(s)"); + } + } + + @Test(groups = "short") + public void should_not_allow_cloud_with_contact_points_string() { + try { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .addContactPoint("127.0.0.1") + .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e) + .hasMessageStartingWith( + 
"Can't use addContactPoint(s) if you've already called withCloudSecureConnectBundle"); + } + } + + @Test(groups = "short") + public void should_not_allow_cloud_with_contact_points_endpoint() { + try { + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .addContactPoint( + new EndPoint() { + @Override + public InetSocketAddress resolve() { + return null; + } + }) + .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e) + .hasMessageStartingWith( + "Can't use addContactPoint(s) if you've already called withCloudSecureConnectBundle"); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/cloud/SniProxyServer.java b/driver-core/src/test/java/com/datastax/driver/core/cloud/SniProxyServer.java new file mode 100644 index 00000000000..59c3e0c51e4 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/cloud/SniProxyServer.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.datastax.driver.core.cloud; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.apache.commons.exec.CommandLine; +import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.ExecuteStreamHandler; +import org.apache.commons.exec.ExecuteWatchdog; +import org.apache.commons.exec.Executor; +import org.apache.commons.exec.LogOutputStream; +import org.apache.commons.exec.PumpStreamHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SniProxyServer { + private static final Logger logger = LoggerFactory.getLogger(SniProxyServer.class); + private final File proxyPath; + private boolean isRunning = false; + + static final String CERTS_BUNDLE_SUFFIX = "/certs/bundles/creds-v1.zip"; + + public SniProxyServer() { + proxyPath = new File(System.getProperty("proxy.path", "./")); + } + + public void startProxy() { + CommandLine run = CommandLine.parse(proxyPath + "/run.sh"); + execute(run); + isRunning = true; + } + + public void stopProxy() { + if (isRunning) { + CommandLine findImageId = + CommandLine.parse("docker ps -a -q --filter ancestor=single_endpoint"); + String id = execute(findImageId); + CommandLine stop = CommandLine.parse("docker kill " + id); + execute(stop); + isRunning = false; + } + } + + public boolean isRunning() { + return isRunning; + } + + public File getProxyRootPath() { + return proxyPath; + } + + public File getSecureBundleFile() { + return new File(proxyPath + CERTS_BUNDLE_SUFFIX); + } + + public File getSecureBundleNoCredsPath() { + return new File(proxyPath + "/certs/bundles/creds-v1-wo-creds.zip"); + } + + public File getSecureBundleUnreachable() { + return new File(proxyPath + "/certs/bundles/creds-v1-unreachable.zip"); + } + + private String execute(CommandLine cli) { + logger.debug("Executing: " + cli); + ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + LogOutputStream errStream = + new LogOutputStream() { + @Override + protected void processLine(String line, int logLevel) { + logger.error("sniendpointerr> {}", line); + } + }; + try { + Executor executor = new DefaultExecutor(); + ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); + executor.setStreamHandler(streamHandler); + executor.setWatchdog(watchDog); + executor.setWorkingDirectory(proxyPath); + int retValue = executor.execute(cli); + if (retValue != 0) { + logger.error( + "Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); + } + return outStream.toString(); + } catch (IOException ex) { + if (watchDog.killedProcess()) { + throw new RuntimeException("The command '" + cli + "' was killed after 10 minutes"); + } else { + throw new RuntimeException("The command '" + cli + "' failed to execute", ex); + } + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ConnectionExceptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ConnectionExceptionTest.java index eb322a11ecf..d8244526557 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ConnectionExceptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ConnectionExceptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,44 +17,41 @@ */ package com.datastax.driver.core.exceptions; -import org.testng.annotations.Test; - -import java.net.InetSocketAddress; - import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; -public class ConnectionExceptionTest { - - /** - * @jira_ticket JAVA-1139 - */ - @Test(groups = "unit") - public void getHost_should_return_null_if_address_is_null() { - assertNull(new ConnectionException(null, "Test message").getHost()); - } - - /** - * @jira_ticket JAVA-1139 - */ - @Test(groups = "unit") - public void getMessage_should_return_message_if_address_is_null() { - assertEquals(new ConnectionException(null, "Test message").getMessage(), "Test message"); - } +import com.datastax.driver.core.EndPoints; +import org.testng.annotations.Test; - /** - * @jira_ticket JAVA-1139 - */ - @Test(groups = "unit") - public void getMessage_should_return_message_if_address_is_unresolved() { - assertEquals(new ConnectionException(InetSocketAddress.createUnresolved("127.0.0.1", 9042), "Test message").getMessage(), "[127.0.0.1:9042] Test message"); - } +public class ConnectionExceptionTest { - /** - * @jira_ticket JAVA-1139 - */ - @Test(groups = "unit") - public void getMessage_should_return_message_if_address_is_resolved() { - assertEquals(new ConnectionException(new InetSocketAddress("127.0.0.1", 9042), "Test message").getMessage(), "[/127.0.0.1:9042] Test message"); - } + /** @jira_ticket JAVA-1139 */ + @Test(groups = "unit") + public void getHost_should_return_null_if_address_is_null() { + assertNull(new ConnectionException(null, "Test message").getEndPoint()); + } + + /** @jira_ticket JAVA-1139 */ + @Test(groups = "unit") + public void getMessage_should_return_message_if_address_is_null() { + assertEquals(new ConnectionException(null, "Test message").getMessage(), "Test message"); + } + + /** @jira_ticket JAVA-1139 */ + @Test(groups = "unit") + public void getMessage_should_return_message_if_address_is_unresolved() { + assertEquals( + new ConnectionException(EndPoints.forAddress("127.0.0.1", 9042), "Test message") + .getMessage(), + "[/127.0.0.1:9042] Test message"); + } + + /** @jira_ticket JAVA-1139 */ + @Test(groups = "unit") + public void getMessage_should_return_message_if_address_is_resolved() { + assertEquals( + new ConnectionException(EndPoints.forAddress("127.0.0.1", 9042), "Test message") + .getMessage(), + "[/127.0.0.1:9042] Test message"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java index daa3347f234..1c9c5e87f9e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,135 +17,104 @@ */ package com.datastax.driver.core.exceptions; -import com.datastax.driver.core.*; -import com.datastax.driver.core.policies.FallthroughRetryPolicy; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import org.scassandra.Scassandra; -import org.scassandra.http.client.PrimingRequest; -import org.scassandra.http.client.Result; -import org.testng.annotations.*; - -import java.util.List; -import java.util.Map; - import static com.datastax.driver.core.ConsistencyLevel.LOCAL_ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; - -public class ExceptionsScassandraTest { - - protected ScassandraCluster scassandras; - protected Cluster cluster; - protected Metrics.Errors errors; - protected Host host1; - protected Session session; - - @BeforeClass(groups = "short") - public void beforeClass() { - scassandras = ScassandraCluster.builder().withNodes(1).build(); - scassandras.init(); - } - - @BeforeMethod(groups = "short") - public void beforeMethod() { - cluster = Cluster.builder() - .addContactPoints(scassandras.address(1).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withRetryPolicy(FallthroughRetryPolicy.INSTANCE) - .build(); - session = cluster.connect(); - host1 = TestUtils.findHost(cluster, 1); - errors = cluster.getMetrics().getErrorMetrics(); - - for (Scassandra node : scassandras.nodes()) { - node.primingClient().clearAllPrimes(); - node.activityClient().clearAllRecordedActivity(); - } - } - - @Test(groups = "short") - public void should_throw_proper_unavailable_exception() { - simulateError(1, unavailable); - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) { - assertThat(e.getMessage()).isEqualTo("Not enough replicas available for query at consistency LOCAL_ONE (1 required but only 0 alive)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); - assertThat(e.getAliveReplicas()).isEqualTo(0); - assertThat(e.getRequiredReplicas()).isEqualTo(1); - assertThat(e.getAddress()).isEqualTo(host1.getSocketAddress()); - assertThat(e.getHost()).isEqualTo(host1.getAddress()); - } - } - - @Test(groups = "short") - public void should_throw_proper_read_timeout_exception() { - simulateError(1, read_request_timeout); - try { - query(); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException e) { 
- assertThat(e.getMessage()).isEqualTo("Cassandra timeout during read query at consistency LOCAL_ONE (1 responses were required but only 0 replica responded)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(0); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(1); - assertThat(e.getAddress()).isEqualTo(host1.getSocketAddress()); - assertThat(e.getHost()).isEqualTo(host1.getAddress()); - } - } - - @Test(groups = "short") - public void should_throw_proper_write_timeout_exception() { - simulateError(1, write_request_timeout); - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) { - assertThat(e.getMessage()).isEqualTo("Cassandra timeout during write query at consistency LOCAL_ONE (1 replica were required but only 0 acknowledged the write)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(0); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(1); - assertThat(e.getWriteType()).isEqualTo(WriteType.SIMPLE); - assertThat(e.getAddress()).isEqualTo(host1.getSocketAddress()); - assertThat(e.getHost()).isEqualTo(host1.getAddress()); - } - } - - protected void simulateError(int hostNumber, Result result) { - scassandras.node(hostNumber).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withResult(result)) - .build()); - } - - private static List> row(String key, String value) { - return ImmutableList.>of(ImmutableMap.of(key, value)); - } - - protected ResultSet query() { - return query(session); - } - - protected ResultSet query(Session session) { - return session.execute("mock query"); +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Metrics; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ScassandraTestBase; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.TestUtils; +import com.datastax.driver.core.WriteType; +import com.datastax.driver.core.policies.FallthroughRetryPolicy; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +public class ExceptionsScassandraTest extends ScassandraTestBase { + + protected Cluster cluster; + protected Metrics.Errors errors; + protected Host host1; + protected Session session; + + @BeforeMethod(groups = "short") + public void beforeMethod() { + cluster = createClusterBuilder().withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); + session = cluster.connect(); + host1 = TestUtils.findHost(cluster, 1); + errors = cluster.getMetrics().getErrorMetrics(); + } + + @Test(groups = "short") + public void should_throw_proper_unavailable_exception() { + simulateError(unavailable); + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + assertThat(e.getMessage()) + .isEqualTo( + "Not enough replicas available for query at consistency LOCAL_ONE (1 required but only 0 alive)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); + assertThat(e.getAliveReplicas()).isEqualTo(0); + assertThat(e.getRequiredReplicas()).isEqualTo(1); + 
assertThat(e.getEndPoint()).isEqualTo(host1.getEndPoint()); } - - @AfterMethod(groups = "short", alwaysRun = true) - public void afterMethod() { - for (Scassandra node : scassandras.nodes()) { - node.primingClient().clearAllPrimes(); - } - if (cluster != null) - cluster.close(); + } + + @Test(groups = "short") + public void should_throw_proper_read_timeout_exception() { + simulateError(read_request_timeout); + try { + query(); + fail("expected a ReadTimeoutException"); + } catch (ReadTimeoutException e) { + assertThat(e.getMessage()) + .isEqualTo( + "Cassandra timeout during read query at consistency LOCAL_ONE (1 responses were required but only 0 replica responded). In case this was generated during read repair, the consistency level is not representative of the actual consistency."); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(0); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(1); + assertThat(e.getEndPoint()).isEqualTo(host1.getEndPoint()); } - - @AfterClass(groups = "short", alwaysRun = true) - public void afterClass() { - if (scassandras != null) - scassandras.stop(); + } + + @Test(groups = "short") + public void should_throw_proper_write_timeout_exception() { + simulateError(write_request_timeout); + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + assertThat(e.getMessage()) + .isEqualTo( + "Cassandra timeout during SIMPLE write query at consistency LOCAL_ONE (1 replica were required but only 0 acknowledged the write)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(0); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(1); + assertThat(e.getWriteType()).isEqualTo(WriteType.SIMPLE); + assertThat(e.getEndPoint()).isEqualTo(host1.getEndPoint()); } + } + + private void simulateError(Result result) { + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withResult(result)) + .build()); + } + + protected ResultSet query() { + return session.execute("mock query"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java index d5b18085e5a..93a8bfd9672 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,309 +17,312 @@ */ package com.datastax.driver.core.exceptions; +import static com.datastax.driver.core.ConsistencyLevel.LOCAL_QUORUM; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + import com.datastax.driver.core.CCMTestsSupport; import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.EndPoint; +import com.datastax.driver.core.EndPoints; import com.datastax.driver.core.TestUtils; import com.datastax.driver.core.WriteType; import org.testng.annotations.Test; -import java.net.InetSocketAddress; - -import static com.datastax.driver.core.ConsistencyLevel.LOCAL_QUORUM; -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; - -/** - * Tests Exception classes with separate clusters per test, when applicable - */ +/** Tests Exception classes with separate clusters per test, when applicable */ public class ExceptionsTest extends CCMTestsSupport { - private InetSocketAddress address1 = new InetSocketAddress("127.0.0.1", 9042); - private InetSocketAddress address2 = new InetSocketAddress("127.0.0.2", 9042); + private EndPoint endPoint1 = EndPoints.forAddress("127.0.0.1", 9042); + private EndPoint endPoint2 = EndPoints.forAddress("127.0.0.2", 9042); - /** - * Tests the AlreadyExistsException. - * Create a keyspace twice and a table twice. - * Catch and test all the exception methods. - */ - @Test(groups = "short") - public void alreadyExistsException() throws Throwable { - String keyspace = "TestKeyspace"; - String table = "TestTable"; + /** + * Tests the AlreadyExistsException. Create a keyspace twice and a table twice. Catch and test all + * the exception methods. 
+ */ + @Test(groups = "short") + public void alreadyExistsException() throws Throwable { + String keyspace = "TestKeyspace"; + String table = "TestTable"; - String[] cqlCommands = new String[]{ - String.format(TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1), - "USE " + keyspace, - String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", table) + String[] cqlCommands = + new String[] { + String.format(TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, keyspace, 1), + "USE " + keyspace, + String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", table) }; - // Create the schema once - session().execute(cqlCommands[0]); - session().execute(cqlCommands[1]); - session().execute(cqlCommands[2]); + // Create the schema once + session().execute(cqlCommands[0]); + session().execute(cqlCommands[1]); + session().execute(cqlCommands[2]); - // Try creating the keyspace again - try { - session().execute(cqlCommands[0]); - } catch (AlreadyExistsException e) { - String expected = String.format("Keyspace %s already exists", keyspace.toLowerCase()); - assertEquals(e.getMessage(), expected); - assertEquals(e.getKeyspace(), keyspace.toLowerCase()); - assertEquals(e.getTable(), null); - assertEquals(e.wasTableCreation(), false); - assertEquals(e.getHost(), ccm().addressOfNode(1).getAddress()); - assertEquals(e.getAddress(), ccm().addressOfNode(1)); - } + // Try creating the keyspace again + try { + session().execute(cqlCommands[0]); + } catch (AlreadyExistsException e) { + String expected = String.format("Keyspace %s already exists", keyspace.toLowerCase()); + assertEquals(e.getMessage(), expected); + assertEquals(e.getKeyspace(), keyspace.toLowerCase()); + assertEquals(e.getTable(), null); + assertEquals(e.wasTableCreation(), false); + assertEquals(e.getEndPoint().resolve().getAddress(), ccm().addressOfNode(1).getAddress()); + assertEquals(e.getEndPoint().resolve(), ccm().addressOfNode(1)); + } - session().execute(cqlCommands[1]); + session().execute(cqlCommands[1]); - // Try creating the table again - try { - session().execute(cqlCommands[2]); - } catch (AlreadyExistsException e) { - // is released - assertEquals(e.getKeyspace(), keyspace.toLowerCase()); - assertEquals(e.getTable(), table.toLowerCase()); - assertEquals(e.wasTableCreation(), true); - assertEquals(e.getHost(), ccm().addressOfNode(1).getAddress()); - assertEquals(e.getAddress(), ccm().addressOfNode(1)); - } + // Try creating the table again + try { + session().execute(cqlCommands[2]); + } catch (AlreadyExistsException e) { + // is released + assertEquals(e.getKeyspace(), keyspace.toLowerCase()); + assertEquals(e.getTable(), table.toLowerCase()); + assertEquals(e.wasTableCreation(), true); + assertEquals(e.getEndPoint().resolve().getAddress(), ccm().addressOfNode(1).getAddress()); + assertEquals(e.getEndPoint().resolve(), ccm().addressOfNode(1)); } + } - /** - * Tests the NoHostAvailableException. - * by attempting to build a cluster using the IP address "255.255.255.255" - * and test all available exception methods. - */ - @Test(groups = "short") - public void noHostAvailableException() throws Exception { - try { - Cluster.builder().addContactPoints("255.255.255.255").build(); - } catch (NoHostAvailableException e) { - assertEquals(e.getErrors().size(), 1); - assertTrue(e.getErrors().values().iterator().next().toString().contains("[/255.255.255.255] Cannot connect")); + /** + * Tests the NoHostAvailableException. 
by attempting to build a cluster using the IP address + * "255.255.255.255" and test all available exception methods. + */ + @Test(groups = "short") + public void noHostAvailableException() throws Exception { + try { + Cluster.builder().addContactPoints("255.255.255.255").build(); + } catch (NoHostAvailableException e) { + assertEquals(e.getErrors().size(), 1); + assertTrue( + e.getErrors() + .values() + .iterator() + .next() + .toString() + .contains("[/255.255.255.255] Cannot connect")); - NoHostAvailableException copy = (NoHostAvailableException) e.copy(); - assertEquals(copy.getMessage(), e.getMessage()); - assertEquals(copy.getErrors(), e.getErrors()); - } + NoHostAvailableException copy = (NoHostAvailableException) e.copy(); + assertEquals(copy.getMessage(), e.getMessage()); + assertEquals(copy.getErrors(), e.getErrors()); } + } - /** - * Tests DriverInternalError. - * Tests basic message, rethrow, and copy abilities. - */ - @Test(groups = "unit") - public void driverInternalError() throws Exception { - String errorMessage = "Test Message"; + /** Tests DriverInternalError. Tests basic message, rethrow, and copy abilities. */ + @Test(groups = "unit") + public void driverInternalError() throws Exception { + String errorMessage = "Test Message"; - try { - throw new DriverInternalError(errorMessage); - } catch (DriverInternalError e1) { - try { - throw new DriverInternalError(e1); - } catch (DriverInternalError e2) { - assertTrue(e2.getMessage().contains(errorMessage)); + try { + throw new DriverInternalError(errorMessage); + } catch (DriverInternalError e1) { + try { + throw new DriverInternalError(e1); + } catch (DriverInternalError e2) { + assertTrue(e2.getMessage().contains(errorMessage)); - DriverInternalError copy = (DriverInternalError) e2.copy(); - assertEquals(copy.getMessage(), e2.getMessage()); - } - } + DriverInternalError copy = (DriverInternalError) e2.copy(); + assertEquals(copy.getMessage(), e2.getMessage()); + } } + } - @Test(groups = "unit") - public void should_create_proper_already_exists_exception_for_keyspaces() { - AlreadyExistsException e = new AlreadyExistsException(address1, "keyspace1", ""); - assertThat(e.getMessage()).isEqualTo("Keyspace keyspace1 already exists"); - assertThat(e.getKeyspace()).isEqualTo("keyspace1"); - assertThat(e.getTable()).isNull(); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(address2); - assertThat(e.getMessage()).isEqualTo("Keyspace keyspace1 already exists"); - assertThat(e.getKeyspace()).isEqualTo("keyspace1"); - assertThat(e.getTable()).isNull(); - assertThat(e.getAddress()).isEqualTo(address2); - assertThat(e.getHost()).isEqualTo(address2.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_already_exists_exception_for_keyspaces() { + AlreadyExistsException e = new AlreadyExistsException(endPoint1, "keyspace1", ""); + assertThat(e.getMessage()).isEqualTo("Keyspace keyspace1 already exists"); + assertThat(e.getKeyspace()).isEqualTo("keyspace1"); + assertThat(e.getTable()).isNull(); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(endPoint2); + assertThat(e.getMessage()).isEqualTo("Keyspace keyspace1 already exists"); + assertThat(e.getKeyspace()).isEqualTo("keyspace1"); + assertThat(e.getTable()).isNull(); + assertThat(e.getEndPoint()).isEqualTo(endPoint2); + } - @Test(groups = "unit") - public void should_create_proper_already_exists_exception_for_tables() { - AlreadyExistsException e = new 
AlreadyExistsException(address1, "keyspace1", "table1"); - assertThat(e.getMessage()).isEqualTo("Table keyspace1.table1 already exists"); - assertThat(e.getKeyspace()).isEqualTo("keyspace1"); - assertThat(e.getTable()).isEqualTo("table1"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(address2); - assertThat(e.getMessage()).isEqualTo("Table keyspace1.table1 already exists"); - assertThat(e.getKeyspace()).isEqualTo("keyspace1"); - assertThat(e.getTable()).isEqualTo("table1"); - assertThat(e.getAddress()).isEqualTo(address2); - assertThat(e.getHost()).isEqualTo(address2.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_already_exists_exception_for_tables() { + AlreadyExistsException e = new AlreadyExistsException(endPoint1, "keyspace1", "table1"); + assertThat(e.getMessage()).isEqualTo("Table keyspace1.table1 already exists"); + assertThat(e.getKeyspace()).isEqualTo("keyspace1"); + assertThat(e.getTable()).isEqualTo("table1"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(endPoint2); + assertThat(e.getMessage()).isEqualTo("Table keyspace1.table1 already exists"); + assertThat(e.getKeyspace()).isEqualTo("keyspace1"); + assertThat(e.getTable()).isEqualTo("table1"); + assertThat(e.getEndPoint()).isEqualTo(endPoint2); + } - @Test(groups = "unit") - public void should_create_proper_bootstrapping_exception() { - BootstrappingException e = new BootstrappingException(address1, "Sorry mate"); - assertThat(e.getMessage()).isEqualTo("Queried host (" + address1 + ") was bootstrapping: Sorry mate"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(); - assertThat(e.getMessage()).isEqualTo("Queried host (" + address1 + ") was bootstrapping: Sorry mate"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_bootstrapping_exception() { + BootstrappingException e = new BootstrappingException(endPoint1, "Sorry mate"); + assertThat(e.getMessage()) + .isEqualTo("Queried host (" + endPoint1 + ") was bootstrapping: Sorry mate"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(); + assertThat(e.getMessage()) + .isEqualTo("Queried host (" + endPoint1 + ") was bootstrapping: Sorry mate"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_invalid_query_exception() { - InvalidQueryException e = new InvalidQueryException("Bad, really bad"); - assertThat(e.getMessage()).isEqualTo("Bad, really bad"); - e = (InvalidQueryException) e.copy(); - assertThat(e.getMessage()).isEqualTo("Bad, really bad"); - } + @Test(groups = "unit") + public void should_create_proper_invalid_query_exception() { + InvalidQueryException e = new InvalidQueryException("Bad, really bad"); + assertThat(e.getMessage()).isEqualTo("Bad, really bad"); + e = (InvalidQueryException) e.copy(); + assertThat(e.getMessage()).isEqualTo("Bad, really bad"); + } - @Test(groups = "unit") - public void should_create_proper_trace_retrieval_exception() { - TraceRetrievalException e = new TraceRetrievalException("Couldn't find any trace of it"); - assertThat(e.getMessage()).isEqualTo("Couldn't find any trace of it"); - e = (TraceRetrievalException) e.copy(); - assertThat(e.getMessage()).isEqualTo("Couldn't find any trace of it"); - } + @Test(groups = "unit") + public void 
should_create_proper_trace_retrieval_exception() { + TraceRetrievalException e = new TraceRetrievalException("Couldn't find any trace of it"); + assertThat(e.getMessage()).isEqualTo("Couldn't find any trace of it"); + e = (TraceRetrievalException) e.copy(); + assertThat(e.getMessage()).isEqualTo("Couldn't find any trace of it"); + } - @Test(groups = "unit") - public void should_create_proper_paging_state_exception() { - PagingStateException e = new PagingStateException("Bad, really bad"); - assertThat(e.getMessage()).isEqualTo("Bad, really bad"); - // no copy method for this exception - } + @Test(groups = "unit") + public void should_create_proper_paging_state_exception() { + PagingStateException e = new PagingStateException("Bad, really bad"); + assertThat(e.getMessage()).isEqualTo("Bad, really bad"); + // no copy method for this exception + } - @Test(groups = "unit") - public void should_create_proper_invalid_configuration_in_query_exception() { - InvalidConfigurationInQueryException e = new InvalidConfigurationInQueryException(address1, "Bad, really bad"); - assertThat(e.getMessage()).isEqualTo("Bad, really bad"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - InvalidQueryException e1 = (InvalidQueryException) e.copy(); - assertThat(e1.getMessage()).isEqualTo("Bad, really bad"); - } + @Test(groups = "unit") + public void should_create_proper_invalid_configuration_in_query_exception() { + InvalidConfigurationInQueryException e = + new InvalidConfigurationInQueryException(endPoint1, "Bad, really bad"); + assertThat(e.getMessage()).isEqualTo("Bad, really bad"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + InvalidQueryException e1 = (InvalidQueryException) e.copy(); + assertThat(e1.getMessage()).isEqualTo("Bad, really bad"); + } - @Test(groups = "unit") - public void should_create_proper_overloaded_exception() { - OverloadedException e = new OverloadedException(address1, "I'm busy"); - assertThat(e.getMessage()).isEqualTo("Queried host (" + address1 + ") was overloaded: I'm busy"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(); - assertThat(e.getMessage()).isEqualTo("Queried host (" + address1 + ") was overloaded: I'm busy"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_overloaded_exception() { + OverloadedException e = new OverloadedException(endPoint1, "I'm busy"); + assertThat(e.getMessage()) + .isEqualTo("Queried host (" + endPoint1 + ") was overloaded: I'm busy"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(); + assertThat(e.getMessage()) + .isEqualTo("Queried host (" + endPoint1 + ") was overloaded: I'm busy"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_syntax_error() { - SyntaxError e = new SyntaxError(address1, "Missing ) at EOF"); - assertThat(e.getMessage()).isEqualTo("Missing ) at EOF"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = (SyntaxError) e.copy(); - assertThat(e.getMessage()).isEqualTo("Missing ) at EOF"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_syntax_error() { + SyntaxError e = new 
SyntaxError(endPoint1, "Missing ) at EOF"); + assertThat(e.getMessage()).isEqualTo("Missing ) at EOF"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = (SyntaxError) e.copy(); + assertThat(e.getMessage()).isEqualTo("Missing ) at EOF"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_truncate_exception() { - TruncateException e = new TruncateException(address1, "I'm running headless now"); - assertThat(e.getMessage()).isEqualTo("I'm running headless now"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = (TruncateException) e.copy(); - assertThat(e.getMessage()).isEqualTo("I'm running headless now"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_truncate_exception() { + TruncateException e = new TruncateException(endPoint1, "I'm running headless now"); + assertThat(e.getMessage()).isEqualTo("I'm running headless now"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = (TruncateException) e.copy(); + assertThat(e.getMessage()).isEqualTo("I'm running headless now"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_unauthorized_exception() { - UnauthorizedException e = new UnauthorizedException(address1, "You talking to me?"); - assertThat(e.getMessage()).isEqualTo("You talking to me?"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = (UnauthorizedException) e.copy(); - assertThat(e.getMessage()).isEqualTo("You talking to me?"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_unauthorized_exception() { + UnauthorizedException e = new UnauthorizedException(endPoint1, "You talking to me?"); + assertThat(e.getMessage()).isEqualTo("You talking to me?"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = (UnauthorizedException) e.copy(); + assertThat(e.getMessage()).isEqualTo("You talking to me?"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_unavailable_exception() { - UnavailableException e = new UnavailableException(address1, LOCAL_QUORUM, 3, 2); - assertThat(e.getMessage()).isEqualTo("Not enough replicas available for query at consistency LOCAL_QUORUM (3 required but only 2 alive)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getAliveReplicas()).isEqualTo(2); - assertThat(e.getRequiredReplicas()).isEqualTo(3); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(address2); - assertThat(e.getMessage()).isEqualTo("Not enough replicas available for query at consistency LOCAL_QUORUM (3 required but only 2 alive)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getAliveReplicas()).isEqualTo(2); - assertThat(e.getRequiredReplicas()).isEqualTo(3); - assertThat(e.getAddress()).isEqualTo(address2); - assertThat(e.getHost()).isEqualTo(address2.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_unavailable_exception() { + UnavailableException e = new UnavailableException(endPoint1, LOCAL_QUORUM, 3, 2); + assertThat(e.getMessage()) + 
.isEqualTo( + "Not enough replicas available for query at consistency LOCAL_QUORUM (3 required but only 2 alive)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getAliveReplicas()).isEqualTo(2); + assertThat(e.getRequiredReplicas()).isEqualTo(3); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(endPoint2); + assertThat(e.getMessage()) + .isEqualTo( + "Not enough replicas available for query at consistency LOCAL_QUORUM (3 required but only 2 alive)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getAliveReplicas()).isEqualTo(2); + assertThat(e.getRequiredReplicas()).isEqualTo(3); + assertThat(e.getEndPoint()).isEqualTo(endPoint2); + } - @Test(groups = "unit") - public void should_create_proper_unprepared_exception() { - UnpreparedException e = new UnpreparedException(address1, "Caught me unawares"); - assertThat(e.getMessage()).isEqualTo("A prepared query was submitted on " + address1 + " but was not known of that node: Caught me unawares"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(); - assertThat(e.getMessage()).isEqualTo("A prepared query was submitted on " + address1 + " but was not known of that node: Caught me unawares"); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_unprepared_exception() { + UnpreparedException e = new UnpreparedException(endPoint1, "Caught me unawares"); + assertThat(e.getMessage()) + .isEqualTo( + "A prepared query was submitted on " + + endPoint1 + + " but was not known of that node: Caught me unawares"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(); + assertThat(e.getMessage()) + .isEqualTo( + "A prepared query was submitted on " + + endPoint1 + + " but was not known of that node: Caught me unawares"); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + } - @Test(groups = "unit") - public void should_create_proper_read_timeout_exception() { - ReadTimeoutException e = new ReadTimeoutException(address1, LOCAL_QUORUM, 2, 3, true); - assertThat(e.getMessage()).isEqualTo("Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but only 2 replica responded)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); - assertThat(e.wasDataRetrieved()).isTrue(); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(address2); - assertThat(e.getMessage()).isEqualTo("Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but only 2 replica responded)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); - assertThat(e.wasDataRetrieved()).isTrue(); - assertThat(e.getAddress()).isEqualTo(address2); - assertThat(e.getHost()).isEqualTo(address2.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_read_timeout_exception() { + ReadTimeoutException e = new ReadTimeoutException(endPoint1, LOCAL_QUORUM, 2, 3, true); + assertThat(e.getMessage()) + .contains( + "Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but 
only 2 replica responded)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); + assertThat(e.wasDataRetrieved()).isTrue(); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(endPoint2); + assertThat(e.getMessage()) + .contains( + "Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but only 2 replica responded)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); + assertThat(e.wasDataRetrieved()).isTrue(); + assertThat(e.getEndPoint()).isEqualTo(endPoint2); + } - @Test(groups = "unit") - public void should_create_proper_write_timeout_exception() { - WriteTimeoutException e = new WriteTimeoutException(address1, LOCAL_QUORUM, WriteType.BATCH, 2, 3); - assertThat(e.getMessage()).isEqualTo("Cassandra timeout during write query at consistency LOCAL_QUORUM (3 replica were required but only 2 acknowledged the write)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); - assertThat(e.getWriteType()).isEqualTo(WriteType.BATCH); - assertThat(e.getAddress()).isEqualTo(address1); - assertThat(e.getHost()).isEqualTo(address1.getAddress()); - e = e.copy(address2); - assertThat(e.getMessage()).isEqualTo("Cassandra timeout during write query at consistency LOCAL_QUORUM (3 replica were required but only 2 acknowledged the write)"); - assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); - assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); - assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); - assertThat(e.getWriteType()).isEqualTo(WriteType.BATCH); - assertThat(e.getAddress()).isEqualTo(address2); - assertThat(e.getHost()).isEqualTo(address2.getAddress()); - } + @Test(groups = "unit") + public void should_create_proper_write_timeout_exception() { + WriteTimeoutException e = + new WriteTimeoutException(endPoint1, LOCAL_QUORUM, WriteType.BATCH, 2, 3); + assertThat(e.getMessage()) + .isEqualTo( + "Cassandra timeout during BATCH write query at consistency LOCAL_QUORUM (3 replica were required but only 2 acknowledged the write)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); + assertThat(e.getWriteType()).isEqualTo(WriteType.BATCH); + assertThat(e.getEndPoint()).isEqualTo(endPoint1); + e = e.copy(endPoint2); + assertThat(e.getMessage()) + .isEqualTo( + "Cassandra timeout during BATCH write query at consistency LOCAL_QUORUM (3 replica were required but only 2 acknowledged the write)"); + assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); + assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); + assertThat(e.getRequiredAcknowledgements()).isEqualTo(3); + assertThat(e.getWriteType()).isEqualTo(WriteType.BATCH); + assertThat(e.getEndPoint()).isEqualTo(endPoint2); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/FunctionExecutionExceptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/FunctionExecutionExceptionTest.java index a69a3f141c4..bd217030c11 100644 --- 
a/driver-core/src/test/java/com/datastax/driver/core/exceptions/FunctionExecutionExceptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/FunctionExecutionExceptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +26,16 @@ @CCMConfig(config = "enable_user_defined_functions:true") public class FunctionExecutionExceptionTest extends CCMTestsSupport { + @Override + public void onTestContextInitialized() { + execute( + "CREATE TABLE foo (k int primary key, i int, l list)", + "INSERT INTO foo (k, i, l) VALUES (1, 1, [1])", + "CREATE FUNCTION element_at(l list, i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return (Integer) l.get(i);'"); + } - @Override - public void onTestContextInitialized() { - execute( - "CREATE TABLE foo (k int primary key, i int, l list)", - "INSERT INTO foo (k, i, l) VALUES (1, 1, [1])", - "CREATE FUNCTION element_at(l list, i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return (Integer) l.get(i);'" - ); - } - - @Test(groups = "short", expectedExceptions = FunctionExecutionException.class) - public void should_throw_when_function_execution_fails() { - session().execute("SELECT element_at(l, i) FROM foo WHERE k = 1"); - } + @Test(groups = "short", expectedExceptions = FunctionExecutionException.class) + public void should_throw_when_function_execution_fails() { + session().execute("SELECT element_at(l, i) FROM foo WHERE k = 1"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/NoHostAvailableExceptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/NoHostAvailableExceptionTest.java index 88fd09ff500..1740c17fb6d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/NoHostAvailableExceptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/NoHostAvailableExceptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,76 +17,95 @@ */ package com.datastax.driver.core.exceptions; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.driver.core.EndPoint; +import com.datastax.driver.core.EndPoints; import java.io.PrintWriter; -import java.net.InetSocketAddress; import java.util.HashMap; import java.util.Map; - import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class NoHostAvailableExceptionTest { - @Test(groups = "unit") - public void should_build_default_message_when_less_than_3_errors() { - NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); - String message = e.getMessage(); - assertThat(message).startsWith("All host(s) tried for query failed"); - assertThat(message).contains("/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)"); - assertThat(message).contains("/127.0.0.2:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)"); - assertThat(message).contains("/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)"); - } + @Test(groups = "unit") + public void should_build_default_message_when_less_than_3_errors() { + NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); + String message = e.getMessage(); + assertThat(message).startsWith("All host(s) tried for query failed"); + assertThat(message) + .contains( + "/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)"); + assertThat(message) + .contains( + "/127.0.0.2:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)"); + assertThat(message) + .contains( + "/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)"); + } - @Test(groups = "unit") - public void should_build_default_message_when_more_than_3_errors() { - NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(4)); - String message = e.getMessage(); - assertThat(message).startsWith("All host(s) tried for query failed"); - assertThat(message).contains("/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)"); - assertThat(message).contains("/127.0.0.2:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)"); - assertThat(message).contains("/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)"); - assertThat(message).contains("only showing errors of first 3 hosts, use getErrors() for more details"); - } + @Test(groups = "unit") + public void should_build_default_message_when_more_than_3_errors() { + NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(4)); + String message = e.getMessage(); + assertThat(message).startsWith("All host(s) tried for query failed"); + assertThat(message) + .contains( + "/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)"); + assertThat(message) + .contains( + "/127.0.0.2:9042 
(com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)"); + assertThat(message) + .contains( + "/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)"); + assertThat(message) + .contains("only showing errors of first 3 hosts, use getErrors() for more details"); + } - @Test(groups = "unit") - public void should_build_formatted_message_without_stack_traces() { - NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); - String message = e.getCustomMessage(3, true, false); - assertThat(message).startsWith("All host(s) tried for query failed (tried:\n"); - assertThat(message).contains("/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)\n"); - assertThat(message).contains("/127.0.0.2:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)\n"); - assertThat(message).contains("/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)\n"); - } + @Test(groups = "unit") + public void should_build_formatted_message_without_stack_traces() { + NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); + String message = e.getCustomMessage(3, true, false); + assertThat(message).startsWith("All host(s) tried for query failed (tried:\n"); + assertThat(message) + .contains( + "/127.0.0.1:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 1)\n"); + assertThat(message) + .contains( + "/127.0.0.2:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 2)\n"); + assertThat(message) + .contains( + "/127.0.0.3:9042 (com.datastax.driver.core.exceptions.NoHostAvailableExceptionTest$MockError: mock error 3)\n"); + } - @Test(groups = "unit") - public void should_build_formatted_message_with_stack_traces() { - NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); - String message = e.getCustomMessage(3, true, true); - assertThat(message).startsWith("All host(s) tried for query failed (tried:\n"); - assertThat(message).contains("/127.0.0.1:9042\nmock stack trace 1\n"); - assertThat(message).contains("/127.0.0.3:9042\nmock stack trace 3\n"); - assertThat(message).contains("/127.0.0.2:9042\nmock stack trace 2\n"); - } + @Test(groups = "unit") + public void should_build_formatted_message_with_stack_traces() { + NoHostAvailableException e = new NoHostAvailableException(buildMockErrors(3)); + String message = e.getCustomMessage(3, true, true); + assertThat(message).startsWith("All host(s) tried for query failed (tried:\n"); + assertThat(message).contains("/127.0.0.1:9042\nmock stack trace 1\n"); + assertThat(message).contains("/127.0.0.3:9042\nmock stack trace 3\n"); + assertThat(message).contains("/127.0.0.2:9042\nmock stack trace 2\n"); + } - private static Map buildMockErrors(int count) { - Map errors = new HashMap(); - for (int i = 1; i <= count; i++) { - errors.put(new InetSocketAddress("127.0.0." + i, 9042), new MockError(i)); - } - return errors; + private static Map buildMockErrors(int count) { + Map errors = new HashMap(); + for (int i = 1; i <= count; i++) { + errors.put(EndPoints.forAddress("127.0.0." 
+ i, 9042), new MockError(i)); } + return errors; + } - static class MockError extends Exception { - private final int i; + static class MockError extends Exception { + private final int i; - MockError(int i) { - super("mock error " + i); - this.i = i; - } + MockError(int i) { + super("mock error " + i); + this.i = i; + } - @Override - public void printStackTrace(PrintWriter writer) { - writer.printf("mock stack trace %d", i); - } + @Override + public void printStackTrace(PrintWriter writer) { + writer.printf("mock stack trace %d", i); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ReadWriteFailureExceptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ReadWriteFailureExceptionTest.java index 4d92f1311c5..5805ef060ef 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ReadWriteFailureExceptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ReadWriteFailureExceptionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,77 +17,94 @@ */ package com.datastax.driver.core.exceptions; -import com.datastax.driver.core.*; -import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; -@CCMConfig(config = "tombstone_failure_threshold:1000", - numberOfNodes = 2, - jvmArgs = "-Dcassandra.test.fail_writes_ks=ks_write_fail") +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.CCMConfig; +import com.datastax.driver.core.CCMTestsSupport; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.annotations.Test; + +@CCMConfig( + config = "tombstone_failure_threshold:1000", + numberOfNodes = 2, + jvmArgs = "-Dcassandra.test.fail_writes_ks=ks_write_fail") @CassandraVersion("2.2.0") public class ReadWriteFailureExceptionTest extends CCMTestsSupport { - /** - * Validates that ReadFailureException occurs, and that in the case of protocol v5 the reason map - * is surfaced appropriately on the exception. 
- * - * @jira_ticket JAVA-1424 - * @test_category error_codes - */ - @Test(groups = "long") - public void should_readFailure_on_tombstone_overwelmed() throws Throwable { - //Create a table and insert 2000 tombstones - session().execute("CREATE KEYSPACE ks_read_fail WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session().execute("CREATE TABLE ks_read_fail.foo(pk int, cc int, v int, primary key (pk, cc))"); - PreparedStatement prepared = session().prepare("INSERT INTO ks_read_fail.foo (pk, cc, v) VALUES (1, ?, null)"); + /** + * Validates that ReadFailureException occurs, and that in the case of protocol v5 the reason map + * is surfaced appropriately on the exception. + * + * @jira_ticket JAVA-1424 + * @test_category error_codes + */ + @Test(groups = "long") + public void should_readFailure_on_tombstone_overwelmed() throws Throwable { + // Create a table and insert 2000 tombstones + session() + .execute( + "CREATE KEYSPACE ks_read_fail WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session().execute("CREATE TABLE ks_read_fail.foo(pk int, cc int, v int, primary key (pk, cc))"); + PreparedStatement prepared = + session().prepare("INSERT INTO ks_read_fail.foo (pk, cc, v) VALUES (1, ?, null)"); - for (int v = 0; v < 2000; v++) { - BoundStatement bound = prepared.bind(v); - session().execute(bound); - } - // Attempt a query, since our tombstone failure threshold is set to 1000 this should error - try { - ResultSet result = session().execute("SELECT * FROM ks_read_fail.foo WHERE pk = 1"); - fail("A ReadFailureException should have been thrown here"); - } catch (ReadFailureException e) { - if (cluster().getConfiguration().getProtocolOptions().getProtocolVersion().compareTo(ProtocolVersion.V5) >= 0) { - assertThat(e.getFailuresMap()) - .hasSize(1) - .containsValue(1); - } else { - assertThat(e.getFailuresMap()).isEmpty(); - } - } + for (int v = 0; v < 2000; v++) { + BoundStatement bound = prepared.bind(v); + session().execute(bound); + } + // Attempt a query, since our tombstone failure threshold is set to 1000 this should error + try { + ResultSet result = session().execute("SELECT * FROM ks_read_fail.foo WHERE pk = 1"); + fail("A ReadFailureException should have been thrown here"); + } catch (ReadFailureException e) { + if (cluster() + .getConfiguration() + .getProtocolOptions() + .getProtocolVersion() + .compareTo(ProtocolVersion.V5) + >= 0) { + assertThat(e.getFailuresMap()).hasSize(1).containsValue(1); + } else { + assertThat(e.getFailuresMap()).isEmpty(); + } } + } - /** - * Validates that a WriteFailureException occurs. In the case of protocol > v5 the reason map - * is surfaced appropriately on the exception. - * - * @jira_ticket JAVA-1424 - * @test_category error_codes - */ - @Test(groups = "long") - public void should_writeFailure_on_error() throws Throwable { - // Creates the failure keyspace and a table. - session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks_write_fail", 1)); - session().execute("CREATE TABLE ks_write_fail.foo(pk int, cc int, v int, primary key (pk, cc))"); - try { - // This should fail because we have a the jvm arg cassandra.test.fail_writes_ks=ks_write_fail set. - session().execute("INSERT INTO ks_write_fail.foo (pk, cc, v) VALUES (1, 1, null)"); - fail("A WriteFailureException should have been thrown here"); - } catch (WriteFailureException e) { + /** + * Validates that a WriteFailureException occurs. In the case of protocol > v5 the reason map is + * surfaced appropriately on the exception. 
+ * + * @jira_ticket JAVA-1424 + * @test_category error_codes + */ + @Test(groups = "long") + public void should_writeFailure_on_error() throws Throwable { + // Creates the failure keyspace and a table. + session().execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, "ks_write_fail", 1)); + session() + .execute("CREATE TABLE ks_write_fail.foo(pk int, cc int, v int, primary key (pk, cc))"); + try { + // This should fail because we have a the jvm arg cassandra.test.fail_writes_ks=ks_write_fail + // set. + session().execute("INSERT INTO ks_write_fail.foo (pk, cc, v) VALUES (1, 1, null)"); + fail("A WriteFailureException should have been thrown here"); + } catch (WriteFailureException e) { - if (cluster().getConfiguration().getProtocolOptions().getProtocolVersion().compareTo(ProtocolVersion.V5) >= 0) { - assertThat(e.getFailuresMap()) - .hasSize(1) - .containsValue(0); - } else { - assertThat(e.getFailuresMap()).isEmpty(); - } - } + if (cluster() + .getConfiguration() + .getProtocolOptions() + .getProtocolVersion() + .compareTo(ProtocolVersion.V5) + >= 0) { + assertThat(e.getFailuresMap()).hasSize(1).containsValue(0); + } else { + assertThat(e.getFailuresMap()).isEmpty(); + } } -} \ No newline at end of file + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/AbstractRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/AbstractRetryPolicyIntegrationTest.java index 0c505d329dd..2ad6ee1435f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/AbstractRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/AbstractRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +17,38 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Mockito.times; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.overloaded; +import static org.scassandra.http.client.Result.server_error; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Metrics; +import com.datastax.driver.core.PoolingOptions; +import com.datastax.driver.core.QueryOptions; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.SortingLoadBalancingPolicy; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TestUtils; +import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.exceptions.OverloadedException; import com.datastax.driver.core.exceptions.ServerError; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import java.util.List; +import java.util.Map; import org.mockito.Mockito; import org.scassandra.Scassandra; import org.scassandra.http.client.ClosedConnectionConfig.CloseType; @@ -32,168 +60,169 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; -import java.util.List; -import java.util.Map; - -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Matchers.*; -import static org.mockito.Mockito.times; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.overloaded; -import static org.scassandra.http.client.Result.server_error; - /** * Base class for retry policy integration tests. - *

- * We use SCassandra to easily simulate specific errors (unavailable, read timeout...) on nodes,
- * and SortingLoadBalancingPolicy to get a predictable order of the query plan (always host1, host2, host3).
- * <p/>
- * Note that SCassandra only allows a limited number of test cases, for instance it always returns errors
- * with receivedResponses = 0. If that becomes more finely tuneable in the future, we'll be able to add more
- * tests in child classes.
+ *
+ * <p>We use SCassandra to easily simulate specific errors (unavailable, read timeout...) on nodes,
+ * and SortingLoadBalancingPolicy to get a predictable order of the query plan (always host1, host2,
+ * host3).
+ *
+ * <p>
    Note that SCassandra only allows a limited number of test cases, for instance it always + * returns errors with receivedResponses = 0. If that becomes more finely tuneable in the future, + * we'll be able to add more tests in child classes. */ public class AbstractRetryPolicyIntegrationTest { - protected ScassandraCluster scassandras; - protected Cluster cluster = null; - protected Metrics.Errors errors; - protected Host host1, host2, host3; - protected Session session; + protected ScassandraCluster scassandras; + protected Cluster cluster = null; + protected Metrics.Errors errors; + protected Host host1, host2, host3; + protected Session session; - protected RetryPolicy retryPolicy; + protected RetryPolicy retryPolicy; - protected AbstractRetryPolicyIntegrationTest() { - } + protected AbstractRetryPolicyIntegrationTest() {} - protected AbstractRetryPolicyIntegrationTest(RetryPolicy retryPolicy) { - setRetryPolicy(retryPolicy); - } + protected AbstractRetryPolicyIntegrationTest(RetryPolicy retryPolicy) { + setRetryPolicy(retryPolicy); + } - protected final void setRetryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = Mockito.spy(retryPolicy); - } + protected final void setRetryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = Mockito.spy(retryPolicy); + } - @BeforeMethod(groups = "short") - public void beforeMethod() { - scassandras = ScassandraCluster.builder().withNodes(3).build(); - scassandras.init(); - - cluster = Cluster.builder() - .addContactPoints(scassandras.address(1).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withRetryPolicy(retryPolicy) - .withLoadBalancingPolicy(new SortingLoadBalancingPolicy()) - .withPoolingOptions(new PoolingOptions() - .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) - .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) - .setHeartbeatIntervalSeconds(0)) - .withNettyOptions(nonQuietClusterCloseOptions) - // Mark everything as idempotent by default so RetryPolicy is exercised. - .withQueryOptions(new QueryOptions().setDefaultIdempotence(true)) - .build(); - - session = cluster.connect(); - - host1 = TestUtils.findHost(cluster, 1); - host2 = TestUtils.findHost(cluster, 2); - host3 = TestUtils.findHost(cluster, 3); - - errors = cluster.getMetrics().getErrorMetrics(); - - Mockito.reset(retryPolicy); - - for (Scassandra node : scassandras.nodes()) { - node.activityClient().clearAllRecordedActivity(); - } - } + @BeforeMethod(groups = "short") + public void beforeMethod() { + scassandras = ScassandraCluster.builder().withNodes(3).build(); + scassandras.init(); - protected void simulateError(int hostNumber, Result result) { - simulateError(hostNumber, result, null); - } + cluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withRetryPolicy(retryPolicy) + .withLoadBalancingPolicy(new SortingLoadBalancingPolicy()) + .withPoolingOptions( + new PoolingOptions() + .setCoreConnectionsPerHost(HostDistance.LOCAL, 1) + .setMaxConnectionsPerHost(HostDistance.LOCAL, 1) + .setHeartbeatIntervalSeconds(0)) + .withNettyOptions(nonQuietClusterCloseOptions) + // Mark everything as idempotent by default so RetryPolicy is exercised. 
+ .withQueryOptions(new QueryOptions().setDefaultIdempotence(true)) + .build(); - protected void simulateError(int hostNumber, Result result, Config config) { - PrimingRequest.Then.ThenBuilder then = then().withResult(result); - PrimingRequestBuilder builder = PrimingRequest.queryBuilder().withQuery("mock query"); + session = cluster.connect(); - if (config != null) - then = then.withConfig(config); + host1 = TestUtils.findHost(cluster, 1); + host2 = TestUtils.findHost(cluster, 2); + host3 = TestUtils.findHost(cluster, 3); - builder = builder.withThen(then); + errors = cluster.getMetrics().getErrorMetrics(); - scassandras.node(hostNumber).primingClient().prime(builder.build()); - } - - protected void simulateNormalResponse(int hostNumber) { - scassandras.node(hostNumber).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withRows(row("result", "result1"))) - .build()); - } - - protected static List> row(String key, String value) { - return ImmutableList.>of(ImmutableMap.of(key, value)); - } - - protected ResultSet query() { - return query(session); - } - - protected ResultSet queryWithCL(ConsistencyLevel cl) { - Statement statement = new SimpleStatement("mock query").setConsistencyLevel(cl); - return session.execute(statement); - } - - protected ResultSet query(Session session) { - return session.execute("mock query"); - } - - protected void assertOnReadTimeoutWasCalled(int times) { - Mockito.verify(retryPolicy, times(times)).onReadTimeout( - any(Statement.class), any(ConsistencyLevel.class), anyInt(), anyInt(), anyBoolean(), anyInt()); + Mockito.reset(retryPolicy); + for (Scassandra node : scassandras.nodes()) { + node.activityClient().clearAllRecordedActivity(); } + } - protected void assertOnWriteTimeoutWasCalled(int times) { - Mockito.verify(retryPolicy, times(times)).onWriteTimeout( - any(Statement.class), any(ConsistencyLevel.class), any(WriteType.class), anyInt(), anyInt(), anyInt()); - } + protected void simulateError(int hostNumber, Result result) { + simulateError(hostNumber, result, null); + } - protected void assertOnUnavailableWasCalled(int times) { - Mockito.verify(retryPolicy, times(times)).onUnavailable( - any(Statement.class), any(ConsistencyLevel.class), anyInt(), anyInt(), anyInt()); - } + protected void simulateError(int hostNumber, Result result, Config config) { + PrimingRequest.Then.ThenBuilder then = then().withResult(result); + PrimingRequestBuilder builder = PrimingRequest.queryBuilder().withQuery("mock query"); - protected void assertOnRequestErrorWasCalled(int times, Class expected) { - Mockito.verify(retryPolicy, times(times)).onRequestError( - any(Statement.class), any(ConsistencyLevel.class), any(expected), anyInt()); - } + if (config != null) then = then.withConfig(config); - protected void assertQueried(int hostNumber, int times) { - assertThat(scassandras.node(hostNumber).activityClient().retrieveQueries()).hasSize(times); - } + builder = builder.withThen(then); - @AfterMethod(groups = "short", alwaysRun = true) - public void afterMethod() { - if (cluster != null) - cluster.close(); - if (scassandras != null) - scassandras.stop(); - } + scassandras.node(hostNumber).primingClient().prime(builder.build()); + } - @DataProvider - public static Object[][] serverSideErrors() { - return new Object[][]{ - {server_error, ServerError.class}, - {overloaded, OverloadedException.class}, - }; - } - - @DataProvider - public static Object[][] connectionErrors() { - return new Object[][]{ - {CloseType.CLOSE}, - 
{CloseType.HALFCLOSE}, - {CloseType.RESET} - }; - } + protected void simulateNormalResponse(int hostNumber) { + scassandras + .node(hostNumber) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withRows(row("result", "result1"))) + .build()); + } + + protected static List> row(String key, String value) { + return ImmutableList.>of(ImmutableMap.of(key, value)); + } + + protected ResultSet query() { + return query(session); + } + + protected ResultSet queryWithCL(ConsistencyLevel cl) { + Statement statement = new SimpleStatement("mock query").setConsistencyLevel(cl); + return session.execute(statement); + } + + protected ResultSet query(Session session) { + return session.execute("mock query"); + } + + protected void assertOnReadTimeoutWasCalled(int times) { + Mockito.verify(retryPolicy, times(times)) + .onReadTimeout( + any(Statement.class), + any(ConsistencyLevel.class), + anyInt(), + anyInt(), + anyBoolean(), + anyInt()); + } + + protected void assertOnWriteTimeoutWasCalled(int times) { + Mockito.verify(retryPolicy, times(times)) + .onWriteTimeout( + any(Statement.class), + any(ConsistencyLevel.class), + any(WriteType.class), + anyInt(), + anyInt(), + anyInt()); + } + + protected void assertOnUnavailableWasCalled(int times) { + Mockito.verify(retryPolicy, times(times)) + .onUnavailable( + any(Statement.class), any(ConsistencyLevel.class), anyInt(), anyInt(), anyInt()); + } + + protected void assertOnRequestErrorWasCalled( + int times, Class expected) { + Mockito.verify(retryPolicy, times(times)) + .onRequestError(any(Statement.class), any(ConsistencyLevel.class), any(expected), anyInt()); + } + + protected void assertQueried(int hostNumber, int times) { + assertThat(scassandras.node(hostNumber).activityClient().retrieveQueries()).hasSize(times); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void afterMethod() { + if (cluster != null) cluster.close(); + if (scassandras != null) scassandras.stop(); + } + + @DataProvider + public static Object[][] serverSideErrors() { + return new Object[][] { + {server_error, ServerError.class}, + {overloaded, OverloadedException.class}, + }; + } + + @DataProvider + public static Object[][] connectionErrors() { + return new Object[][] {{CloseType.CLOSE}, {CloseType.HALFCLOSE}, {CloseType.RESET}}; + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java index 3b64a234d92..0e155eb986a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,47 +17,45 @@ */ package com.datastax.driver.core.policies; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.CCMConfig; import com.datastax.driver.core.CCMTestsSupport; import com.datastax.driver.core.Cluster; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - @CCMConfig(createSession = false) public class CloseableLoadBalancingPolicyTest extends CCMTestsSupport { - private CloseMonitoringPolicy policy; + private CloseMonitoringPolicy policy; - @Test(groups = "short") - public void should_be_invoked_at_shutdown() { - try { - cluster().connect(); - cluster().close(); - } finally { - assertThat(policy.wasClosed).isTrue(); - } + @Test(groups = "short") + public void should_be_invoked_at_shutdown() { + try { + cluster().connect(); + cluster().close(); + } finally { + assertThat(policy.wasClosed).isTrue(); } + } - @Override - public Cluster.Builder createClusterBuilder() { - policy = new CloseMonitoringPolicy(Policies.defaultLoadBalancingPolicy()); - return Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withLoadBalancingPolicy(policy); - } + @Override + public Cluster.Builder createClusterBuilder() { + policy = new CloseMonitoringPolicy(Policies.defaultLoadBalancingPolicy()); + return super.createClusterBuilder().withLoadBalancingPolicy(policy); + } - static class CloseMonitoringPolicy extends DelegatingLoadBalancingPolicy { + static class CloseMonitoringPolicy extends DelegatingLoadBalancingPolicy { - volatile boolean wasClosed = false; + volatile boolean wasClosed = false; - public CloseMonitoringPolicy(LoadBalancingPolicy delegate) { - super(delegate); - } + public CloseMonitoringPolicy(LoadBalancingPolicy delegate) { + super(delegate); + } - @Override - public void close() { - wasClosed = true; - } + @Override + public void close() { + wasClosed = true; } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/CustomRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/CustomRetryPolicyIntegrationTest.java index de2411276d9..935d97f6e70 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/CustomRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/CustomRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,19 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.closed_connection; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.unavailable; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.exceptions.OperationTimedOutException; import com.datastax.driver.core.exceptions.TransportException; @@ -26,156 +40,177 @@ import org.scassandra.http.client.Result; import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; - -/** - * Integration test with a custom implementation, to test retry and ignore decisions. - */ +/** Integration test with a custom implementation, to test retry and ignore decisions. */ public class CustomRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - public CustomRetryPolicyIntegrationTest() { - super(new CustomRetryPolicy()); + public CustomRetryPolicyIntegrationTest() { + super(new CustomRetryPolicy()); + } + + @Test(groups = "short") + public void should_ignore_read_timeout() { + simulateError(1, read_request_timeout); + + ResultSet rs = query(); + assertThat(rs.iterator().hasNext()).isFalse(); // ignore decisions produce empty result sets + + assertOnReadTimeoutWasCalled(1); + assertThat(errors.getIgnores().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getIgnoresOnReadTimeout().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_retry_once_on_same_host_on_unavailable() { + simulateError(1, unavailable); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + /*expected*/ } - @Test(groups = "short") - public void should_ignore_read_timeout() { - simulateError(1, read_request_timeout); - - ResultSet rs = query(); - assertThat(rs.iterator().hasNext()).isFalse(); // ignore decisions produce empty result sets - - assertOnReadTimeoutWasCalled(1); - assertThat(errors.getIgnores().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getIgnoresOnReadTimeout().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnUnavailableWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + 
assertThat(errors.getUnavailables().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_client_timeouts() { + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + try { + query(); + fail("expected an OperationTimedOutException"); + } catch (OperationTimedOutException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format( + "[%s] Timed out waiting for server response", host1.getEndPoint().resolve())); + } + assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } - - @Test(groups = "short") - public void should_retry_once_on_same_host_on_unavailable() { - simulateError(1, unavailable); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) {/*expected*/} - - assertOnUnavailableWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getUnavailables().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + } + + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_rethrow_on_server_side_error( + Result error, Class exception) { + simulateError(1, error); + try { + query(); + fail("expected a DriverException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(exception); } - - @Test(groups = "short") - public void should_rethrow_on_client_timeouts() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - try { - query(); - fail("expected an OperationTimedOutException"); - } catch (OperationTimedOutException e) { - assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Timed out waiting for server response", host1.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + assertOnRequestErrorWasCalled(1, exception); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + 
@Test(groups = "short", dataProvider = "connectionErrors") + public void should_rethrow_on_connection_error(CloseType closeType) { + simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); + try { + query(); + fail("expected a TransportException"); + } catch (TransportException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format("[%s] Connection has been closed", host1.getEndPoint().resolve())); + } + assertOnRequestErrorWasCalled(1, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ignores read and write timeouts, and retries at most once on unavailable. Rethrows for + * unexpected errors. + */ + static class CustomRetryPolicy implements RetryPolicy { + + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return RetryDecision.ignore(); } + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return RetryDecision.ignore(); + } - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_rethrow_on_server_side_error(Result error, Class exception) { - simulateError(1, error); - try { - query(); - fail("expected a DriverException"); - } catch (DriverException e) { - assertThat(e).isInstanceOf(exception); - } - assertOnRequestErrorWasCalled(1, exception); - assertThat(errors.getOthers().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return (nbRetry == 0) ? RetryDecision.retry(cl) : RetryDecision.rethrow(); } + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return RetryDecision.rethrow(); + } - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_rethrow_on_connection_error(CloseType closeType) { - simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); - try { - query(); - fail("expected a TransportException"); - } catch (TransportException e) { - assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Connection has been closed", host1.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(1, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + @Override + public void init(Cluster cluster) { + /*nothing to do*/ } - /** - * Ignores read and write timeouts, and retries at most once on unavailable. - * Rethrows for unexpected errors. 
- */ - static class CustomRetryPolicy implements RetryPolicy { - - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryDecision.ignore(); - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.ignore(); - } - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return (nbRetry == 0) - ? RetryDecision.retry(cl) - : RetryDecision.rethrow(); - } - - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.rethrow(); - } - - @Override - public void init(Cluster cluster) {/*nothing to do*/} - - @Override - public void close() {/*nothing to do*/} + @Override + public void close() { + /*nothing to do*/ } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicyTest.java index 20ed08c23b2..c662339b0ee 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,26 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ScassandraCluster.datacenter; +import static com.datastax.driver.core.TestUtils.findHost; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static com.google.common.collect.Lists.newArrayList; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.spy; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.DataProviders; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.MemoryAppender; +import com.datastax.driver.core.QueryTracker; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.google.common.collect.Lists; +import java.util.Collection; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.mockito.ArgumentCaptor; @@ -27,521 +46,552 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.Collection; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ScassandraCluster.datacenter; -import static com.datastax.driver.core.TestUtils.findHost; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; -import static com.google.common.collect.Lists.newArrayList; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.spy; -import static org.mockito.MockitoAnnotations.initMocks; - public class DCAwareRoundRobinPolicyTest { - Logger policyLogger = Logger.getLogger(DCAwareRoundRobinPolicy.class); - Level originalLevel; - MemoryAppender logs; - QueryTracker queryTracker; - - @Captor - ArgumentCaptor> initHostsCaptor; - - @BeforeMethod(groups = "short") - public void setUp() { - initMocks(this); - originalLevel = policyLogger.getLevel(); - policyLogger.setLevel(Level.WARN); - logs = new MemoryAppender(); - policyLogger.addAppender(logs); - queryTracker = new QueryTracker(); + private final Logger policyLogger = Logger.getLogger(DCAwareRoundRobinPolicy.class); + private Level originalLevel; + private MemoryAppender logs; + private QueryTracker queryTracker; + + @Captor private ArgumentCaptor> initHostsCaptor; + + @BeforeMethod(groups = "short") + public void setUp() { + initMocks(this); + originalLevel = policyLogger.getLevel(); + policyLogger.setLevel(Level.WARN); + logs = new MemoryAppender(); + policyLogger.addAppender(logs); + queryTracker = new QueryTracker(); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void tearDown() { + policyLogger.setLevel(originalLevel); + policyLogger.removeAppender(logs); + } + + private Cluster.Builder builder() { + return Cluster.builder() + // Close cluster immediately to speed up tests. 
+ .withNettyOptions(nonQuietClusterCloseOptions);
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} will round robin within hosts in the explicitly
+ * specified local DC via {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)}
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_round_robin_within_local_dc() {
+ // given: a 10 node 2 DC cluster.
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build();
+ Cluster cluster =
+ builder()
+ .addContactPoints(sCluster.address(1, 1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(
+ DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(1)).build())
+ .build();
+ try {
+ sCluster.init();
+
+ Session session = cluster.connect();
+ // when: a query is executed 50 times.
+ queryTracker.query(session, 50);
+
+ // then: each node in local DC should get an equal (10) number of requests.
+ // then: no node in the remote DC should get a request.
+ for (int i = 1; i <= 5; i++) {
+ queryTracker.assertQueried(sCluster, 1, i, 10);
+ queryTracker.assertQueried(sCluster, 2, i, 0);
+ }
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
-
- @AfterMethod(groups = "short", alwaysRun = true)
- public void tearDown() {
- policyLogger.setLevel(originalLevel);
- policyLogger.removeAppender(logs);
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} does not use remote hosts if replicas in the local
+ * DC are UP.
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_not_use_remote_hosts_if_some_nodes_are_up_in_local_dc() {
+ // given: a 10 node 2 DC cluster with DC policy with 2 remote hosts.
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build();
+ @SuppressWarnings("deprecation")
+ Cluster cluster =
+ builder()
+ .addContactPoints(sCluster.address(1, 1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(
+ DCAwareRoundRobinPolicy.builder()
+ .withLocalDc(datacenter(1))
+ .withUsedHostsPerRemoteDc(2)
+ .build())
+ .build();
+
+ try {
+ sCluster.init();
+
+ Session session = cluster.connect();
+
+ // when: a query is executed 50 times and some hosts are down in the local DC.
+ sCluster.stop(cluster, 1, 5);
+ sCluster.stop(cluster, 1, 3);
+ sCluster.stop(cluster, 1, 1);
+ assertThat(cluster).controlHost().isNotNull();
+ queryTracker.query(session, 50);
+
+ // then: all requests should be distributed to the remaining up nodes in local DC.
+ queryTracker.assertQueried(sCluster, 1, 2, 25);
+ queryTracker.assertQueried(sCluster, 1, 4, 25);
+
+ // then: no nodes in the remote DC should have been queried.
+ for (int i = 1; i <= 5; i++) {
+ queryTracker.assertQueried(sCluster, 2, i, 0);
+ }
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} will round robin on remote hosts but only if no
+ * local replicas are available and only within the number of hosts configured by {@link
+ * DCAwareRoundRobinPolicy.Builder#withUsedHostsPerRemoteDc(int)}
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_round_robin_on_remote_hosts_when_no_up_nodes_in_local_dc() {
+ // given: a 10 node 2 DC cluster with DC policy with 2 remote hosts.
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build(); + @SuppressWarnings("deprecation") + Cluster cluster = + builder() + .addContactPoints(sCluster.address(1, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy( + DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build()) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + sCluster.stopDC(cluster, 1); + + // Wait for control connection to be re-established, needed as + // control connection attempts increment LBP counter. + assertThat(cluster).controlHost().isNotNull(); + + // when: a query is executed 50 times and all hosts are down in local DC. + queryTracker.query(session, 50); + + // then: only usedHostsPerRemoteDc nodes in the remote DC should get requests. + Collection queryCounts = newArrayList(); + for (int i = 1; i <= 5; i++) { + queryCounts.add(queryTracker.queryCount(sCluster, 2, i)); + } + assertThat(queryCounts).containsOnly(0, 0, 0, 25, 25); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will round robin within hosts in the explicitly specific local DC - * via {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)} - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_round_robin_within_local_dc() { - // given: a 10 node 2 DC cluster. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build(); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(1)).build()) - .build(); - try { - sCluster.init(); - - Session session = cluster.connect(); - // when: a query is executed 50 times. - queryTracker.query(session, 50); - - // then: each node in local DC should get an equal (10) number of requests. - // then: no node in the remote DC should get a request. - for (int i = 1; i <= 5; i++) { - queryTracker.assertQueried(sCluster, 1, i, 10); - queryTracker.assertQueried(sCluster, 2, i, 0); - } - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link DCAwareRoundRobinPolicy} will by default only use remote hosts for non DC + * local Consistency Levels. In the case that a DC local Consistency Level is provided a {@link + * NoHostAvailableException} is raised. + * + * @test_category load_balancing:dc_aware + */ + @Test( + groups = "short", + dataProvider = "consistencyLevels", + dataProviderClass = DataProviders.class) + public void should_only_use_remote_hosts_when_using_non_dc_local_cl(ConsistencyLevel cl) { + // given: a 4 node 2 DC Cluster with a LB policy that specifies to not allow remote dcs for + // a local consistency level. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); + @SuppressWarnings("deprecation") + Cluster cluster = + builder() + .addContactPoints(sCluster.address(1, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy( + DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build()) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + sCluster.stopDC(cluster, 1); + + // Wait for control connection to be re-established, needed as + // control connection attempts increment LBP counter. 
+ assertThat(cluster).controlHost().isNotNull(); + + // when: a query is executed 50 times and all hosts are down in local DC. + // then: expect a NHAE for a local CL since no local replicas available. + Class expectedException = + cl.isDCLocal() ? NoHostAvailableException.class : null; + queryTracker.query(session, 50, cl, expectedException); + + int expectedQueryCount = cl.isDCLocal() ? 0 : 25; + for (int i = 1; i <= 2; i++) { + queryTracker.assertQueried(sCluster, 1, i, 0); + // then: Remote hosts should only be queried for non local CLs. + queryTracker.assertQueried(sCluster, 2, i, expectedQueryCount); + } + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} does not use remote hosts if replicas in the local DC are UP. - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_not_use_remote_hosts_if_some_nodes_are_up_in_local_dc() { - // given: a 10 node 2 DC cluster with DC policy with 2 remote hosts. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build(); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(1)).withUsedHostsPerRemoteDc(2).build()) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - - // when: a query is executed 50 times and some hosts are down in the local DC. - sCluster.stop(cluster, 1, 5); - sCluster.stop(cluster, 1, 3); - sCluster.stop(cluster, 1, 1); - assertThat(cluster).controlHost().isNotNull(); - queryTracker.query(session, 50); - - // then: all requests should be distributed to the remaining up nodes in local DC. - queryTracker.assertQueried(sCluster, 1, 2, 25); - queryTracker.assertQueried(sCluster, 1, 4, 25); - - // then: no nodes in the remote DC should have been queried. - for (int i = 1; i <= 5; i++) { - queryTracker.assertQueried(sCluster, 2, i, 0); - } - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link DCAwareRoundRobinPolicy} will use remote hosts for non DC local Consistency + * Levels if {@code DCAwareRoundRobinPolicy.Builder#allowRemoteDCsForLocalConsistencyLevel} is + * used. In the case that a DC local Consistency Level is provided a {@link + * NoHostAvailableException} is raised. + * + * @test_category load_balancing:dc_aware + */ + @Test( + groups = "short", + dataProvider = "consistencyLevels", + dataProviderClass = DataProviders.class) + public void should_use_remote_hosts_for_local_cl_when_allowed(ConsistencyLevel cl) { + // given: a 4 node 2 DC Cluster with a LB policy that specifies to allow remote dcs for + // a local consistency level. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); + @SuppressWarnings("deprecation") + Cluster cluster = + builder() + .addContactPoints(sCluster.address(1, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy( + DCAwareRoundRobinPolicy.builder() + .allowRemoteDCsForLocalConsistencyLevel() + .withUsedHostsPerRemoteDc(2) + .build()) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + sCluster.stopDC(cluster, 1); + + // Wait for control connection to be re-established, needed as + // control connection attempts increment LBP counter. + assertThat(cluster).controlHost().isNotNull(); + + // when: a query is executed 50 times and all hosts are down in local DC. 
+ queryTracker.query(session, 50, cl, null); + + for (int i = 1; i <= 2; i++) { + queryTracker.assertQueried(sCluster, 1, i, 0); + // then: Remote hosts should be queried. + queryTracker.assertQueried(sCluster, 2, i, 25); + } + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will round robin on remote hosts but only if - * no local replicas are available and only within the number of hosts configured by - * {@link DCAwareRoundRobinPolicy.Builder#withUsedHostsPerRemoteDc(int)} - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_round_robin_on_remote_hosts_when_no_up_nodes_in_local_dc() { - // given: a 10 node 2 DC cluster with DC policy with 2 remote hosts. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5, 5).build(); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build()) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - - sCluster.stopDC(cluster, 1); - - // Wait for control connection to be re-established, needed as - // control connection attempts increment LBP counter. - assertThat(cluster).controlHost().isNotNull(); - - // when: a query is executed 50 times and all hosts are down in local DC. - queryTracker.query(session, 50); - - // then: only usedHostsPerRemoteDc nodes in the remote DC should get requests. - Collection queryCounts = newArrayList(); - for (int i = 1; i <= 5; i++) { - queryCounts.add(queryTracker.queryCount(sCluster, 2, i)); - } - assertThat(queryCounts).containsOnly(0, 0, 0, 25, 25); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that when {@link DCAwareRoundRobinPolicy} is wrapped with a {@link HostFilterPolicy} + * that blacklists a data center that nodes in that datacenter are never queried. + * + * @test_category load_balancing:dc_aware + */ + @Test(groups = "short") + public void should_not_send_requests_to_blacklisted_dc_using_host_filter_policy() { + // given: a 6 node 3 DC cluster with a DCAwareRoundRobinPolicy that is filtering hosts in DC2. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2).build(); + @SuppressWarnings("deprecation") + LoadBalancingPolicy loadBalancingPolicy = + HostFilterPolicy.fromDCBlackList( + DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build(), + Lists.newArrayList(datacenter(2))); + Cluster cluster = + builder() + .addContactPoints(sCluster.address(1, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + // when: A query is made and nodes for the local dc are available. + queryTracker.query(session, 50); + + // then: only nodes in the local DC should have been queried. + queryTracker.assertQueried(sCluster, 1, 1, 25); + queryTracker.assertQueried(sCluster, 1, 2, 25); + queryTracker.assertQueried(sCluster, 2, 1, 0); + queryTracker.assertQueried(sCluster, 2, 2, 0); + queryTracker.assertQueried(sCluster, 3, 1, 0); + queryTracker.assertQueried(sCluster, 3, 1, 0); + + // when: A query is made and all nodes in the local dc are down. 
+ sCluster.stopDC(cluster, 1); + assertThat(cluster).controlHost().isNotNull(); + queryTracker.reset(); + queryTracker.query(session, 50); + + // then: Only nodes in DC3 should have been queried, since DC2 is blacklisted and DC1 is down. + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 2, 1, 0); + queryTracker.assertQueried(sCluster, 2, 2, 0); + queryTracker.assertQueried(sCluster, 3, 1, 25); + queryTracker.assertQueried(sCluster, 3, 2, 25); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will by default only use remote hosts for non DC local - * Consistency Levels. In the case that a DC local Consistency Level is provided a - * {@link NoHostAvailableException} is raised. - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short", dataProvider = "consistencyLevels", dataProviderClass = DataProviders.class) - public void should_only_use_remote_hosts_when_using_non_dc_local_cl(ConsistencyLevel cl) { - // given: a 4 node 2 DC Cluster with a LB policy that specifies to not allow remote dcs for - // a local consistency level. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build()) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - - sCluster.stopDC(cluster, 1); - - // Wait for control connection to be re-established, needed as - // control connection attempts increment LBP counter. - assertThat(cluster).controlHost().isNotNull(); - - // when: a query is executed 50 times and all hosts are down in local DC. - // then: expect a NHAE for a local CL since no local replicas available. - Class expectedException = cl.isDCLocal() ? NoHostAvailableException.class : null; - queryTracker.query(session, 50, cl, expectedException); - - int expectedQueryCount = cl.isDCLocal() ? 0 : 25; - for (int i = 1; i <= 2; i++) { - queryTracker.assertQueried(sCluster, 1, i, 0); - // then: Remote hosts should only be queried for non local CLs. - queryTracker.assertQueried(sCluster, 2, i, expectedQueryCount); - } - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that when {@link DCAwareRoundRobinPolicy} is wrapped with a {@link HostFilterPolicy} + * that white lists data centers that only nodes in those data centers are queried. + * + * @test_category load_balancing:dc_aware + */ + @Test(groups = "short") + public void should_send_requests_to_whitelisted_dcs_using_host_filter_policy() { + // given: a 6 node 3 DC cluster with a DCAwareRoundRobinPolicy that is whitelisting hosts in DC1 + // and DC2. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2).build(); + @SuppressWarnings("deprecation") + LoadBalancingPolicy loadBalancingPolicy = + HostFilterPolicy.fromDCWhiteList( + DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build(), + Lists.newArrayList(datacenter(1), datacenter(2))); + Cluster cluster = + builder() + .addContactPoints(sCluster.address(1, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(loadBalancingPolicy) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + // when: A query is made and nodes for the local dc are available. 
+ queryTracker.query(session, 50);
+
+ // then: only nodes in the local DC should have been queried.
+ queryTracker.assertQueried(sCluster, 1, 1, 25);
+ queryTracker.assertQueried(sCluster, 1, 2, 25);
+ queryTracker.assertQueried(sCluster, 2, 1, 0);
+ queryTracker.assertQueried(sCluster, 2, 2, 0);
+ queryTracker.assertQueried(sCluster, 3, 1, 0);
+ queryTracker.assertQueried(sCluster, 3, 1, 0);
+
+ // when: A query is made and all nodes in the local dc are down.
+ sCluster.stopDC(cluster, 1);
+ assertThat(cluster).controlHost().isNotNull();
+ queryTracker.reset();
+ queryTracker.query(session, 50);
+
+ // then: Only nodes in DC2 should have been queried, since DC3 is not in the whitelist and DC1
+ // is down.
+ queryTracker.assertQueried(sCluster, 1, 1, 0);
+ queryTracker.assertQueried(sCluster, 1, 2, 0);
+ queryTracker.assertQueried(sCluster, 2, 1, 25);
+ queryTracker.assertQueried(sCluster, 2, 2, 25);
+ queryTracker.assertQueried(sCluster, 3, 1, 0);
+ queryTracker.assertQueried(sCluster, 3, 1, 0);
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
-
- /**
- * Ensures that {@link DCAwareRoundRobinPolicy} will use remote hosts for non DC local
- * Consistency Levels if {@link DCAwareRoundRobinPolicy.Builder#allowRemoteDCsForLocalConsistencyLevel} is used.
- * In the case that a DC local Consistency Level is provided a {@link NoHostAvailableException} is raised.
- *
- * @test_category load_balancing:dc_aware
- */
- @Test(groups = "short", dataProvider = "consistencyLevels", dataProviderClass = DataProviders.class)
- public void should_use_remote_hosts_for_local_cl_when_allowed(ConsistencyLevel cl) {
- // given: a 4 node 2 DC Cluster with a LB policy that specifies to allow remote dcs for
- // a local consistency level.
- ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build();
- Cluster cluster = builder()
- .addContactPoints(sCluster.address(1, 1).getAddress())
- .withPort(sCluster.getBinaryPort())
- .withLoadBalancingPolicy(DCAwareRoundRobinPolicy.builder()
- .allowRemoteDCsForLocalConsistencyLevel()
- .withUsedHostsPerRemoteDc(2).build())
- .build();
-
- try {
- sCluster.init();
-
- Session session = cluster.connect();
-
- sCluster.stopDC(cluster, 1);
-
- // Wait for control connection to be re-established, needed as
- // control connection attempts increment LBP counter.
- assertThat(cluster).controlHost().isNotNull();
-
- // when: a query is executed 50 times and all hosts are down in local DC.
- queryTracker.query(session, 50, cl, null);
-
- for (int i = 1; i <= 2; i++) {
- queryTracker.assertQueried(sCluster, 1, i, 0);
- // then: Remote hosts should be queried.
- queryTracker.assertQueried(sCluster, 2, i, 25);
- }
- } finally {
- cluster.close();
- sCluster.stop();
- }
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} will determine its local DC based on the data
+ * center of the contact point(s).
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_use_local_dc_from_contact_points_when_not_explicitly_specified() {
+ // given: a 4 node 2 DC cluster without a local DC specified.
+ DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().build());
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build();
+ Cluster cluster =
+ builder()
+ .addContactPoints(sCluster.address(1, 1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(policy)
+ .build();
+
+ try {
+ sCluster.init();
+
+ Host host1 = findHost(cluster, 1);
+
+ // when: the cluster is initialized.
+ cluster.init();
+
+ // then: should have been initialized with only the host given as the contact point.
+ Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture());
+ assertThat(initHostsCaptor.getValue()).containsExactly(host1);
+ // then: the local dc should match the contact points' datacenter.
+ assertThat(policy.localDc).isEqualTo(host1.getDatacenter());
+ // then: should not indicate that contact points don't match the local datacenter.
+ assertThat(logs.get()).doesNotContain("Some contact points don't match local datacenter");
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
-
- /**
- * Ensures that when {@link DCAwareRoundRobinPolicy} is wrapped with a {@link HostFilterPolicy} that
- * blacklists a data center that nodes in that datacenter are never queried.
- *
- * @test_category load_balancing:dc_aware
- */
- @Test(groups = "short")
- public void should_not_send_requests_to_blacklisted_dc_using_host_filter_policy() {
- // given: a 6 node 3 DC cluster with a DCAwareRoundRobinPolicy that is filtering hosts in DC2.
- ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2).build();
- LoadBalancingPolicy loadBalancingPolicy = HostFilterPolicy.fromDCBlackList(
- DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build(),
- Lists.newArrayList(datacenter(2)));
- Cluster cluster = builder()
- .addContactPoints(sCluster.address(1, 1).getAddress())
- .withPort(sCluster.getBinaryPort())
- .withLoadBalancingPolicy(loadBalancingPolicy)
- .build();
-
- try {
- sCluster.init();
-
- Session session = cluster.connect();
-
- // when: A query is made and nodes for the local dc are available.
- queryTracker.query(session, 50);
-
- // then: only nodes in the local DC should have been queried.
- queryTracker.assertQueried(sCluster, 1, 1, 25);
- queryTracker.assertQueried(sCluster, 1, 2, 25);
- queryTracker.assertQueried(sCluster, 2, 1, 0);
- queryTracker.assertQueried(sCluster, 2, 2, 0);
- queryTracker.assertQueried(sCluster, 3, 1, 0);
- queryTracker.assertQueried(sCluster, 3, 1, 0);
-
- // when: A query is made and all nodes in the local dc are down.
- sCluster.stopDC(cluster, 1);
- assertThat(cluster).controlHost().isNotNull();
- queryTracker.reset();
- queryTracker.query(session, 50);
-
- // then: Only nodes in DC3 should have been queried, since DC2 is blacklisted and DC1 is down.
- queryTracker.assertQueried(sCluster, 1, 1, 0);
- queryTracker.assertQueried(sCluster, 1, 2, 0);
- queryTracker.assertQueried(sCluster, 2, 1, 0);
- queryTracker.assertQueried(sCluster, 2, 2, 0);
- queryTracker.assertQueried(sCluster, 3, 1, 25);
- queryTracker.assertQueried(sCluster, 3, 2, 25);
- } finally {
- cluster.close();
- sCluster.stop();
- }
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} will determine its local DC based on the data
+ * center of the contact point(s) and if contact points in different DCs are detected that a log
+ * message is generated indicating some contact points don't match the local data center.
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_warn_if_contact_points_have_different_dcs_when_not_explicitly_specified() {
+ // given: a 4 node 2 DC cluster with a Cluster instance with contact points in different DCs
+ // and no local DC specified.
+ DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().build());
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build();
+ Cluster cluster =
+ builder()
+ .addContactPoints(
+ sCluster.address(1, 1).getAddress(), sCluster.address(2, 1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(policy)
+ .build();
+
+ try {
+ sCluster.init();
+
+ Host host1 = findHost(cluster, 1);
+ Host host3 = findHost(cluster, 3);
+
+ // when: the cluster is initialized.
+ cluster.init();
+
+ // then: should have been initialized with only two hosts given as the contact point.
+ Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture());
+ assertThat(initHostsCaptor.getValue()).containsOnly(host1, host3);
+ // then: should indicate that some contact points don't match the local datacenter.
+ assertThat(logs.get()).contains("Some contact points don't match local data center");
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
-
- /**
- * Ensures that when {@link DCAwareRoundRobinPolicy} is wrapped with a {@link HostFilterPolicy} that
- * white lists data centers that only nodes in those data centers are queried.
- *
- * @test_category load_balancing:dc_aware
- */
- @Test(groups = "short")
- public void should_send_requests_to_whitelisted_dcs_using_host_filter_policy() {
- // given: a 6 node 3 DC cluster with a DCAwareRoundRobinPolicy that is whitelisting hosts in DC1 and DC2.
- ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2).build();
- LoadBalancingPolicy loadBalancingPolicy = HostFilterPolicy.fromDCWhiteList(
- DCAwareRoundRobinPolicy.builder().withUsedHostsPerRemoteDc(2).build(),
- Lists.newArrayList(datacenter(1), datacenter(2)));
- Cluster cluster = builder()
- .addContactPoints(sCluster.address(1, 1).getAddress())
- .withPort(sCluster.getBinaryPort())
- .withLoadBalancingPolicy(loadBalancingPolicy)
- .build();
-
- try {
- sCluster.init();
-
- Session session = cluster.connect();
-
- // when: A query is made and nodes for the local dc are available.
- queryTracker.assertQueried(sCluster, 1, 1, 0);
- queryTracker.assertQueried(sCluster, 1, 2, 0);
- queryTracker.assertQueried(sCluster, 2, 1, 25);
- queryTracker.assertQueried(sCluster, 2, 2, 25);
- queryTracker.assertQueried(sCluster, 3, 1, 0);
- queryTracker.assertQueried(sCluster, 3, 1, 0);
- } finally {
- cluster.close();
- sCluster.stop();
- }
+ }
+
+ /**
+ * Ensures that {@link DCAwareRoundRobinPolicy} will not log a warning if all contact points match
+ * the data center provided in {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)} and
+ * that the explicitly provided local data center is used.
+ *
+ * @test_category load_balancing:dc_aware
+ */
+ @Test(groups = "short")
+ public void should_use_provided_local_dc_and_not_warn_if_contact_points_match() {
+ // given: a 4 node 2 DC cluster with a Cluster instance whose contact point is in DC1
+ // and a local DC explicitly set to that same data center.
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build();
+ DCAwareRoundRobinPolicy policy =
+ spy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(1)).build());
+ Cluster cluster =
+ builder()
+ .addContactPoints(sCluster.address(1, 1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(policy)
+ .build();
+
+ try {
+ sCluster.init();
+
+ Host host1 = findHost(cluster, 1);
+
+ // when: the cluster is initialized.
+ cluster.init();
+
+ // then: should have been initialized with only the host given as the contact point.
+ Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture());
+ assertThat(initHostsCaptor.getValue()).containsOnly(host1);
+ // then: the data center should appropriately be set to the one specified.
+ assertThat(policy.localDc).isEqualTo(host1.getDatacenter());
+ // then: should not indicate that contact points don't match the local datacenter.
+ assertThat(logs.get()).doesNotContain("Some contact points don't match local data center");
+ } finally {
+ cluster.close();
+ sCluster.stop();
 }
-
- /**
- * Ensures that {@link DCAwareRoundRobinPolicy} will determine it's local DC based on the data center of the
- * contact point(s).
- *
- * @test_category load_balancing:dc_aware
- */
- @Test(groups = "short")
- public void should_use_local_dc_from_contact_points_when_not_explicitly_specified() {
- // given: a 4 node 2 DC cluster without a local DC specified.
- DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().build());
- ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build();
- Cluster cluster = builder()
- .addContactPoints(sCluster.address(1, 1).getAddress())
- .withPort(sCluster.getBinaryPort())
- .withLoadBalancingPolicy(policy)
- .build();
-
- try {
- sCluster.init();
-
- Host host1 = findHost(cluster, 1);
-
- // when: the cluster is initialized.
- cluster.init();
-
- // then: should have been initialized with only the host given as the contact point.
- Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture());
- assertThat(initHostsCaptor.getValue()).containsExactly(host1);
- // then: the local dc should match the contact points' datacenter.
- assertThat(policy.localDc).isEqualTo(host1.getDatacenter());
- // then: should not indicate that contact points don't match the local datacenter.
- assertThat(logs.get()).doesNotContain("Some contact points don't match local datacenter"); - } finally { - cluster.close(); - sCluster.stop(); - } - } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will determine it's local DC based on the data center of the - * contact point(s) and if contact points in different DCs are detected that a log message is generated - * indicating some contact points don't match the local data center. - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_warn_if_contact_points_have_different_dcs_when_not_explicitly_specified() { - // given: a 4 node 2 DC cluster with a Cluster instance with contact points in different DCs - // and no contact point specified. - DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().build()); - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress(), sCluster.address(2, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build(); - - try { - sCluster.init(); - - Host host1 = findHost(cluster, 1); - Host host3 = findHost(cluster, 3); - - // when: the cluster is initialized. - cluster.init(); - - // then: should have been initialized with only two hosts given as the contact point. - Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture()); - assertThat(initHostsCaptor.getValue()).containsOnly(host1, host3); - // then: should indicate that some contact points don't match the local datacenter. - assertThat(logs.get()).contains("Some contact points don't match local data center"); - } finally { - cluster.close(); - sCluster.stop(); - } - } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will not log a warning if all contact points match - * the data center provided in {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)} and that - * the explicitly provided local data center is used. - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_use_provided_local_dc_and_not_warn_if_contact_points_match() { - // given: a 4 node 2 DC cluster with a Cluster instance with contact points in different DCs - // and a local DC that doesn't match any contact points. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); - DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(1)).build()); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build(); - - try { - sCluster.init(); - - Host host1 = findHost(cluster, 1); - - // when: the cluster is initialized. - cluster.init(); - - // then: should have been initialized with only two hosts given as the contact point. - Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture()); - assertThat(initHostsCaptor.getValue()).containsOnly(host1); - // then: the data center should appropriately be set to the one specified. - assertThat(policy.localDc).isEqualTo(host1.getDatacenter()); - // then: should not indicate that contact points don't match the local datacenter. 
- assertThat(logs.get()).doesNotContain("Some contact points don't match local data center"); - } finally { - cluster.close(); - sCluster.stop(); - } - } - - /** - * Ensures that {@link DCAwareRoundRobinPolicy} will log a warning if some contact points don't match - * the data center provided in {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)} and that - * the explicitly provided local data center is used. - * - * @test_category load_balancing:dc_aware - */ - @Test(groups = "short") - public void should_use_provided_local_dc_and_warn_if_contact_points_dont_match() { - // given: a 4 node 2 DC cluster with a Cluster instance with contact points in different DCs - // and a local DC that doesn't match any contact points. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); - DCAwareRoundRobinPolicy policy = spy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(3)).build()); - Cluster cluster = builder() - .addContactPoints(sCluster.address(1, 1).getAddress(), sCluster.address(2, 1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build(); - - try { - sCluster.init(); - - Host host1 = findHost(cluster, 1); - Host host3 = findHost(cluster, 3); - - // when: the cluster is initialized. - cluster.init(); - - // then: should have been initialized with only two hosts given as the contact point. - Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture()); - assertThat(initHostsCaptor.getValue()).containsOnly(host1, host3); - // then: the data center should appropriately be set to the one specified. - assertThat(policy.localDc).isEqualTo(datacenter(3)); - // then: should indicate that some contact points don't match the local datacenter. - assertThat(logs.get()).contains("Some contact points don't match local data center"); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link DCAwareRoundRobinPolicy} will log a warning if some contact points don't + * match the data center provided in {@link DCAwareRoundRobinPolicy.Builder#withLocalDc(String)} + * and that the explicitly provided local data center is used. + * + * @test_category load_balancing:dc_aware + */ + @Test(groups = "short") + public void should_use_provided_local_dc_and_warn_if_contact_points_dont_match() { + // given: a 4 node 2 DC cluster with a Cluster instance with contact points in different DCs + // and a local DC that doesn't match any contact points. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2).build(); + DCAwareRoundRobinPolicy policy = + spy(DCAwareRoundRobinPolicy.builder().withLocalDc(datacenter(3)).build()); + Cluster cluster = + builder() + .addContactPoints( + sCluster.address(1, 1).getAddress(), sCluster.address(2, 1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(policy) + .build(); + + try { + sCluster.init(); + + Host host1 = findHost(cluster, 1); + Host host3 = findHost(cluster, 3); + + // when: the cluster is initialized. + cluster.init(); + + // then: should have been initialized with only two hosts given as the contact point. + Mockito.verify(policy).init(any(Cluster.class), initHostsCaptor.capture()); + assertThat(initHostsCaptor.getValue()).containsOnly(host1, host3); + // then: the data center should appropriately be set to the one specified. + assertThat(policy.localDc).isEqualTo(datacenter(3)); + // then: should indicate that some contact points don't match the local datacenter. 
+ assertThat(logs.get()).contains("Some contact points don't match local data center"); + } finally { + cluster.close(); + sCluster.stop(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/DefaultRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/DefaultRetryPolicyIntegrationTest.java index 11084ae9339..0de1477a186 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/DefaultRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/DefaultRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +17,32 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.scassandra.http.client.Consistency.LOCAL_SERIAL; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.closed_connection; +import static org.scassandra.http.client.Result.read_failure; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_failure; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Metrics; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.TransportException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteFailureException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; +import java.util.Collections; import org.assertj.core.api.Fail; import org.scassandra.Scassandra; import org.scassandra.http.client.ClosedConnectionConfig; @@ -26,251 +52,298 @@ import org.testng.Assert; import org.testng.annotations.Test; -import java.util.Collections; +public class DefaultRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { + public DefaultRetryPolicyIntegrationTest() { + 
super(DefaultRetryPolicy.INSTANCE); + } -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.scassandra.http.client.Consistency.LOCAL_SERIAL; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; + @Test(groups = "short") + public void should_rethrow_on_read_timeout_with_0_receivedResponses() { + simulateError(1, read_request_timeout); -public class DefaultRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - public DefaultRetryPolicyIntegrationTest() { - super(DefaultRetryPolicy.INSTANCE); + try { + query(); + fail("expected a ReadTimeoutException"); + } catch (ReadTimeoutException e) { + /*expected*/ } - @Test(groups = "short") - public void should_rethrow_on_read_timeout_with_0_receivedResponses() { - simulateError(1, read_request_timeout); - - try { - query(); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException e) {/*expected*/ } - - assertOnReadTimeoutWasCalled(1); - assertThat(errors.getReadTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnReadTimeoutWasCalled(1); + assertThat(errors.getReadTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_write_timeout_with_SIMPLE_write_type() { + simulateError(1, write_request_timeout); + + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + /*expected*/ } - @Test(groups = "short") - public void should_rethrow_on_write_timeout_with_SIMPLE_write_type() { - simulateError(1, write_request_timeout); - - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) {/*expected*/} - - assertOnWriteTimeoutWasCalled(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnWriteTimeoutWasCalled(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_try_next_host_on_first_unavailable() { + simulateError(1, unavailable); + simulateNormalResponse(2); + + query(); + + assertOnUnavailableWasCalled(1); + assertThat(errors.getUnavailables().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_second_unavailable() { + simulateError(1, unavailable); + simulateError(2, unavailable); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + /*expected*/ } - @Test(groups = "short") - public void 
should_try_next_host_on_first_unavailable() { - simulateError(1, unavailable); - simulateNormalResponse(2); + assertOnUnavailableWasCalled(2); + assertThat(errors.getUnavailables().getCount()).isEqualTo(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 0); + } - query(); + @Test(groups = "short") + public void should_rethrow_unavailable_in_no_host_available_exception() { + LoadBalancingPolicy firstHostOnlyPolicy = + new WhiteListPolicy( + Policies.defaultLoadBalancingPolicy(), + Collections.singletonList(host1.getEndPoint().resolve())); + + Cluster whiteListedCluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withRetryPolicy(retryPolicy) + .withLoadBalancingPolicy(firstHostOnlyPolicy) + .build(); + + try { + Session whiteListedSession = whiteListedCluster.connect(); + // Clear all activity as result of connect. + for (Scassandra node : scassandras.nodes()) { + node.activityClient().clearAllRecordedActivity(); + } - assertOnUnavailableWasCalled(1); - assertThat(errors.getUnavailables().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); + simulateError(1, unavailable); + + try { + query(whiteListedSession); + fail("expected an NoHostAvailableException"); + } catch (NoHostAvailableException e) { + // ok + Throwable error = e.getErrors().get(host1.getEndPoint()); + assertThat(error).isNotNull(); + assertThat(error).isInstanceOf(UnavailableException.class); + } + + assertOnUnavailableWasCalled(1); + // We expect a retry, but it was never sent because there were no more hosts. 
+ Metrics.Errors whiteListErrors = whiteListedCluster.getMetrics().getErrorMetrics(); + assertThat(whiteListErrors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } finally { + whiteListedCluster.close(); } + } - @Test(groups = "short") - public void should_rethrow_on_second_unavailable() { - simulateError(1, unavailable); - simulateError(2, unavailable); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) {/*expected*/} - - assertOnUnavailableWasCalled(2); - assertThat(errors.getUnavailables().getCount()).isEqualTo(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); + @Test(groups = "short") + public void should_try_next_host_on_client_timeouts() { + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + scassandras + .node(2) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result2"))) + .build()); + scassandras + .node(3) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result3"))) + .build()); + try { + query(); + fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()) + .hasOnlyElementsOfType(OperationTimedOutException.class) + .extractingResultOf("getMessage") + .containsOnlyOnce( + String.format("[%s] Timed out waiting for server response", host1.getEndPoint()), + String.format("[%s] Timed out waiting for server response", host2.getEndPoint()), + String.format("[%s] Timed out waiting for server response", host3.getEndPoint())); + } + assertOnRequestErrorWasCalled(3, OperationTimedOutException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(3); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } + } - @Test(groups = "short") - public void should_rethrow_unavailable_in_no_host_available_exception() { - LoadBalancingPolicy firstHostOnlyPolicy = - new WhiteListPolicy(Policies.defaultLoadBalancingPolicy(), - Collections.singletonList(host1.getSocketAddress())); - - Cluster whiteListedCluster = Cluster.builder() - .addContactPoints(scassandras.address(1).getAddress()) - .withPort(scassandras.getBinaryPort()) - .withRetryPolicy(retryPolicy) - .withLoadBalancingPolicy(firstHostOnlyPolicy) - .build(); - - try { - Session whiteListedSession = whiteListedCluster.connect(); - // Clear all activity as result of connect. 
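The whitelist test above pins the query plan to a single Scassandra node, so the retry decision of "try next host" has nowhere to go and surfaces as a NoHostAvailableException wrapping the UnavailableException. Outside the test harness the same restriction looks roughly like the sketch below against the 3.x API; the address, port and query are placeholders rather than values taken from this patch.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.policies.Policies;
import com.datastax.driver.core.policies.WhiteListPolicy;
import java.net.InetSocketAddress;
import java.util.Collections;

public class WhiteListExample {
  public static void main(String[] args) {
    // Placeholder node address; anything not in the white list is never contacted.
    InetSocketAddress onlyNode = new InetSocketAddress("127.0.0.1", 9042);

    Cluster cluster =
        Cluster.builder()
            .addContactPointsWithPorts(Collections.singletonList(onlyNode))
            .withLoadBalancingPolicy(
                new WhiteListPolicy(
                    Policies.defaultLoadBalancingPolicy(), Collections.singletonList(onlyNode)))
            .build();
    try {
      Session session = cluster.connect();
      session.execute("SELECT release_version FROM system.local");
    } finally {
      cluster.close();
    }
  }
}
```

WhiteListPolicy simply filters the child policy's query plan, which is why the test only ever sees queries on node 1.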
- for (Scassandra node : scassandras.nodes()) { - node.activityClient().clearAllRecordedActivity(); - } - - simulateError(1, unavailable); - - try { - query(whiteListedSession); - fail("expected an NoHostAvailableException"); - } catch (NoHostAvailableException e) { - // ok - Throwable error = e.getErrors().get(host1.getSocketAddress()); - assertThat(error).isNotNull(); - assertThat(error).isInstanceOf(UnavailableException.class); - } - - assertOnUnavailableWasCalled(1); - // We expect a retry, but it was never sent because there were no more hosts. - Metrics.Errors whiteListErrors = whiteListedCluster.getMetrics().getErrorMetrics(); - assertThat(whiteListErrors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } finally { - whiteListedCluster.close(); - } + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_try_next_host_on_server_side_error( + Result error, Class exception) { + simulateError(1, error); + simulateError(2, error); + simulateError(3, error); + try { + query(); + Fail.fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); } + assertOnRequestErrorWasCalled(3, exception); + assertThat(errors.getOthers().getCount()).isEqualTo(3); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } + + @Test(groups = "short") + public void should_rethrow_on_read_failure() { + simulateError(1, read_failure); - @Test(groups = "short") - public void should_try_next_host_on_client_timeouts() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - scassandras - .node(2).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result2"))) - .build()); - scassandras - .node(3).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result3"))) - .build()); - try { - query(); - fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()) - .hasOnlyElementsOfType(OperationTimedOutException.class) - .extractingResultOf("getMessage") - .containsOnlyOnce( - String.format("[%s] Timed out waiting for server response", host1.getSocketAddress()), - String.format("[%s] Timed out waiting for server response", host2.getSocketAddress()), - String.format("[%s] Timed out waiting for server response", host3.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(3, OperationTimedOutException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(3); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); 
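The client-timeout tests above shrink the per-request read timeout to 1 ms so that every host times out and the onRequestError path of the policy is exercised. In application code the same knob is normally set once on the builder; a rough sketch with an illustrative value:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.SocketOptions;

public class ReadTimeoutExample {
  public static void main(String[] args) {
    // Illustrative value: give each node 5 seconds to answer before the request fails
    // with an OperationTimedOutException and is handed to the retry policy.
    SocketOptions socketOptions = new SocketOptions().setReadTimeoutMillis(5000);

    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder contact point
            .withSocketOptions(socketOptions)
            .build();
    try {
      cluster.connect().execute("SELECT release_version FROM system.local");
    } finally {
      cluster.close();
    }
  }
}
```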
- assertQueried(3, 1); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + try { + query(); + fail("expected a ReadFailureException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(ReadFailureException.class); } + assertOnRequestErrorWasCalled(1, ReadFailureException.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_try_next_host_on_server_side_error(Result error, Class exception) { - simulateError(1, error); - simulateError(2, error); - simulateError(3, error); - try { - query(); - Fail.fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); - } - assertOnRequestErrorWasCalled(3, exception); - assertThat(errors.getOthers().getCount()).isEqualTo(3); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + @Test(groups = "short") + public void should_rethrow_on_write_failure() { + simulateError(1, write_failure); + + try { + query(); + fail("expected a WriteFailureException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(WriteFailureException.class); } + assertOnRequestErrorWasCalled(1, WriteFailureException.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_try_next_host_on_connection_error(ClosedConnectionConfig.CloseType closeType) { - simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); - try { - query(); - Fail.fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); - } - assertOnRequestErrorWasCalled(3, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + @Test(groups = "short", dataProvider = "connectionErrors") + public void should_try_next_host_on_connection_error(ClosedConnectionConfig.CloseType closeType) { + simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(2, closed_connection, new 
ClosedConnectionConfig(closeType)); + simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); + try { + query(); + Fail.fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); } + assertOnRequestErrorWasCalled(3, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } + + @Test(groups = "short") + public void should_rethrow_on_unavailable_if_CAS() { + simulateError(1, unavailable, new UnavailableConfig(1, 0, LOCAL_SERIAL)); + simulateError(2, unavailable, new UnavailableConfig(1, 0, LOCAL_SERIAL)); - @Test(groups = "short") - public void should_rethrow_on_unavailable_if_CAS() { - simulateError(1, unavailable, new UnavailableConfig(1, 0, LOCAL_SERIAL)); - simulateError(2, unavailable, new UnavailableConfig(1, 0, LOCAL_SERIAL)); - - try { - query(); - Assert.fail("expected an UnavailableException"); - } catch (UnavailableException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); - } - - assertOnUnavailableWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getUnavailables().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); + try { + query(); + Assert.fail("expected an UnavailableException"); + } catch (UnavailableException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.LOCAL_SERIAL); } + + assertOnUnavailableWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getUnavailables().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 0); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingLoadBalancingPolicy.java b/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingLoadBalancingPolicy.java index 58037669daa..538c55d0ca5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingLoadBalancingPolicy.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
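The assertions in the DefaultRetryPolicyIntegrationTest hunk above (assertOnReadTimeoutWasCalled, assertOnWriteTimeoutWasCalled, assertOnUnavailableWasCalled, assertOnRequestErrorWasCalled) correspond to the four callbacks of the 3.x RetryPolicy contract. A minimal custom policy is sketched below; the method signatures are quoted from memory of the 3.x API, and the decision logic is deliberately naive rather than a reproduction of DefaultRetryPolicy.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.WriteType;
import com.datastax.driver.core.exceptions.DriverException;
import com.datastax.driver.core.policies.RetryPolicy;

/** Retries each error category at most once, then rethrows. Illustration only. */
public class RetryOncePolicy implements RetryPolicy {

  @Override
  public RetryDecision onReadTimeout(
      Statement statement,
      ConsistencyLevel cl,
      int requiredResponses,
      int receivedResponses,
      boolean dataRetrieved,
      int nbRetry) {
    // Retry on the same host once if enough replicas replied but the data was missing.
    return (nbRetry == 0 && receivedResponses >= requiredResponses && !dataRetrieved)
        ? RetryDecision.retry(cl)
        : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onWriteTimeout(
      Statement statement,
      ConsistencyLevel cl,
      WriteType writeType,
      int requiredAcks,
      int receivedAcks,
      int nbRetry) {
    // Only batch-log writes are safe to retry blindly.
    return (nbRetry == 0 && writeType == WriteType.BATCH_LOG)
        ? RetryDecision.retry(cl)
        : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onUnavailable(
      Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
    // Another coordinator may help if this one was partitioned from the replicas.
    return nbRetry == 0 ? RetryDecision.tryNextHost(cl) : RetryDecision.rethrow();
  }

  @Override
  public RetryDecision onRequestError(
      Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) {
    // Client timeouts, connection errors and server-side errors all land here.
    return RetryDecision.tryNextHost(cl);
  }

  @Override
  public void init(Cluster cluster) {}

  @Override
  public void close() {}
}
```

Such a policy would be registered the same way the fixture registers DefaultRetryPolicy.INSTANCE, via Cluster.builder().withRetryPolicy(...).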
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,57 +21,57 @@ import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Statement; - import java.util.Collection; import java.util.Iterator; /** * Base class for tests that want to wrap a policy to add some instrumentation. - *

<p/>
- * NB: this is currently only used in tests, but could be provided as a convenience in the production code.
+ *
+ * <p>
    NB: this is currently only used in tests, but could be provided as a convenience in the + * production code. */ public abstract class DelegatingLoadBalancingPolicy implements ChainableLoadBalancingPolicy { - protected final LoadBalancingPolicy delegate; + protected final LoadBalancingPolicy delegate; - public DelegatingLoadBalancingPolicy(LoadBalancingPolicy delegate) { - this.delegate = delegate; - } + public DelegatingLoadBalancingPolicy(LoadBalancingPolicy delegate) { + this.delegate = delegate; + } - public void init(Cluster cluster, Collection hosts) { - delegate.init(cluster, hosts); - } + public void init(Cluster cluster, Collection hosts) { + delegate.init(cluster, hosts); + } - public HostDistance distance(Host host) { - return delegate.distance(host); - } + public HostDistance distance(Host host) { + return delegate.distance(host); + } - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - return delegate.newQueryPlan(loggedKeyspace, statement); - } + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + return delegate.newQueryPlan(loggedKeyspace, statement); + } - public void onAdd(Host host) { - delegate.onAdd(host); - } + public void onAdd(Host host) { + delegate.onAdd(host); + } - public void onUp(Host host) { - delegate.onUp(host); - } + public void onUp(Host host) { + delegate.onUp(host); + } - public void onDown(Host host) { - delegate.onDown(host); - } + public void onDown(Host host) { + delegate.onDown(host); + } - public void onRemove(Host host) { - delegate.onRemove(host); - } + public void onRemove(Host host) { + delegate.onRemove(host); + } - @Override - public LoadBalancingPolicy getChildPolicy() { - return delegate; - } + @Override + public LoadBalancingPolicy getChildPolicy() { + return delegate; + } - @Override - public void close() { - delegate.close(); - } -} \ No newline at end of file + @Override + public void close() { + delegate.close(); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingSpeculativeExecutionPolicy.java b/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingSpeculativeExecutionPolicy.java index e60c24ec604..24a43f45b81 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingSpeculativeExecutionPolicy.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/DelegatingSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,28 +22,29 @@ /** * Base class for tests that want to wrap a policy to add some instrumentation. - *

<p/>
- * NB: this is currently only used in tests, but could be provided as a convenience in the production code.
+ *
+ * <p>
    NB: this is currently only used in tests, but could be provided as a convenience in the + * production code. */ public abstract class DelegatingSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - private final SpeculativeExecutionPolicy delegate; + private final SpeculativeExecutionPolicy delegate; - protected DelegatingSpeculativeExecutionPolicy(SpeculativeExecutionPolicy delegate) { - this.delegate = delegate; - } + protected DelegatingSpeculativeExecutionPolicy(SpeculativeExecutionPolicy delegate) { + this.delegate = delegate; + } - @Override - public void init(Cluster cluster) { - delegate.init(cluster); - } + @Override + public void init(Cluster cluster) { + delegate.init(cluster); + } - @Override - public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { - return delegate.newPlan(loggedKeyspace, statement); - } + @Override + public SpeculativeExecutionPlan newPlan(String loggedKeyspace, Statement statement) { + return delegate.newPlan(loggedKeyspace, statement); + } - @Override - public void close() { - delegate.close(); - } + @Override + public void close() { + delegate.close(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicyIntegrationTest.java index 891c1b9d23e..7d81152414f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
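DelegatingLoadBalancingPolicy and DelegatingSpeculativeExecutionPolicy above exist so tests can wrap a real policy and observe its calls without changing behaviour. A concrete wrapper might look like the following sketch; the class name and counter are illustrative, and it assumes the class sits alongside the base class in the test sources.

```java
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicLong;

/** Counts how many query plans the wrapped policy produced. Test instrumentation only. */
public class CountingLoadBalancingPolicy extends DelegatingLoadBalancingPolicy {

  private final AtomicLong plans = new AtomicLong();

  public CountingLoadBalancingPolicy(LoadBalancingPolicy delegate) {
    super(delegate);
  }

  @Override
  public Iterator<Host> newQueryPlan(String loggedKeyspace, Statement statement) {
    plans.incrementAndGet();
    return super.newQueryPlan(loggedKeyspace, statement);
  }

  public long queryPlanCount() {
    return plans.get();
  }
}
```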
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,475 +17,546 @@ */ package com.datastax.driver.core.policies; +import static org.assertj.core.api.Assertions.assertThat; +import static org.scassandra.http.client.Consistency.SERIAL; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.closed_connection; +import static org.scassandra.http.client.Result.read_failure; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_failure; +import static org.scassandra.http.client.Result.write_request_timeout; +import static org.scassandra.http.client.WriteTypePrime.UNLOGGED_BATCH; +import static org.testng.Assert.fail; + import com.datastax.driver.core.ConsistencyLevel; import com.datastax.driver.core.SocketOptions; import com.datastax.driver.core.WriteType; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.ReadFailureException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.TransportException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteFailureException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import org.assertj.core.api.Assertions; import org.assertj.core.api.Fail; -import org.scassandra.http.client.*; +import org.scassandra.http.client.ClosedConnectionConfig; +import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.ReadTimeoutConfig; +import org.scassandra.http.client.Result; +import org.scassandra.http.client.UnavailableConfig; +import org.scassandra.http.client.WriteTimeoutConfig; +import org.scassandra.http.client.WriteTypePrime; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; -import static org.scassandra.http.client.Consistency.SERIAL; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; -import static org.scassandra.http.client.WriteTypePrime.UNLOGGED_BATCH; -import static org.testng.Assert.fail; - -public class DowngradingConsistencyRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - - public DowngradingConsistencyRetryPolicyIntegrationTest() { - super(DowngradingConsistencyRetryPolicy.INSTANCE); +@SuppressWarnings("deprecation") +public class DowngradingConsistencyRetryPolicyIntegrationTest + extends AbstractRetryPolicyIntegrationTest { + + public DowngradingConsistencyRetryPolicyIntegrationTest() { + super(DowngradingConsistencyRetryPolicy.INSTANCE); + } + + /** + * @return An array of pairs that match # of alive replicas with the expected downgraded CL used + * on read/write/unavailable. 
+ */ + @DataProvider + public static Object[][] consistencyLevels() { + return new Object[][] { + {4, ConsistencyLevel.THREE}, + {3, ConsistencyLevel.THREE}, + {2, ConsistencyLevel.TWO}, + {1, ConsistencyLevel.ONE} + }; + } + + /** @return Write Types for which we expect a rethrow if used and there are no received acks. */ + @DataProvider + public static Object[][] rethrowWriteTypes() { + return new Object[][] { + {WriteTypePrime.SIMPLE}, + {WriteTypePrime.BATCH}, + {WriteTypePrime.COUNTER}, + {WriteTypePrime.CAS} + }; + } + + /** @return Write Types for which we expect an ignore if used and there are received acks. */ + @DataProvider + public static Object[][] ignoreWriteTypesWithReceivedAcks() { + return new Object[][] {{WriteTypePrime.SIMPLE}, {WriteTypePrime.BATCH}}; + } + + /** + * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a + * retry is reattempted with {@link ConsistencyLevel#ONE} if the consistency level on the + * statement executed is {@link ConsistencyLevel#EACH_QUORUM}, even if the number of known alive + * replicas was 0. + * + * @jira_ticket JAVA-1005 + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_retry_once_on_same_host_from_each_quorum_to_one() { + simulateError(1, read_request_timeout, new ReadTimeoutConfig(0, 3, false)); + + try { + queryWithCL(ConsistencyLevel.EACH_QUORUM); + } catch (ReadTimeoutException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.ONE); } - /** - * @return An array of pairs that match # of alive replicas with the expected downgraded CL used on read/write/unavailable. - */ - @DataProvider - public static Object[][] consistencyLevels() { - return new Object[][]{ - {4, ConsistencyLevel.THREE}, - {3, ConsistencyLevel.THREE}, - {2, ConsistencyLevel.TWO}, - {1, ConsistencyLevel.ONE} - }; + assertOnReadTimeoutWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a + * retry is reattempted with a {@link ConsistencyLevel} that matches min(received + * acknowledgements, THREE) and is only retried once. + * + * @param received The number of received acknowledgements to use in read timeout. + * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. + * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "consistencyLevels") + public void should_retry_once_on_same_host_with_reduced_consistency_level_on_read_timeout( + int received, ConsistencyLevel expectedDowngradedCL) { + simulateError(1, read_request_timeout, new ReadTimeoutConfig(received, received + 1, true)); + + try { + query(); + fail("expected an ReadTimeoutException"); + } catch (ReadTimeoutException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); } - - /** - * @return Write Types for which we expect a rethrow if used and there are no received acks. 
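The consistencyLevels data provider above encodes the downgrade these tests expect: the retried consistency level is effectively min(known replicas, THREE). A standalone illustration of that mapping, not the policy's actual implementation:

```java
import com.datastax.driver.core.ConsistencyLevel;

public final class DowngradeRule {

  /** Mirrors the expectation table: 1 -> ONE, 2 -> TWO, 3 or more -> THREE. Illustration only. */
  static ConsistencyLevel downgradeTo(int knownReplicas) {
    if (knownReplicas >= 3) {
      return ConsistencyLevel.THREE;
    } else if (knownReplicas == 2) {
      return ConsistencyLevel.TWO;
    } else if (knownReplicas == 1) {
      return ConsistencyLevel.ONE;
    }
    // With no replicas to fall back on, the policy rethrows instead of retrying.
    throw new IllegalArgumentException("no replicas available, nothing to downgrade to");
  }

  public static void main(String[] args) {
    // Matches the data provider rows {4, THREE}, {3, THREE}, {2, TWO}, {1, ONE}.
    for (int replicas = 1; replicas <= 4; replicas++) {
      System.out.println(replicas + " -> " + downgradeTo(replicas));
    }
  }
}
```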
- */ - @DataProvider - public static Object[][] rethrowWriteTypes() { - return new Object[][]{ - {WriteTypePrime.SIMPLE}, - {WriteTypePrime.BATCH}, - {WriteTypePrime.COUNTER}, - {WriteTypePrime.CAS} - }; + assertOnReadTimeoutWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a + * retry is reattempted if data was not retrieved, but enough replicas were alive to handle the + * request. + * + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_retry_once_if_not_data_was_retrieved_and_enough_replicas_alive() { + simulateError(1, read_request_timeout, new ReadTimeoutConfig(1, 1, false)); + + try { + query(); + fail("expected an ReadTimeoutException"); + } catch (ReadTimeoutException e) { + /*expected*/ } - - /** - * @return Write Types for which we expect an ignore if used and there are received acks. - */ - @DataProvider - public static Object[][] ignoreWriteTypesWithReceivedAcks() { - return new Object[][]{ - {WriteTypePrime.SIMPLE}, - {WriteTypePrime.BATCH} - }; + assertOnReadTimeoutWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a + * retry is not attempted if no replicas were alive. In a real scenario, this would not be + * expected as we'd anticipate an {@link UnavailableException} instead. + * + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_rethrow_if_no_hosts_alive_on_read_timeout() { + simulateError(1, read_request_timeout); + + try { + query(); + fail("expected a ReadTimeoutException"); + } catch (ReadTimeoutException e) { + /*expected*/ } - /** - * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * reattempted with {@link ConsistencyLevel#ONE} if the consistency level on the statement executed is - * {@link ConsistencyLevel#EACH_QUORUM}, even if the number of known alive replicas was 0. 
- * - * @jira_ticket JAVA-1005 - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_retry_once_on_same_host_from_each_quorum_to_one() { - simulateError(1, read_request_timeout, new ReadTimeoutConfig(0, 3, false)); - - try { - queryWithCL(ConsistencyLevel.EACH_QUORUM); - } catch (ReadTimeoutException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.ONE); - } - - assertOnReadTimeoutWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnReadTimeoutWasCalled(1); + assertThat(errors.getReadTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that + * it rethrows the exception if the {@link WriteType} was any of {@link #rethrowWriteTypes}. + * + * @param writeType writeType communicated by {@link WriteTimeoutException}. + * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "rethrowWriteTypes") + public void should_rethrow_on_write_timeout_with_write_type(WriteTypePrime writeType) { + simulateError(1, write_request_timeout, new WriteTimeoutConfig(writeType, 0, 2)); + + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + /*expected*/ } - /** - * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * reattempted with a {@link ConsistencyLevel} that matches min(received acknowledgements, THREE) and is only - * retried once. - * - * @param received The number of received acknowledgements to use in read timeout. - * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. - * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "consistencyLevels") - public void should_retry_once_on_same_host_with_reduced_consistency_level_on_read_timeout(int received, ConsistencyLevel expectedDowngradedCL) { - simulateError(1, read_request_timeout, new ReadTimeoutConfig(received, received + 1, true)); - - try { - query(); - fail("expected an ReadTimeoutException"); - } catch (ReadTimeoutException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); - } - - assertOnReadTimeoutWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnWriteTimeoutWasCalled(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that + * it ignores the exception if the {@link WriteType} was any of {@link + * #ignoreWriteTypesWithReceivedAcks} and we received acks some at least one replica. 
+ * + * @param writeType writeType communicated by {@link WriteTimeoutException}. + * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "ignoreWriteTypesWithReceivedAcks") + public void should_ignore_on_write_timeout_with_write_type_and_received_acks( + WriteTypePrime writeType) { + simulateError(1, write_request_timeout, new WriteTimeoutConfig(writeType, 1, 2)); + + query(); + + assertOnWriteTimeoutWasCalled(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); + assertThat(errors.getIgnoresOnWriteTimeout().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that + * a retry is attempted on the same host if the {@link WriteType} is {@link WriteType#BATCH_LOG}. + * + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_retry_once_on_same_host_with_BATCH_LOG_write_type() { + simulateError(1, write_request_timeout, new WriteTimeoutConfig(WriteTypePrime.BATCH_LOG, 1, 2)); + + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + /*expected*/ } - - /** - * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * reattempted if data was not retrieved, but enough replicas were alive to handle the request. - * - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_retry_once_if_not_data_was_retrieved_and_enough_replicas_alive() { - simulateError(1, read_request_timeout, new ReadTimeoutConfig(1, 1, false)); - - try { - query(); - fail("expected an ReadTimeoutException"); - } catch (ReadTimeoutException e) {/*expected*/} - - assertOnReadTimeoutWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getReadTimeouts().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnWriteTimeoutWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that + * a retry is attempted on the same host with a reduced consistency level that matches + * min(received acknowledgments, THREE) if the {@link WriteType} is {@link + * WriteType#UNLOGGED_BATCH} and is only retries once. + * + * @param alive The number of received acknowledgements to use in write timeout. + * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. 
+ * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "consistencyLevels") + public void should_retry_once_on_same_host_with_reduced_consistency_level_on_write_timeout( + int alive, ConsistencyLevel expectedDowngradedCL) { + simulateError( + 1, write_request_timeout, new WriteTimeoutConfig(UNLOGGED_BATCH, alive, alive + 1)); + + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); } - - /** - * Ensures that when handling a read timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is not - * attempted if no replicas were alive. In a real scenario, this would not be expected as we'd anticipate an - * {@link UnavailableException} instead. - * - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_rethrow_if_no_hosts_alive_on_read_timeout() { - simulateError(1, read_request_timeout); - - try { - query(); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException e) {/*expected*/ } - - assertOnReadTimeoutWasCalled(1); - assertThat(errors.getReadTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnWriteTimeoutWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling an unavailable with {@link DowngradingConsistencyRetryPolicy} that a + * retry is reattempted with a {@link ConsistencyLevel} that matches min(received + * acknowledgements, THREE) and is only retried once. + * + * @param alive The number of received acknowledgements to use in unavailable. + * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. + * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "consistencyLevels") + public void should_retry_once_on_same_host_with_reduced_consistency_level_on_unavailable( + int alive, ConsistencyLevel expectedDowngradedCL) { + simulateError(1, unavailable, new UnavailableConfig(alive + 1, alive)); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); } - - /** - * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that it rethrows - * the exception if the {@link WriteType} was any of {@link #rethrowWriteTypes}. - * - * @param writeType writeType communicated by {@link WriteTimeoutException}. 
- * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "rethrowWriteTypes") - public void should_rethrow_on_write_timeout_with_write_type(WriteTypePrime writeType) { - simulateError(1, write_request_timeout, new WriteTimeoutConfig(writeType, 0, 2)); - - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) {/*expected*/} - - assertOnWriteTimeoutWasCalled(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnUnavailableWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getUnavailables().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 2); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling an unavailable with {@link DowngradingConsistencyRetryPolicy} that a + * retry is is not reattempted if no replicas are alive. + * + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_rethrow_if_no_hosts_alive_on_unavailable() { + simulateError(1, unavailable, new UnavailableConfig(1, 0)); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + /*expected*/ } - - /** - * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that it ignores - * the exception if the {@link WriteType} was any of {@link #ignoreWriteTypesWithReceivedAcks} and we received acks - * some at least one replica. - * - * @param writeType writeType communicated by {@link WriteTimeoutException}. - * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "ignoreWriteTypesWithReceivedAcks") - public void should_ignore_on_write_timeout_with_write_type_and_received_acks(WriteTypePrime writeType) { - simulateError(1, write_request_timeout, new WriteTimeoutConfig(writeType, 1, 2)); - + assertOnUnavailableWasCalled(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getUnavailables().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + /** + * Ensures that when handling a client timeout with {@link DowngradingConsistencyRetryPolicy} that + * a retry is attempted on the next host until all hosts are tried at which point a {@link + * NoHostAvailableException} is returned. 
+ * + * @test_category retry_policy + */ + @Test(groups = "short") + public void should_try_next_host_on_client_timeouts() { + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + scassandras + .node(2) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result2"))) + .build()); + scassandras + .node(3) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result3"))) + .build()); + try { query(); - - assertOnWriteTimeoutWasCalled(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); - assertThat(errors.getIgnoresOnWriteTimeout().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } - - - /** - * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * attempted on the same host if the {@link WriteType} is {@link WriteType#BATCH_LOG}. - * - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_retry_once_on_same_host_with_BATCH_LOG_write_type() { - simulateError(1, write_request_timeout, new WriteTimeoutConfig(WriteTypePrime.BATCH_LOG, 1, 2)); - - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) {/*expected*/} - - assertOnWriteTimeoutWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); - } - - - /** - * Ensures that when handling a write timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * attempted on the same host with a reduced consistency level that matches min(received acknowledgments, THREE) - * if the {@link WriteType} is {@link WriteType#UNLOGGED_BATCH} and is only retries once. - * - * @param alive The number of received acknowledgements to use in write timeout. - * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. 
- * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "consistencyLevels") - public void should_retry_once_on_same_host_with_reduced_consistency_level_on_write_timeout(int alive, ConsistencyLevel expectedDowngradedCL) { - simulateError(1, write_request_timeout, new WriteTimeoutConfig(UNLOGGED_BATCH, alive, alive + 1)); - - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); - } - - assertOnWriteTimeoutWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + Assertions.fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()) + .hasOnlyElementsOfType(OperationTimedOutException.class) + .extractingResultOf("getMessage") + .containsOnlyOnce( + String.format("[%s] Timed out waiting for server response", host1.getEndPoint()), + String.format("[%s] Timed out waiting for server response", host2.getEndPoint()), + String.format("[%s] Timed out waiting for server response", host3.getEndPoint())); + } + assertOnRequestErrorWasCalled(3, OperationTimedOutException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(3); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } - - - /** - * Ensures that when handling an unavailable with {@link DowngradingConsistencyRetryPolicy} that a retry is - * reattempted with a {@link ConsistencyLevel} that matches min(received acknowledgements, THREE) and is only - * retried once. - * - * @param alive The number of received acknowledgements to use in unavailable. - * @param expectedDowngradedCL The consistency level that is expected to be used on the retry. - * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "consistencyLevels") - public void should_retry_once_on_same_host_with_reduced_consistency_level_on_unavailable(int alive, ConsistencyLevel expectedDowngradedCL) { - simulateError(1, unavailable, new UnavailableConfig(alive + 1, alive)); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(expectedDowngradedCL); - } - - assertOnUnavailableWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getUnavailables().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 2); - assertQueried(2, 0); - assertQueried(3, 0); + } + + /** + * Ensures that when handling a server error defined in {@link #serverSideErrors} with {@link + * DowngradingConsistencyRetryPolicy} that a retry is attempted on the next host until all hosts + * are tried at which point a {@link NoHostAvailableException} is raised and it's errors include + * the expected exception. + * + * @param error Server side error to be produced. 
+ * @param exception The exception we expect to be raised. + * @test_category retry_policy + */ + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_try_next_host_on_server_side_error( + Result error, Class exception) { + simulateError(1, error); + simulateError(2, error); + simulateError(3, error); + try { + query(); + Fail.fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); } - - - /** - * Ensures that when handling an unavailable with {@link DowngradingConsistencyRetryPolicy} that a retry is - * is not reattempted if no replicas are alive. - * - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_rethrow_if_no_hosts_alive_on_unavailable() { - simulateError(1, unavailable, new UnavailableConfig(1, 0)); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) {/*expected*/} - - assertOnUnavailableWasCalled(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getUnavailables().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnRequestErrorWasCalled(3, exception); + assertThat(errors.getOthers().getCount()).isEqualTo(3); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } + + /** + * Ensures that when handling a connection error caused by the connection closing during a request + * in a way described by {@link #connectionErrors} that the next host is tried. + * + * @param closeType The way the connection should be closed during the request. + */ + @Test(groups = "short", dataProvider = "connectionErrors") + public void should_try_next_host_on_connection_error(ClosedConnectionConfig.CloseType closeType) { + simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); + try { + query(); + Fail.fail("expected a TransportException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); } - - /** - * Ensures that when handling a client timeout with {@link DowngradingConsistencyRetryPolicy} that a retry is - * attempted on the next host until all hosts are tried at which point a {@link NoHostAvailableException} is - * returned. 
- * - * @test_category retry_policy - */ - @Test(groups = "short") - public void should_try_next_host_on_client_timeouts() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - scassandras - .node(2).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result2"))) - .build()); - scassandras - .node(3).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result3"))) - .build()); - try { - query(); - Assertions.fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()) - .hasOnlyElementsOfType(OperationTimedOutException.class) - .extractingResultOf("getMessage") - .containsOnlyOnce( - String.format("[%s] Timed out waiting for server response", host1.getSocketAddress()), - String.format("[%s] Timed out waiting for server response", host2.getSocketAddress()), - String.format("[%s] Timed out waiting for server response", host3.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(3, OperationTimedOutException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(3); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + assertOnRequestErrorWasCalled(3, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } + + @Test(groups = "short") + public void should_rethrow_on_unavailable_if_CAS() { + simulateError(1, unavailable, new UnavailableConfig(1, 0, SERIAL)); + simulateError(2, unavailable, new UnavailableConfig(1, 0, SERIAL)); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.SERIAL); } - - /** - * Ensures that when handling a server error defined in {@link #serverSideErrors} with - * {@link DowngradingConsistencyRetryPolicy} that a retry is attempted on the next host until all hosts are tried - * at which point a {@link NoHostAvailableException} is raised and it's errors include the expected exception. - * - * @param error Server side error to be produced. - * @param exception The exception we expect to be raised. 
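The should_rethrow_on_unavailable_if_CAS tests in both files prime an unavailable response at SERIAL or LOCAL_SERIAL, the consistency level the driver uses for the Paxos phase of a conditional write, and neither retry policy downgrades it. In application code that level is set on the statement itself; a sketch with placeholder keyspace, table and values:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

public class CasStatementExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); // placeholder
    try {
      Session session = cluster.connect("ks"); // placeholder keyspace

      // Conditional (lightweight transaction) update: the IF clause triggers the Paxos round.
      SimpleStatement update =
          new SimpleStatement("UPDATE users SET email = 'a@b.c' WHERE id = 1 IF email = 'x@y.z'");
      update.setConsistencyLevel(ConsistencyLevel.QUORUM); // commit phase
      update.setSerialConsistencyLevel(ConsistencyLevel.SERIAL); // Paxos phase

      ResultSet rs = session.execute(update);
      // wasApplied() tells whether the IF condition held.
      System.out.println("applied: " + rs.wasApplied());
    } finally {
      cluster.close();
    }
  }
}
```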
- * @test_category retry_policy - */ - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_try_next_host_on_server_side_error(Result error, Class exception) { - simulateError(1, error); - simulateError(2, error); - simulateError(3, error); - try { - query(); - Fail.fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); - } - assertOnRequestErrorWasCalled(3, exception); - assertThat(errors.getOthers().getCount()).isEqualTo(3); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + assertOnUnavailableWasCalled(2); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getUnavailables().getCount()).isEqualTo(2); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_read_failure() { + simulateError(1, read_failure); + + try { + query(); + fail("expected a ReadFailureException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(ReadFailureException.class); } - - /** - * Ensures that when handling a connection error caused by the connection closing during a request in a way - * described by {@link #connectionErrors} that the next host is tried. - * - * @param closeType The way the connection should be closed during the request. - */ - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_try_next_host_on_connection_error(ClosedConnectionConfig.CloseType closeType) { - simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); - try { - query(); - Fail.fail("expected a TransportException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); - } - assertOnRequestErrorWasCalled(3, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + assertOnRequestErrorWasCalled(1, ReadFailureException.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_write_failure() { + simulateError(1, write_failure); + + try { + query(); + fail("expected a WriteFailureException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(WriteFailureException.class); } - @Test(groups = "short") - public void 
should_rethrow_on_unavailable_if_CAS() { - simulateError(1, unavailable, new UnavailableConfig(1, 0, SERIAL)); - simulateError(2, unavailable, new UnavailableConfig(1, 0, SERIAL)); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) { - assertThat(e.getConsistencyLevel()).isEqualTo(ConsistencyLevel.SERIAL); - } - - assertOnUnavailableWasCalled(2); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getUnavailables().getCount()).isEqualTo(2); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); - } + assertOnRequestErrorWasCalled(1, WriteFailureException.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslatorTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslatorTest.java index 014341620c6..794bf7fc134 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslatorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslatorTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,83 +17,80 @@ */ package com.datastax.driver.core.policies; -import org.testng.annotations.Test; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.net.InetAddress; +import java.net.InetSocketAddress; import javax.naming.NamingException; import javax.naming.directory.BasicAttributes; import javax.naming.directory.InitialDirContext; -import java.net.InetAddress; -import java.net.InetSocketAddress; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.*; +import org.testng.annotations.Test; public class EC2MultiRegionAddressTranslatorTest { - @Test(groups = "unit") - public void should_return_same_address_when_no_entry_found() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))) - .thenReturn(new BasicAttributes()); - EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); + @Test(groups = "unit") + public void should_return_same_address_when_no_entry_found() throws Exception { + InitialDirContext mock = mock(InitialDirContext.class); + when(mock.getAttributes(anyString(), any(String[].class))).thenReturn(new BasicAttributes()); + EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } + InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); + assertThat(translator.translate(address)).isEqualTo(address); + } - @Test(groups = "unit") - public void should_return_same_address_when_exception_encountered() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))) - .thenThrow(new NamingException("Problem resolving address (not really).")); - EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); + @Test(groups = "unit") + public void should_return_same_address_when_exception_encountered() throws Exception { + InitialDirContext mock = mock(InitialDirContext.class); + when(mock.getAttributes(anyString(), any(String[].class))) + .thenThrow(new NamingException("Problem resolving address (not really).")); + EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } + InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); + assertThat(translator.translate(address)).isEqualTo(address); + } - @Test(groups = "unit") - public void should_return_new_address_when_match_found() throws Exception { - InetSocketAddress expectedAddress = new InetSocketAddress("54.32.55.66", 9042); + @Test(groups = "unit") + public void should_return_new_address_when_match_found() throws Exception { + InetSocketAddress expectedAddress = new 
InetSocketAddress("54.32.55.66", 9042); - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes("5.2.0.192.in-addr.arpa", new String[]{"PTR"})) - .thenReturn(new BasicAttributes("PTR", expectedAddress.getHostName())); - EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); + InitialDirContext mock = mock(InitialDirContext.class); + when(mock.getAttributes("5.2.0.192.in-addr.arpa", new String[] {"PTR"})) + .thenReturn(new BasicAttributes("PTR", expectedAddress.getHostName())); + EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(expectedAddress); - } + InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); + assertThat(translator.translate(address)).isEqualTo(expectedAddress); + } - @Test(groups = "unit") - public void should_close_context_when_closed() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); + @Test(groups = "unit") + public void should_close_context_when_closed() throws Exception { + InitialDirContext mock = mock(InitialDirContext.class); + EC2MultiRegionAddressTranslator translator = new EC2MultiRegionAddressTranslator(mock); - // ensure close has not been called to this point. - verify(mock, times(0)).close(); - translator.close(); - // ensure close is closed. - verify(mock).close(); - } + // ensure close has not been called to this point. + verify(mock, times(0)).close(); + translator.close(); + // ensure close is closed. + verify(mock).close(); + } - @Test(groups = "unit") - public void should_build_reversed_domain_name_for_ip_v4() throws Exception { - InetAddress address = InetAddress.getByName("192.0.2.5"); - assertThat( - EC2MultiRegionAddressTranslator.reverse(address) - ).isEqualTo( - "5.2.0.192.in-addr.arpa" - ); - } + @Test(groups = "unit") + public void should_build_reversed_domain_name_for_ip_v4() throws Exception { + InetAddress address = InetAddress.getByName("192.0.2.5"); + assertThat(EC2MultiRegionAddressTranslator.reverse(address)) + .isEqualTo("5.2.0.192.in-addr.arpa"); + } - @Test(groups = "unit") - public void should_build_reversed_domain_name_for_ip_v6() throws Exception { - InetAddress address = InetAddress.getByName("2001:db8::567:89ab"); - assertThat( - EC2MultiRegionAddressTranslator.reverse(address) - ).isEqualTo( - "b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" - ); - } -} \ No newline at end of file + @Test(groups = "unit") + public void should_build_reversed_domain_name_for_ip_v6() throws Exception { + InetAddress address = InetAddress.getByName("2001:db8::567:89ab"); + assertThat(EC2MultiRegionAddressTranslator.reverse(address)) + .isEqualTo("b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa"); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/ErrorAwarePolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/ErrorAwarePolicyIntegrationTest.java index 9e01658e125..ddfb57b2b64 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/ErrorAwarePolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/ErrorAwarePolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,364 +17,400 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.*; -import org.scassandra.http.client.Result; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; import static java.util.concurrent.TimeUnit.SECONDS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.scassandra.http.client.PrimingRequest.queryBuilder; import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; +import static org.scassandra.http.client.Result.already_exists; +import static org.scassandra.http.client.Result.invalid; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.success; +import static org.scassandra.http.client.Result.syntax_error; +import static org.scassandra.http.client.Result.unauthorized; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConditionChecker; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.LatencyTracker; +import com.datastax.driver.core.QueryTracker; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SortingLoadBalancingPolicy; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.UnauthorizedException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.scassandra.http.client.Result; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; public class ErrorAwarePolicyIntegrationTest { - private QueryTracker queryTracker; - private Clock clock; - private ScassandraCluster sCluster; - private 
AtomicInteger errorCounter; - private LatencyTracker latencyTracker; - - @BeforeMethod(groups = "short") - public void setUp() { - queryTracker = new QueryTracker(); - clock = mock(Clock.class); - sCluster = ScassandraCluster.builder().withNodes(2).build(); - sCluster.init(); - errorCounter = new AtomicInteger(0); - latencyTracker = new LatencyTracker() { - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - if (exception != null) - errorCounter.incrementAndGet(); - } - - @Override - public void onRegister(Cluster cluster) { - } - - @Override - public void onUnregister(Cluster cluster) { - } + private QueryTracker queryTracker; + private Clock clock; + private ScassandraCluster sCluster; + private AtomicInteger errorCounter; + private LatencyTracker latencyTracker; + + @BeforeMethod(groups = "short") + public void setUp() { + queryTracker = new QueryTracker(); + clock = mock(Clock.class); + sCluster = ScassandraCluster.builder().withNodes(2).build(); + sCluster.init(); + errorCounter = new AtomicInteger(0); + latencyTracker = + new LatencyTracker() { + @Override + public void update( + Host host, Statement statement, Exception exception, long newLatencyNanos) { + if (exception != null) errorCounter.incrementAndGet(); + } + + @Override + public void onRegister(Cluster cluster) {} + + @Override + public void onUnregister(Cluster cluster) {} }; - // By default node 1 should always fail, 2 succeed. - prime(1, unauthorized); - prime(2, success); + // By default node 1 should always fail, 2 succeed. + prime(1, unauthorized); + prime(2, success); + } + + @AfterMethod(groups = "short") + public void tearDown() { + sCluster.stop(); + } + + private Cluster.Builder builder(LoadBalancingPolicy lbp) { + return Cluster.builder() + .withNettyOptions(nonQuietClusterCloseOptions) + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(lbp); + } + + private void prime(int node, Result result) { + sCluster + .node(node) + .primingClient() + .prime( + queryBuilder() + .withQuery(QueryTracker.QUERY) + .withThen(then().withResult(result)) + .build()); + } + + /** + * Checks that {@link LatencyTracker#update} was called at least expectedCount times within 5 + * seconds. + * + *

    Note that the usefulness of this is dependent on the {@link + * ErrorAwarePolicy.PerHostErrorTracker} being invoked before the latency tracker being used to + * track error invocations in this test. The existing implementation seems to invoke update on + * latency trackers in order that they are registered. + * + * @param expectedCount Expected number of errors to have been invoked. + */ + private void awaitTrackerUpdate(final int expectedCount) { + ConditionChecker.check() + .every(10) + .that( + new Callable() { + @Override + public Boolean call() throws Exception { + return errorCounter.get() >= expectedCount; + } + }) + .before(5000) + .becomesTrue(); + } + + private void setTime(long time, TimeUnit timeUnit) { + when(clock.nanoTime()).thenReturn(TimeUnit.NANOSECONDS.convert(time, timeUnit)); + } + + /** + * Validates that {@link ErrorAwarePolicy} properly excludes a host after the maximum number of + * errors is exceeded. + * + *

    This test configures a maximum of 1 error per minute and executes 2 failing queries against + * host1 during the first 5 simulated seconds. host1 should be excluded as soon as the rolling + * count updates over the next 5-second interval. + * + *

    It then makes another query and ensures it is executed against host2 and that the response + * was successful. + * + * @jira_ticket JAVA-1055 + * @test_category load_balancing:error_aware + * @since 3.1.0 + */ + @Test(groups = "short") + public void should_exclude_host_after_reaching_maximum_errors() throws InterruptedException { + LoadBalancingPolicy lbp = + ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) + .withMaxErrorsPerMinute(1) + .withClock(clock) + .build(); + + Cluster cluster = builder(lbp).build(); + + try { + Session session = cluster.connect(); + cluster.register(latencyTracker); + + setTime(0, SECONDS); + // Make 2 queries producing a count higher than the threshold + queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); + awaitTrackerUpdate(2); + + // Advance time so that RollingCount ticks and updates its count. + setTime(5, SECONDS); + + // The next query should succeed and hit node 2 since node 1 is now ignored. + queryTracker.query(session, 1, sCluster.address(2)); + } finally { + cluster.close(); } - - @AfterMethod(groups = "short") - public void tearDown() { - sCluster.stop(); - } - - private Cluster.Builder builder(LoadBalancingPolicy lbp) { - return Cluster.builder() - .withNettyOptions(nonQuietClusterCloseOptions) - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(lbp); - } - - private void prime(int node, Result result) { - sCluster.node(node).primingClient().prime( - queryBuilder() - .withQuery(QueryTracker.QUERY) - .withThen(then().withResult(result)) - .build() - ); - } - - /** - * Checks that {@link LatencyTracker#update} was called at least expectedCount times within 5 seconds. - *

    - * Note that the usefulness of this is dependent on the {@link ErrorAwarePolicy.PerHostErrorTracker} being invoked - * before the latency tracker being used to track error invocations in this test. The existing implementation - * seems to invoke update on latency trackers in order that they are registered. - * - * @param expectedCount Expected number of errors to have been invoked. - */ - private void awaitTrackerUpdate(final int expectedCount) { - ConditionChecker.check() - .every(10) - .that(new Callable() { - @Override - public Boolean call() throws Exception { - return errorCounter.get() >= expectedCount; - } - }) - .before(5000) - .becomesTrue(); - } - - private void setTime(long time, TimeUnit timeUnit) { - when(clock.nanoTime()).thenReturn(TimeUnit.NANOSECONDS.convert(time, timeUnit)); - } - - /** - * Validates that {@link ErrorAwarePolicy} properly excludes a host after the maximum number of errors is exceeded. - *

    - * This test configures a maximum of 1 error per minute and executes 2 failing queries against host1 during the - * first 5 simulated seconds. host1 should be excluded as soon as the rolling count updates over the next 5-second - * interval. - *

    - * It then makes another query and ensures it is executed against host2 and that the response was successful. - * - * @jira_ticket JAVA-1055 - * @test_category load_balancing:error_aware - * @since 3.1.0 - */ - @Test(groups = "short") - public void should_exclude_host_after_reaching_maximum_errors() throws InterruptedException { - LoadBalancingPolicy lbp = ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) - .withMaxErrorsPerMinute(1) - .withClock(clock) - .build(); - - Cluster cluster = builder(lbp).build(); - - try { - Session session = cluster.connect(); - cluster.register(latencyTracker); - - setTime(0, SECONDS); - // Make 2 queries producing a count higher than the threshold - queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); - awaitTrackerUpdate(2); - - // Advance time so that RollingCount ticks and updates its count. - setTime(5, SECONDS); - - // The next query should succeed and hit node 2 since node 1 is now ignored. - queryTracker.query(session, 1, sCluster.address(2)); - } finally { - cluster.close(); - } - } - - /** - * Validates that {@link ErrorAwarePolicy} will include a previously excluded host after the configured retry - * period has elapsed. - *

    - * The test executes queries with error to get host1 excluded. It then executes queries over 70 simulated seconds - * and then executes another query after this time has elapsed and ensures that the next query execution uses host1. - * - * @jira_ticket JAVA-1055 - * @test_category load_balancing:error_aware - * @since 3.1.0 - */ - @Test(groups = "short") - public void should_resurrect_host_after_retry_period() throws InterruptedException { - LoadBalancingPolicy lbp = ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) - .withMaxErrorsPerMinute(1) - .withRetryPeriod(70, SECONDS) - .withClock(clock) - .build(); - - Cluster cluster = builder(lbp).build(); - try { - Session session = cluster.connect(); - cluster.register(latencyTracker); - - setTime(0, SECONDS); - // Make 2 queries producing a count higher than the threshold - queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); - awaitTrackerUpdate(2); - - // Advance time so that RollingCount ticks and updates its count. - setTime(5, SECONDS); - - // Execute some queries, these should all succeed and hit host2 since host1 is excluded. - queryTracker.query(session, 5, sCluster.address(2)); - - // Advance time after the retry period - setTime(75, SECONDS); - - // At this the load balancing policy should resurrect node 1 which will be used and fail. - queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); - - } finally { - cluster.close(); - } + } + + /** + * Validates that {@link ErrorAwarePolicy} will include a previously excluded host after the + * configured retry period has elapsed. + * + *

    The test executes queries with error to get host1 excluded. It then executes queries over 70 + * simulated seconds and then executes another query after this time has elapsed and ensures that + * the next query execution uses host1. + * + * @jira_ticket JAVA-1055 + * @test_category load_balancing:error_aware + * @since 3.1.0 + */ + @Test(groups = "short") + public void should_resurrect_host_after_retry_period() throws InterruptedException { + LoadBalancingPolicy lbp = + ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) + .withMaxErrorsPerMinute(1) + .withRetryPeriod(70, SECONDS) + .withClock(clock) + .build(); + + Cluster cluster = builder(lbp).build(); + try { + Session session = cluster.connect(); + cluster.register(latencyTracker); + + setTime(0, SECONDS); + // Make 2 queries producing a count higher than the threshold + queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); + awaitTrackerUpdate(2); + + // Advance time so that RollingCount ticks and updates its count. + setTime(5, SECONDS); + + // Execute some queries, these should all succeed and hit host2 since host1 is excluded. + queryTracker.query(session, 5, sCluster.address(2)); + + // Advance time after the retry period + setTime(75, SECONDS); + + // At this the load balancing policy should resurrect node 1 which will be used and fail. + queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); + + } finally { + cluster.close(); } - - /** - * Validates that {@link ErrorAwarePolicy} will not penalize errors that are not considered in the default - * {@link ErrorAwarePolicy.ErrorFilter} implementation. - *

    - * Executes 10 queries with each error type and ensures that host1 is used each time, verifying that it was - * never excluded. - * - * @jira_ticket JAVA-1055 - * @test_category load_balancing:error_aware - * @since 3.1.0 - */ - @Test(groups = "short") - public void should_not_penalize_default_ignored_exceptions() throws InterruptedException { - LoadBalancingPolicy lbp = ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) - .withMaxErrorsPerMinute(1) - .withClock(clock) - .build(); - - // Use fall through retry policy so other hosts aren't tried. - Cluster cluster = builder(lbp).withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); - try { - Session session = cluster.connect(); - cluster.register(latencyTracker); - - setTime(0, SECONDS); - // TODO: Add Read and Write Failure, FunctionExecution exception when Scassandra supports v4. - prime(1, read_request_timeout); - queryTracker.query(session, 10, ReadTimeoutException.class, sCluster.address(1)); - awaitTrackerUpdate(10); - - setTime(5, SECONDS); - - prime(1, write_request_timeout); - queryTracker.query(session, 10, WriteTimeoutException.class, sCluster.address(1)); - awaitTrackerUpdate(20); - - setTime(10, SECONDS); - - prime(1, unavailable); - queryTracker.query(session, 10, UnavailableException.class, sCluster.address(1)); - awaitTrackerUpdate(30); - - setTime(15, SECONDS); - - prime(1, already_exists); - queryTracker.query(session, 10, AlreadyExistsException.class, sCluster.address(1)); - awaitTrackerUpdate(40); - - setTime(20, SECONDS); - - prime(1, invalid); - queryTracker.query(session, 10, InvalidQueryException.class, sCluster.address(1)); - awaitTrackerUpdate(50); - - setTime(25, SECONDS); - - prime(1, syntax_error); - queryTracker.query(session, 10, SyntaxError.class, sCluster.address(1)); - awaitTrackerUpdate(60); - - setTime(30, SECONDS); - - // ensure host1 still used after another tick. - queryTracker.query(session, 10, SyntaxError.class, sCluster.address(1)); - } finally { - cluster.close(); - } + } + + /** + * Validates that {@link ErrorAwarePolicy} will not penalize errors that are not considered in the + * default {@link ErrorAwarePolicy.ErrorFilter} implementation. + * + *

    Executes 10 queries with each error type and ensures that host1 is used each time, verifying + * that it was never excluded. + * + * @jira_ticket JAVA-1055 + * @test_category load_balancing:error_aware + * @since 3.1.0 + */ + @Test(groups = "short") + public void should_not_penalize_default_ignored_exceptions() throws InterruptedException { + LoadBalancingPolicy lbp = + ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) + .withMaxErrorsPerMinute(1) + .withClock(clock) + .build(); + + // Use fall through retry policy so other hosts aren't tried. + Cluster cluster = builder(lbp).withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); + try { + Session session = cluster.connect(); + cluster.register(latencyTracker); + + setTime(0, SECONDS); + // TODO: Add Read and Write Failure, FunctionExecution exception when Scassandra supports v4. + prime(1, read_request_timeout); + queryTracker.query(session, 10, ReadTimeoutException.class, sCluster.address(1)); + awaitTrackerUpdate(10); + + setTime(5, SECONDS); + + prime(1, write_request_timeout); + queryTracker.query(session, 10, WriteTimeoutException.class, sCluster.address(1)); + awaitTrackerUpdate(20); + + setTime(10, SECONDS); + + prime(1, unavailable); + queryTracker.query(session, 10, UnavailableException.class, sCluster.address(1)); + awaitTrackerUpdate(30); + + setTime(15, SECONDS); + + prime(1, already_exists); + queryTracker.query(session, 10, AlreadyExistsException.class, sCluster.address(1)); + awaitTrackerUpdate(40); + + setTime(20, SECONDS); + + prime(1, invalid); + queryTracker.query(session, 10, InvalidQueryException.class, sCluster.address(1)); + awaitTrackerUpdate(50); + + setTime(25, SECONDS); + + prime(1, syntax_error); + queryTracker.query(session, 10, SyntaxError.class, sCluster.address(1)); + awaitTrackerUpdate(60); + + setTime(30, SECONDS); + + // ensure host1 still used after another tick. + queryTracker.query(session, 10, SyntaxError.class, sCluster.address(1)); + } finally { + cluster.close(); } - - /** - * Validates that {@link ErrorAwarePolicy} will regard a custom {@link ErrorAwarePolicy.ErrorFilter} by only - * penalizing a node when it produces exceptions that evaluate to true in the filter implementation. - *

    - * It first executes 10 queries with an error type that is not considered and verify that host1 is never excluded. - *

    - * It then executes queries with an error type that is considered and verifies that host1 is then excluded and host2 - * is used instead. - * - * @jira_ticket JAVA-1055 - * @test_category load_balancing:error_aware - * @since 3.1.0 - */ - @Test(groups = "short") - public void should_only_consider_exceptions_based_on_errors_filter() throws InterruptedException { - ErrorAwarePolicy.ErrorFilter iqeOnlyFilter = new ErrorAwarePolicy.ErrorFilter() { - @Override - public boolean shouldConsiderError(Exception e, Host host, Statement statement) { - return e.getClass().isAssignableFrom(InvalidQueryException.class); - } + } + + /** + * Validates that {@link ErrorAwarePolicy} will regard a custom {@link + * ErrorAwarePolicy.ErrorFilter} by only penalizing a node when it produces exceptions that + * evaluate to true in the filter implementation. + * + *

    It first executes 10 queries with an error type that is not considered and verify that host1 + * is never excluded. + * + *

    It then executes queries with an error type that is considered and verifies that host1 is + * then excluded and host2 is used instead. + * + * @jira_ticket JAVA-1055 + * @test_category load_balancing:error_aware + * @since 3.1.0 + */ + @Test(groups = "short") + public void should_only_consider_exceptions_based_on_errors_filter() throws InterruptedException { + ErrorAwarePolicy.ErrorFilter iqeOnlyFilter = + new ErrorAwarePolicy.ErrorFilter() { + @Override + public boolean shouldConsiderError(Exception e, Host host, Statement statement) { + return e.getClass().isAssignableFrom(InvalidQueryException.class); + } }; - LoadBalancingPolicy lbp = ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) - .withMaxErrorsPerMinute(1) - .withClock(clock) - .withErrorsFilter(iqeOnlyFilter) - .build(); - - // Use fall through retry policy so other hosts aren't tried. - Cluster cluster = builder(lbp).withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); - try { - Session session = cluster.connect(); - cluster.register(latencyTracker); - - setTime(0, SECONDS); - // UnauthorizedException evaluates to false in the filter, so it should not be considered. - prime(1, unauthorized); - queryTracker.query(session, 10, UnauthorizedException.class, sCluster.address(1)); - awaitTrackerUpdate(10); - - setTime(5, SECONDS); - // should still query host1 - queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); - - // InvalidQueryException evaluates to true, so it *should* be considered increment the count - prime(1, invalid); - queryTracker.query(session, 2, InvalidQueryException.class, sCluster.address(1)); - awaitTrackerUpdate(13); - - // Advance time so that the rolling count updates the next time we query. - // The first errors that were considered were at t = 5 seconds and this is when the rolling count was - // initialized, so we want to be at t > 5 + 5 for the rolling count to update - setTime(10, SECONDS); - - // The next query should succeed and hit node 2 since node 1 is now ignored. - queryTracker.query(session, 1, sCluster.address(2)); - } finally { - cluster.close(); - } + LoadBalancingPolicy lbp = + ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) + .withMaxErrorsPerMinute(1) + .withClock(clock) + .withErrorsFilter(iqeOnlyFilter) + .build(); + + // Use fall through retry policy so other hosts aren't tried. + Cluster cluster = builder(lbp).withRetryPolicy(FallthroughRetryPolicy.INSTANCE).build(); + try { + Session session = cluster.connect(); + cluster.register(latencyTracker); + + setTime(0, SECONDS); + // UnauthorizedException evaluates to false in the filter, so it should not be considered. + prime(1, unauthorized); + queryTracker.query(session, 10, UnauthorizedException.class, sCluster.address(1)); + awaitTrackerUpdate(10); + + setTime(5, SECONDS); + // should still query host1 + queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); + + // InvalidQueryException evaluates to true, so it *should* be considered increment the count + prime(1, invalid); + queryTracker.query(session, 2, InvalidQueryException.class, sCluster.address(1)); + awaitTrackerUpdate(13); + + // Advance time so that the rolling count updates the next time we query. + // The first errors that were considered were at t = 5 seconds and this is when the rolling + // count was + // initialized, so we want to be at t > 5 + 5 for the rolling count to update + setTime(10, SECONDS); + + // The next query should succeed and hit node 2 since node 1 is now ignored. 
+ queryTracker.query(session, 1, sCluster.address(2)); + } finally { + cluster.close(); } - - /** - * Validates that an {@link ErrorAwarePolicy} configured with its defaults behaves as documented, that being that - * the maximum number of errors is 1 and the retry period is 120 seconds. - * - * @jira_ticket JAVA-1055 - * @test_category load_balancing:error_aware - * @since 3.1.0 - */ - @Test(groups = "short") - public void should_regard_defaults() throws InterruptedException { - LoadBalancingPolicy lbp = ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()) - .withClock(clock) - .build(); - - Cluster cluster = builder(lbp).build(); - try { - Session session = cluster.connect(); - cluster.register(latencyTracker); - - setTime(0, SECONDS); - // Make 2 queries producing a count higher than the threshold - queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); - awaitTrackerUpdate(2); - - // Advance time so the rolling count ticks, next query should go to host 2 - setTime(5, SECONDS); - queryTracker.query(session, 5, sCluster.address(2)); - - // Advance clock 30 seconds, this is within the retry period so host 1 should still be excluded. - setTime(35, SECONDS); - queryTracker.query(session, 5, sCluster.address(2)); - - // At this point 120 seconds have elapsed, the load balancing policy should see that the retry period has - // elapsed and resurrect node 1 which will be used and fail. - setTime(125, SECONDS); - queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); - - } finally { - cluster.close(); - } + } + + /** + * Validates that an {@link ErrorAwarePolicy} configured with its defaults behaves as documented, + * that being that the maximum number of errors is 1 and the retry period is 120 seconds. + * + * @jira_ticket JAVA-1055 + * @test_category load_balancing:error_aware + * @since 3.1.0 + */ + @Test(groups = "short") + public void should_regard_defaults() throws InterruptedException { + LoadBalancingPolicy lbp = + ErrorAwarePolicy.builder(new SortingLoadBalancingPolicy()).withClock(clock).build(); + + Cluster cluster = builder(lbp).build(); + try { + Session session = cluster.connect(); + cluster.register(latencyTracker); + + setTime(0, SECONDS); + // Make 2 queries producing a count higher than the threshold + queryTracker.query(session, 2, UnauthorizedException.class, sCluster.address(1)); + awaitTrackerUpdate(2); + + // Advance time so the rolling count ticks, next query should go to host 2 + setTime(5, SECONDS); + queryTracker.query(session, 5, sCluster.address(2)); + + // Advance clock 30 seconds, this is within the retry period so host 1 should still be + // excluded. + setTime(35, SECONDS); + queryTracker.query(session, 5, sCluster.address(2)); + + // At this point 120 seconds have elapsed, the load balancing policy should see that the retry + // period has + // elapsed and resurrect node 1 which will be used and fail. 
+ setTime(125, SECONDS); + queryTracker.query(session, 1, UnauthorizedException.class, sCluster.address(1)); + + } finally { + cluster.close(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/FallthroughRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/FallthroughRetryPolicyIntegrationTest.java index 45eb5be6ca4..45bd0ef8817 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/FallthroughRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/FallthroughRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +17,21 @@ */ package com.datastax.driver.core.policies; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_request_timeout; + import com.datastax.driver.core.SocketOptions; -import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.TransportException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import org.assertj.core.api.Fail; import org.scassandra.http.client.ClosedConnectionConfig; import org.scassandra.http.client.ClosedConnectionConfig.CloseType; @@ -24,131 +39,138 @@ import org.scassandra.http.client.Result; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.*; - public class FallthroughRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - public FallthroughRetryPolicyIntegrationTest() { - super(FallthroughRetryPolicy.INSTANCE); + public FallthroughRetryPolicyIntegrationTest() { + super(FallthroughRetryPolicy.INSTANCE); + } + + @Test(groups = "short") + public void should_rethrow_on_read_timeout_with_0_receivedResponses() { + simulateError(1, read_request_timeout); + + try { + query(); + fail("expected a ReadTimeoutException"); + } catch (ReadTimeoutException e) { + 
/*expected*/ } - @Test(groups = "short") - public void should_rethrow_on_read_timeout_with_0_receivedResponses() { - simulateError(1, read_request_timeout); - - try { - query(); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException e) {/*expected*/ } - - assertOnReadTimeoutWasCalled(1); - assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnReadTimeoutWasCalled(1); + assertThat(errors.getRetriesOnReadTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_write_timeout_with_SIMPLE_write_type() { + simulateError(1, write_request_timeout); + + try { + query(); + fail("expected a WriteTimeoutException"); + } catch (WriteTimeoutException e) { + /*expected*/ } - @Test(groups = "short") - public void should_rethrow_on_write_timeout_with_SIMPLE_write_type() { - simulateError(1, write_request_timeout); - - try { - query(); - fail("expected a WriteTimeoutException"); - } catch (WriteTimeoutException e) {/*expected*/} - - assertOnWriteTimeoutWasCalled(1); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } - - @Test(groups = "short") - public void should_rethrow_on_unavailable() { - simulateError(1, unavailable); - - try { - query(); - fail("expected an UnavailableException"); - } catch (UnavailableException e) {/*expected*/} - - assertOnUnavailableWasCalled(1); - assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnWriteTimeoutWasCalled(1); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_unavailable() { + simulateError(1, unavailable); + + try { + query(); + fail("expected an UnavailableException"); + } catch (UnavailableException e) { + /*expected*/ } - @Test(groups = "short") - public void should_rethrow_on_client_timeouts() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - try { - query(); - Fail.fail("expected an OperationTimedOutException"); - } catch (OperationTimedOutException e) { - assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Timed out waiting for server response", host1.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + assertOnUnavailableWasCalled(1); + assertThat(errors.getRetriesOnUnavailable().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short") + public void should_rethrow_on_client_timeouts() { + 
cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + try { + query(); + Fail.fail("expected an OperationTimedOutException"); + } catch (OperationTimedOutException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format( + "[%s] Timed out waiting for server response", host1.getEndPoint().resolve())); + } + assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } - - - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_rethrow_on_server_side_error(Result error, Class exception) { - simulateError(1, error); - try { - query(); - Fail.fail("expected a DriverException"); - } catch (DriverException e) { - assertThat(e).isInstanceOf(exception); - } - assertOnRequestErrorWasCalled(1, ServerError.class); - assertThat(errors.getOthers().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + } + + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_rethrow_on_server_side_error( + Result error, Class exception) { + simulateError(1, error); + try { + query(); + Fail.fail("expected a DriverException"); + } catch (DriverException e) { + assertThat(e).isInstanceOf(exception); } - - - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_rethrow_on_connection_error(CloseType closeType) { - simulateError(1, Result.closed_connection, new ClosedConnectionConfig(closeType)); - try { - query(); - Fail.fail("expected a TransportException"); - } catch (TransportException e) { - assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Connection has been closed", host1.getSocketAddress()) - ); - } - assertOnRequestErrorWasCalled(1, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + assertOnRequestErrorWasCalled(1, ServerError.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } + + @Test(groups = "short", dataProvider = "connectionErrors") + public void should_rethrow_on_connection_error(CloseType closeType) { + simulateError(1, Result.closed_connection, new ClosedConnectionConfig(closeType)); + try { + query(); + Fail.fail("expected a TransportException"); + } catch (TransportException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format("[%s] Connection has been closed", 
host1.getEndPoint().resolve())); } + assertOnRequestErrorWasCalled(1, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/HostFilterPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/HostFilterPolicyTest.java index 1107760f358..27df4921974 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/HostFilterPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/HostFilterPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,14 @@ */ package com.datastax.driver.core.policies; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import com.datastax.driver.core.Cluster; import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; @@ -23,6 +33,7 @@ import com.google.common.base.Predicates; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; +import java.util.Collection; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; @@ -30,138 +41,119 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.net.InetSocketAddress; -import java.util.Collection; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.*; - public class HostFilterPolicyTest { - @Mock - Cluster cluster; - - @Mock - Host host1, host2, host3; - - InetSocketAddress address1 = InetSocketAddress.createUnresolved("192.168.1.1", 2345); - InetSocketAddress address2 = InetSocketAddress.createUnresolved("192.168.1.2", 9876); - InetSocketAddress address3 = InetSocketAddress.createUnresolved("192.168.1.3", 6666); - - @Mock - LoadBalancingPolicy wrappedPolicy; + @Mock Cluster cluster; - @Captor - ArgumentCaptor> hostsCaptor; + @Mock Host host1, host2, host3; - @BeforeMethod(groups = "unit") - public void setup() { - MockitoAnnotations.initMocks(this); + @Mock LoadBalancingPolicy wrappedPolicy; - when(host1.getSocketAddress()).thenReturn(address1); - 
when(host2.getSocketAddress()).thenReturn(address2); - when(host3.getSocketAddress()).thenReturn(address3); + @Captor ArgumentCaptor> hostsCaptor; - when(wrappedPolicy.distance(any(Host.class))).thenReturn(HostDistance.LOCAL); - } + @BeforeMethod(groups = "unit") + public void setup() { + MockitoAnnotations.initMocks(this); + when(wrappedPolicy.distance(any(Host.class))).thenReturn(HostDistance.LOCAL); + } - @Test(groups = "unit") - public void should_delegate_to_wrapped_policy_when_predicate_is_true() { - Predicate predicate = Predicates.alwaysTrue(); - HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); + @Test(groups = "unit") + public void should_delegate_to_wrapped_policy_when_predicate_is_true() { + Predicate predicate = Predicates.alwaysTrue(); + HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); - policy.onAdd(host1); - verify(wrappedPolicy).onAdd(host1); + policy.onAdd(host1); + verify(wrappedPolicy).onAdd(host1); - policy.onDown(host1); - verify(wrappedPolicy).onDown(host1); + policy.onDown(host1); + verify(wrappedPolicy).onDown(host1); - policy.onUp(host1); - verify(wrappedPolicy).onUp(host1); + policy.onUp(host1); + verify(wrappedPolicy).onUp(host1); - policy.onRemove(host1); - verify(wrappedPolicy).onRemove(host1); + policy.onRemove(host1); + verify(wrappedPolicy).onRemove(host1); - assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); + assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); - policy.close(); - verify(wrappedPolicy).close(); - } + policy.close(); + verify(wrappedPolicy).close(); + } - @Test(groups = "unit") - public void should_not_delegate_to_wrapped_policy_when_predicate_is_false() { - Predicate predicate = Predicates.alwaysFalse(); - HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); + @Test(groups = "unit") + public void should_not_delegate_to_wrapped_policy_when_predicate_is_false() { + Predicate predicate = Predicates.alwaysFalse(); + HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); - policy.onAdd(host1); - verify(wrappedPolicy, never()).onAdd(host1); + policy.onAdd(host1); + verify(wrappedPolicy, never()).onAdd(host1); - policy.onDown(host1); - verify(wrappedPolicy, never()).onDown(host1); + policy.onDown(host1); + verify(wrappedPolicy, never()).onDown(host1); - policy.onUp(host1); - verify(wrappedPolicy, never()).onUp(host1); + policy.onUp(host1); + verify(wrappedPolicy, never()).onUp(host1); - policy.onRemove(host1); - verify(wrappedPolicy, never()).onRemove(host1); + policy.onRemove(host1); + verify(wrappedPolicy, never()).onRemove(host1); - assertThat(policy.distance(host1)).isSameAs(HostDistance.IGNORED); - } + assertThat(policy.distance(host1)).isSameAs(HostDistance.IGNORED); + } - @Test(groups = "unit") - public void should_filter_init_hosts_with_predicate() { - Predicate predicate = Predicates.in(Lists.newArrayList(host1, host2)); - HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); + @Test(groups = "unit") + public void should_filter_init_hosts_with_predicate() { + Predicate predicate = Predicates.in(Lists.newArrayList(host1, host2)); + HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); - policy.init(cluster, Lists.newArrayList(host1, host2, host3)); + policy.init(cluster, Lists.newArrayList(host1, host2, host3)); - verify(wrappedPolicy).init(eq(cluster), hostsCaptor.capture()); - assertThat(hostsCaptor.getValue()).containsOnly(host1, host2); - } + verify(wrappedPolicy).init(eq(cluster), 
hostsCaptor.capture()); + assertThat(hostsCaptor.getValue()).containsOnly(host1, host2); + } - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_throw_if_predicate_filters_out_all_init_hosts() { - Predicate predicate = Predicates.alwaysFalse(); - HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_throw_if_predicate_filters_out_all_init_hosts() { + Predicate predicate = Predicates.alwaysFalse(); + HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, predicate); - policy.init(cluster, Lists.newArrayList(host1, host2, host3)); - } + policy.init(cluster, Lists.newArrayList(host1, host2, host3)); + } - @Test(groups = "unit") - public void should_return_query_plan_of_wrapped_policy() { - when(wrappedPolicy.newQueryPlan(any(String.class), any(Statement.class))) - .thenReturn(Iterators.forArray(host1, host2, host3)); + @Test(groups = "unit") + public void should_return_query_plan_of_wrapped_policy() { + when(wrappedPolicy.newQueryPlan(any(String.class), any(Statement.class))) + .thenReturn(Iterators.forArray(host1, host2, host3)); - HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, null); + HostFilterPolicy policy = new HostFilterPolicy(wrappedPolicy, null); - assertThat(policy.newQueryPlan("keyspace", mock(Statement.class))) - .containsExactly(host1, host2, host3); - } + assertThat(policy.newQueryPlan("keyspace", mock(Statement.class))) + .containsExactly(host1, host2, host3); + } - @Test(groups = "unit") - public void should_ignore_DCs_in_black_list() { - when(host1.getDatacenter()).thenReturn("dc1"); - when(host2.getDatacenter()).thenReturn("dc2"); - when(host3.getDatacenter()).thenReturn(null); + @Test(groups = "unit") + public void should_ignore_DCs_in_black_list() { + when(host1.getDatacenter()).thenReturn("dc1"); + when(host2.getDatacenter()).thenReturn("dc2"); + when(host3.getDatacenter()).thenReturn(null); - HostFilterPolicy policy = HostFilterPolicy.fromDCBlackList(wrappedPolicy, - Lists.newArrayList("dc2")); + HostFilterPolicy policy = + HostFilterPolicy.fromDCBlackList(wrappedPolicy, Lists.newArrayList("dc2")); - assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); - assertThat(policy.distance(host2)).isSameAs(HostDistance.IGNORED); - assertThat(policy.distance(host3)).isSameAs(HostDistance.LOCAL); - } + assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); + assertThat(policy.distance(host2)).isSameAs(HostDistance.IGNORED); + assertThat(policy.distance(host3)).isSameAs(HostDistance.LOCAL); + } - @Test(groups = "unit") - public void should_ignore_DCs_not_in_white_list_and_not_null() { - when(host1.getDatacenter()).thenReturn("dc1"); - when(host2.getDatacenter()).thenReturn("dc2"); - when(host3.getDatacenter()).thenReturn(null); + @Test(groups = "unit") + public void should_ignore_DCs_not_in_white_list_and_not_null() { + when(host1.getDatacenter()).thenReturn("dc1"); + when(host2.getDatacenter()).thenReturn("dc2"); + when(host3.getDatacenter()).thenReturn(null); - HostFilterPolicy policy = HostFilterPolicy.fromDCWhiteList(wrappedPolicy, - Lists.newArrayList("dc1")); + HostFilterPolicy policy = + HostFilterPolicy.fromDCWhiteList(wrappedPolicy, Lists.newArrayList("dc1")); - assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); - assertThat(policy.distance(host2)).isSameAs(HostDistance.IGNORED); - assertThat(policy.distance(host3)).isSameAs(HostDistance.LOCAL); - } + 
assertThat(policy.distance(host1)).isSameAs(HostDistance.LOCAL); + assertThat(policy.distance(host2)).isSameAs(HostDistance.IGNORED); + assertThat(policy.distance(host3)).isSameAs(HostDistance.LOCAL); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicyIntegrationTest.java index d473b022fca..dc82c7c11cf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/IdempotenceAwareRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +17,24 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.scassandra.http.client.PrimingRequest.then; +import static org.scassandra.http.client.Result.closed_connection; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.OperationTimedOutException; +import com.datastax.driver.core.exceptions.ServerError; +import com.datastax.driver.core.exceptions.TransportException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; import org.assertj.core.api.Fail; import org.mockito.Mockito; import org.scassandra.http.client.ClosedConnectionConfig; @@ -25,258 +43,278 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.scassandra.http.client.PrimingRequest.then; -import static org.scassandra.http.client.Result.closed_connection; -import static org.scassandra.http.client.Result.write_request_timeout; - -/** - * Integration test with an IdempotenceAwareRetryPolicy. - */ +/** Integration test with an IdempotenceAwareRetryPolicy. 
*/ @SuppressWarnings("deprecation") public class IdempotenceAwareRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - public IdempotenceAwareRetryPolicyIntegrationTest() { - super(new IdempotenceAwareRetryPolicy(new CustomRetryPolicy())); - } + public IdempotenceAwareRetryPolicyIntegrationTest() { + super(new IdempotenceAwareRetryPolicy(new CustomRetryPolicy())); + } - @BeforeMethod(groups = "short") - public void setUpDefaultIdempotence() { - cluster.getConfiguration().getQueryOptions().setDefaultIdempotence(false); - } + @BeforeMethod(groups = "short") + public void setUpDefaultIdempotence() { + cluster.getConfiguration().getQueryOptions().setDefaultIdempotence(false); + } - @Test(groups = "short") - public void should_not_retry_on_write_timeout_if_statement_non_idempotent() { - simulateError(1, write_request_timeout); - try { - query(); - fail("expected an WriteTimeoutException"); - } catch (WriteTimeoutException e) {/* expected */} - // Should not have even been called as statement was not idempotent. - assertOnWriteTimeoutWasCalled(0); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + @Test(groups = "short") + public void should_not_retry_on_write_timeout_if_statement_non_idempotent() { + simulateError(1, write_request_timeout); + try { + query(); + fail("expected an WriteTimeoutException"); + } catch (WriteTimeoutException e) { + /* expected */ } + // Should not have even been called as statement was not idempotent. + assertOnWriteTimeoutWasCalled(0); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } - @Test(groups = "short") - public void should_retry_on_write_timeout_if_statement_idempotent() { - simulateError(1, write_request_timeout); - session.execute(new SimpleStatement("mock query").setIdempotent(true)); - assertOnWriteTimeoutWasCalled(1); - assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); - } + @Test(groups = "short") + public void should_retry_on_write_timeout_if_statement_idempotent() { + simulateError(1, write_request_timeout); + session.execute(new SimpleStatement("mock query").setIdempotent(true)); + assertOnWriteTimeoutWasCalled(1); + assertThat(errors.getWriteTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnWriteTimeout().getCount()).isEqualTo(1); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 0); + } - @Test(groups = "short") - public void should_not_retry_on_client_timeout_if_statement_non_idempotent() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - try { - query(); - fail("expected an OperationTimedOutException"); - } catch (OperationTimedOutException e) { - 
assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Timed out waiting for server response", host1.getSocketAddress()) - ); - } - // Should not have even been called as statement was not idempotent. - assertOnRequestErrorWasCalled(0, OperationTimedOutException.class); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + @Test(groups = "short") + public void should_not_retry_on_client_timeout_if_statement_non_idempotent() { + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + try { + query(); + fail("expected an OperationTimedOutException"); + } catch (OperationTimedOutException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format( + "[%s] Timed out waiting for server response", host1.getEndPoint().resolve())); + } + // Should not have even been called as statement was not idempotent. + assertOnRequestErrorWasCalled(0, OperationTimedOutException.class); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } + } - @Test(groups = "short") - public void should_retry_on_client_timeout_if_statement_idempotent() { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); - try { - scassandras - .node(1).primingClient().prime(PrimingRequest.queryBuilder() - .withQuery("mock query") - .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) - .build()); - session.execute(new SimpleStatement("mock query").setIdempotent(true)); - assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); - assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(1); - assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(1); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 0); - } finally { - cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); - } + @Test(groups = "short") + public void should_retry_on_client_timeout_if_statement_idempotent() { + cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(1); + try { + scassandras + .node(1) + .primingClient() + .prime( + PrimingRequest.queryBuilder() + .withQuery("mock query") + .withThen(then().withFixedDelay(1000L).withRows(row("result", "result1"))) + .build()); + session.execute(new SimpleStatement("mock query").setIdempotent(true)); + assertOnRequestErrorWasCalled(1, OperationTimedOutException.class); + assertThat(errors.getClientTimeouts().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(1); + assertThat(errors.getRetriesOnClientTimeout().getCount()).isEqualTo(1); + assertQueried(1, 1); + 
assertQueried(2, 1); + assertQueried(3, 0); + } finally { + cluster + .getConfiguration() + .getSocketOptions() + .setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS); } + } - - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_not_retry_on_server_error_if_statement_non_idempotent(Result error, Class exception) { - simulateError(1, error); - try { - query(); - fail("expected " + exception); - } catch (DriverException e) { - assertThat(e).isInstanceOf(exception); - } - // Should not have even been called as statement was not idempotent. - assertOnRequestErrorWasCalled(0, ServerError.class); - assertThat(errors.getOthers().getCount()).isEqualTo(1); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_not_retry_on_server_error_if_statement_non_idempotent( + Result error, Class exception) { + simulateError(1, error); + try { + query(); + fail("expected " + exception); + } catch (DriverException e) { + assertThat(e).isInstanceOf(exception); } + // Should not have even been called as statement was not idempotent. + assertOnRequestErrorWasCalled(0, ServerError.class); + assertThat(errors.getOthers().getCount()).isEqualTo(1); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } - @SuppressWarnings("UnusedParameters") - @Test(groups = "short", dataProvider = "serverSideErrors") - public void should_retry_on_server_error_if_statement_idempotent(Result error, Class exception) { - simulateError(1, error); - simulateError(2, error); - simulateError(3, error); - try { - session.execute(new SimpleStatement("mock query").setIdempotent(true)); - fail("expected a NoHostAvailableException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); - } - assertOnRequestErrorWasCalled(3, exception); - assertThat(errors.getOthers().getCount()).isEqualTo(3); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + @SuppressWarnings("UnusedParameters") + @Test(groups = "short", dataProvider = "serverSideErrors") + public void should_retry_on_server_error_if_statement_idempotent( + Result error, Class exception) { + simulateError(1, error); + simulateError(2, error); + simulateError(3, error); + try { + session.execute(new SimpleStatement("mock query").setIdempotent(true)); + fail("expected a NoHostAvailableException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(exception); } + assertOnRequestErrorWasCalled(3, exception); + assertThat(errors.getOthers().getCount()).isEqualTo(3); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getRetriesOnOtherErrors().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); 
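These tests exercise two things together: wrapping a retry policy in IdempotenceAwareRetryPolicy and marking individual statements idempotent with setIdempotent(true). A minimal sketch (not part of this patch) of how an application could combine them, assuming a hypothetical local contact point and DefaultRetryPolicy as the wrapped policy; the test class suppresses deprecation warnings, so expect the same when compiling this:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.policies.DefaultRetryPolicy;
import com.datastax.driver.core.policies.IdempotenceAwareRetryPolicy;

public class IdempotentRetryExample {
  public static void main(String[] args) {
    // Only statements explicitly marked idempotent are eligible for retries.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withRetryPolicy(new IdempotenceAwareRetryPolicy(DefaultRetryPolicy.INSTANCE))
            .build();
    try {
      Session session = cluster.connect();
      // Statements are non-idempotent by default; opt in per statement.
      SimpleStatement statement =
          new SimpleStatement("SELECT release_version FROM system.local");
      statement.setIdempotent(true);
      session.execute(statement);
    } finally {
      cluster.close();
    }
  }
}
```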
+ } - - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_not_retry_on_connection_error_if_statement_non_idempotent(ClosedConnectionConfig.CloseType closeType) { - simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); - try { - query(); - Fail.fail("expected a TransportException"); - } catch (TransportException e) { - assertThat(e.getMessage()).isEqualTo( - String.format("[%s] Connection has been closed", host1.getSocketAddress()) - ); - } - // Should not have even been called as statement was not idempotent. - assertOnRequestErrorWasCalled(0, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(0); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); - assertQueried(1, 1); - assertQueried(2, 0); - assertQueried(3, 0); + @Test(groups = "short", dataProvider = "connectionErrors") + public void should_not_retry_on_connection_error_if_statement_non_idempotent( + ClosedConnectionConfig.CloseType closeType) { + simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); + try { + query(); + Fail.fail("expected a TransportException"); + } catch (TransportException e) { + assertThat(e.getMessage()) + .isEqualTo( + String.format("[%s] Connection has been closed", host1.getEndPoint().resolve())); } + // Should not have even been called as statement was not idempotent. 
+ assertOnRequestErrorWasCalled(0, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(0); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(1); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(0); + assertQueried(1, 1); + assertQueried(2, 0); + assertQueried(3, 0); + } - - @Test(groups = "short", dataProvider = "connectionErrors") - public void should_retry_on_connection_error_if_statement_idempotent(ClosedConnectionConfig.CloseType closeType) { - simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); - simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); - try { - session.execute(new SimpleStatement("mock query").setIdempotent(true)); - Fail.fail("expected a TransportException"); - } catch (NoHostAvailableException e) { - assertThat(e.getErrors().keySet()).hasSize(3).containsOnly( - host1.getSocketAddress(), - host2.getSocketAddress(), - host3.getSocketAddress()); - assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); - } - assertOnRequestErrorWasCalled(3, TransportException.class); - assertThat(errors.getRetries().getCount()).isEqualTo(3); - assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); - assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); - assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); - assertQueried(1, 1); - assertQueried(2, 1); - assertQueried(3, 1); + @Test(groups = "short", dataProvider = "connectionErrors") + public void should_retry_on_connection_error_if_statement_idempotent( + ClosedConnectionConfig.CloseType closeType) { + simulateError(1, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(2, closed_connection, new ClosedConnectionConfig(closeType)); + simulateError(3, closed_connection, new ClosedConnectionConfig(closeType)); + try { + session.execute(new SimpleStatement("mock query").setIdempotent(true)); + Fail.fail("expected a TransportException"); + } catch (NoHostAvailableException e) { + assertThat(e.getErrors().keySet()) + .hasSize(3) + .containsOnly(host1.getEndPoint(), host2.getEndPoint(), host3.getEndPoint()); + assertThat(e.getErrors().values()).hasOnlyElementsOfType(TransportException.class); } + assertOnRequestErrorWasCalled(3, TransportException.class); + assertThat(errors.getRetries().getCount()).isEqualTo(3); + assertThat(errors.getConnectionErrors().getCount()).isEqualTo(3); + assertThat(errors.getIgnoresOnConnectionError().getCount()).isEqualTo(0); + assertThat(errors.getRetriesOnConnectionError().getCount()).isEqualTo(3); + assertQueried(1, 1); + assertQueried(2, 1); + assertQueried(3, 1); + } - @Test(groups = "short") - public void should_call_init_method_on_inner_policy() { - RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); - - new IdempotenceAwareRetryPolicy(innerPolicyMock).init(cluster); - - Mockito.verify(innerPolicyMock).init(cluster); - } + @Test(groups = "short") + public void should_call_init_method_on_inner_policy() { + RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); - @Test(groups = "unit") - public void should_call_close_method_on_inner_policy() { - RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); + new IdempotenceAwareRetryPolicy(innerPolicyMock).init(cluster); - new 
IdempotenceAwareRetryPolicy(innerPolicyMock).close(); + Mockito.verify(innerPolicyMock).init(cluster); + } - Mockito.verify(innerPolicyMock).close(); - } + @Test(groups = "unit") + public void should_call_close_method_on_inner_policy() { + RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); + new IdempotenceAwareRetryPolicy(innerPolicyMock).close(); - /** - * Retries everything on the next host. - */ - static class CustomRetryPolicy implements RetryPolicy { + Mockito.verify(innerPolicyMock).close(); + } - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } + /** Retries everything on the next host. */ + static class CustomRetryPolicy implements RetryPolicy { - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } + @Override + public RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return RetryDecision.tryNextHost(cl); + } - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } + @Override + public RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return RetryDecision.tryNextHost(cl); + } - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.tryNextHost(cl); - } + @Override + public RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return RetryDecision.tryNextHost(cl); + } - @Override - public void init(Cluster cluster) { - // nothing to do - } + @Override + public RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return RetryDecision.tryNextHost(cl); + } - @Override - public void close() { - // nothing to do - } + @Override + public void init(Cluster cluster) { + // nothing to do + } + @Override + public void close() { + // nothing to do } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/LatencyAwarePolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/LatencyAwarePolicyTest.java index 86042027aae..4f4d41626e2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/LatencyAwarePolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/LatencyAwarePolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +17,6 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; -import com.datastax.driver.core.exceptions.NoHostAvailableException; -import com.datastax.driver.core.exceptions.ReadTimeoutException; -import com.datastax.driver.core.exceptions.UnavailableException; -import org.testng.annotations.Test; - -import java.util.concurrent.CountDownLatch; - import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; @@ -31,163 +25,159 @@ import static org.scassandra.http.client.Result.read_request_timeout; import static org.scassandra.http.client.Result.unavailable; -public class LatencyAwarePolicyTest extends ScassandraTestBase { - - /** - * A special latency tracker used to signal to the main thread that all trackers have finished their jobs. - */ - private class LatencyTrackerBarrier implements LatencyTracker { - - private final CountDownLatch latch; - - private LatencyTrackerBarrier(int numberOfQueries) { - latch = new CountDownLatch(numberOfQueries); - } +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.LatencyTracker; +import com.datastax.driver.core.ScassandraTestBase; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.UnavailableException; +import java.util.concurrent.CountDownLatch; +import org.testng.annotations.Test; - @Override - public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { - latch.countDown(); - } +public class LatencyAwarePolicyTest extends ScassandraTestBase { - public void await() throws InterruptedException { - latch.await(10, SECONDS); - } + /** + * A special latency tracker used to signal to the main thread that all trackers have finished + * their jobs. 
+ */ + private class LatencyTrackerBarrier implements LatencyTracker { - @Override - public void onRegister(Cluster cluster) { - } + private final CountDownLatch latch; - @Override - public void onUnregister(Cluster cluster) { - } + private LatencyTrackerBarrier(int numberOfQueries) { + latch = new CountDownLatch(numberOfQueries); } - @Test(groups = "short") - public void should_consider_latency_when_query_successful() throws Exception { - // given - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .build() - ); - LatencyAwarePolicy latencyAwarePolicy = LatencyAwarePolicy.builder(new RoundRobinPolicy()) - .withMininumMeasurements(1) - .build(); - Cluster.Builder builder = super.createClusterBuilder(); - builder.withLoadBalancingPolicy(latencyAwarePolicy); - Cluster cluster = builder.build(); - try { - cluster.init(); // force initialization of latency aware policy - LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); - cluster.register(barrier); // add barrier to synchronize latency tracker threads with the current thread - Session session = cluster.connect(); - // when - session.execute(query); - // then - // wait until trackers have been notified - barrier.await(); - // make sure the updater is called at least once - latencyAwarePolicy.new Updater().run(); - LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); - assertThat(snapshot.getAllStats()).hasSize(1); - LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); - assertThat(stats).isNotNull(); - assertThat(stats.getMeasurementsCount()).isEqualTo(1); - assertThat(stats.getLatencyScore()).isNotEqualTo(-1); - } finally { - cluster.close(); - } + @Override + public void update(Host host, Statement statement, Exception exception, long newLatencyNanos) { + latch.countDown(); } - @Test(groups = "short") - public void should_discard_latency_when_unavailable() throws Exception { - // given - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withResult(unavailable)) - .build() - ); - LatencyAwarePolicy latencyAwarePolicy = LatencyAwarePolicy.builder(new RoundRobinPolicy()) - .withMininumMeasurements(1) - .build(); - Cluster.Builder builder = super.createClusterBuilder(); - builder.withLoadBalancingPolicy(latencyAwarePolicy); - Cluster cluster = builder.build(); - try { - cluster.init(); // force initialization of latency aware policy - LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); - cluster.register(barrier); - Session session = cluster.connect(); - // when - try { - session.execute(query); - fail("Should have thrown NoHostAvailableException"); - } catch (NoHostAvailableException e) { - // ok - Throwable error = e.getErrors().get(hostAddress); - assertThat(error).isNotNull(); - assertThat(error).isInstanceOf(UnavailableException.class); - } - // then - // wait until trackers have been notified - barrier.await(); - // make sure the updater is called at least once - latencyAwarePolicy.new Updater().run(); - LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); - assertThat(snapshot.getAllStats()).isEmpty(); - LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); - assertThat(stats).isNull(); - } finally { - cluster.close(); - } + public void await() throws InterruptedException { + latch.await(10, SECONDS); } - @Test(groups = "short") - public void 
should_consider_latency_when_read_timeout() throws Exception { - String query = "SELECT foo FROM bar"; - primingClient.prime( - queryBuilder() - .withQuery(query) - .withThen(then().withResult(read_request_timeout)) - .build() - ); - - LatencyAwarePolicy latencyAwarePolicy = LatencyAwarePolicy.builder(new RoundRobinPolicy()) - .withMininumMeasurements(1) - .build(); - Cluster.Builder builder = super.createClusterBuilder(); - builder.withLoadBalancingPolicy(latencyAwarePolicy); - builder.withRetryPolicy(FallthroughRetryPolicy.INSTANCE); - Cluster cluster = builder.build(); - try { - cluster.init(); // force initialization of latency aware policy - LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); - cluster.register(barrier); - Session session = cluster.connect(); - // when - try { - session.execute(query); - fail("Should have thrown ReadTimeoutException"); - } catch (ReadTimeoutException e) { - // ok - } - // then - // wait until trackers have been notified - barrier.await(); - // make sure the updater is called at least once - latencyAwarePolicy.new Updater().run(); - LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); - assertThat(snapshot.getAllStats()).hasSize(1); - LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); - assertThat(stats).isNotNull(); - assertThat(stats.getMeasurementsCount()).isEqualTo(1); - assertThat(stats.getLatencyScore()).isNotEqualTo(-1); - } finally { - cluster.close(); - } + @Override + public void onRegister(Cluster cluster) {} + + @Override + public void onUnregister(Cluster cluster) {} + } + + @Test(groups = "short") + public void should_consider_latency_when_query_successful() throws Exception { + // given + String query = "SELECT foo FROM bar"; + primingClient.prime(queryBuilder().withQuery(query).build()); + LatencyAwarePolicy latencyAwarePolicy = + LatencyAwarePolicy.builder(new RoundRobinPolicy()).withMininumMeasurements(1).build(); + Cluster.Builder builder = super.createClusterBuilder(); + builder.withLoadBalancingPolicy(latencyAwarePolicy); + Cluster cluster = builder.build(); + try { + cluster.init(); // force initialization of latency aware policy + LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); + cluster.register( + barrier); // add barrier to synchronize latency tracker threads with the current thread + Session session = cluster.connect(); + // when + session.execute(query); + // then + // wait until trackers have been notified + barrier.await(); + // make sure the updater is called at least once + latencyAwarePolicy.new Updater().run(); + LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); + assertThat(snapshot.getAllStats()).hasSize(1); + LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); + assertThat(stats).isNotNull(); + assertThat(stats.getMeasurementsCount()).isEqualTo(1); + assertThat(stats.getLatencyScore()).isNotEqualTo(-1); + } finally { + cluster.close(); } - + } + + @Test(groups = "short") + public void should_discard_latency_when_unavailable() throws Exception { + // given + String query = "SELECT foo FROM bar"; + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withResult(unavailable)).build()); + LatencyAwarePolicy latencyAwarePolicy = + LatencyAwarePolicy.builder(new RoundRobinPolicy()).withMininumMeasurements(1).build(); + Cluster.Builder builder = super.createClusterBuilder(); + builder.withLoadBalancingPolicy(latencyAwarePolicy); + Cluster cluster = 
builder.build(); + try { + cluster.init(); // force initialization of latency aware policy + LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); + cluster.register(barrier); + Session session = cluster.connect(); + // when + try { + session.execute(query); + fail("Should have thrown NoHostAvailableException"); + } catch (NoHostAvailableException e) { + // ok + Throwable error = e.getErrors().get(hostEndPoint); + assertThat(error).isNotNull(); + assertThat(error).isInstanceOf(UnavailableException.class); + } + // then + // wait until trackers have been notified + barrier.await(); + // make sure the updater is called at least once + latencyAwarePolicy.new Updater().run(); + LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); + assertThat(snapshot.getAllStats()).isEmpty(); + LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); + assertThat(stats).isNull(); + } finally { + cluster.close(); + } + } + + @Test(groups = "short") + public void should_consider_latency_when_read_timeout() throws Exception { + String query = "SELECT foo FROM bar"; + primingClient.prime( + queryBuilder().withQuery(query).withThen(then().withResult(read_request_timeout)).build()); + + LatencyAwarePolicy latencyAwarePolicy = + LatencyAwarePolicy.builder(new RoundRobinPolicy()).withMininumMeasurements(1).build(); + Cluster.Builder builder = super.createClusterBuilder(); + builder.withLoadBalancingPolicy(latencyAwarePolicy); + builder.withRetryPolicy(FallthroughRetryPolicy.INSTANCE); + Cluster cluster = builder.build(); + try { + cluster.init(); // force initialization of latency aware policy + LatencyTrackerBarrier barrier = new LatencyTrackerBarrier(1); + cluster.register(barrier); + Session session = cluster.connect(); + // when + try { + session.execute(query); + fail("Should have thrown ReadTimeoutException"); + } catch (ReadTimeoutException e) { + // ok + } + // then + // wait until trackers have been notified + barrier.await(); + // make sure the updater is called at least once + latencyAwarePolicy.new Updater().run(); + LatencyAwarePolicy.Snapshot snapshot = latencyAwarePolicy.getScoresSnapshot(); + assertThat(snapshot.getAllStats()).hasSize(1); + LatencyAwarePolicy.Snapshot.Stats stats = snapshot.getStats(retrieveSingleHost(cluster)); + assertThat(stats).isNotNull(); + assertThat(stats.getMeasurementsCount()).isEqualTo(1); + assertThat(stats.getLatencyScore()).isNotEqualTo(-1); + } finally { + cluster.close(); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/LimitingLoadBalancingPolicy.java b/driver-core/src/test/java/com/datastax/driver/core/policies/LimitingLoadBalancingPolicy.java index ab14972d061..ba8ba8188f0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/LimitingLoadBalancingPolicy.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/LimitingLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,133 +21,134 @@ import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Statement; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * Sample load balancing policy that limits the number of nodes the driver connects to. - *
<p/>
    - * If more nodes are available, they are marked as IGNORED. When one of the "chosen" nodes - * goes down, the policy picks one of the ignored nodes to replace it. - *
<p/>
    - * This kind of policy can be used to alleviate the load on a cluster that has a lot of - * clients. - *
<p/>
    - * For simplicity, this policy does not distinguish LOCAL and REMOTE nodes. + * + *
<p>
    If more nodes are available, they are marked as IGNORED. When one of the "chosen" nodes goes + * down, the policy picks one of the ignored nodes to replace it. + * + *
<p>
    This kind of policy can be used to alleviate the load on a cluster that has a lot of clients. + * + *
<p>
    For simplicity, this policy does not distinguish LOCAL and REMOTE nodes. */ public class LimitingLoadBalancingPolicy extends DelegatingLoadBalancingPolicy { - private final int maxHosts; - private final int threshold; - private final Set liveHosts = Collections.newSetFromMap(new ConcurrentHashMap()); - private final Set chosenHosts = Collections.newSetFromMap(new ConcurrentHashMap()); - private final Lock updateLock = new ReentrantLock(); - - private volatile Cluster cluster; - - /** - * @param delegate the underlying policy that will be fed with the chosen nodes - * @param maxHosts the maximum number of chosen nodes - * @param threshold how many chosen nodes we accept to lose before we start picking new ones - */ - public LimitingLoadBalancingPolicy(LoadBalancingPolicy delegate, int maxHosts, int threshold) { - super(delegate); - this.maxHosts = maxHosts; - this.threshold = threshold; - } - - @Override - public void init(Cluster cluster, Collection hosts) { - this.cluster = cluster; - - Iterator hostIt = hosts.iterator(); - while (hostIt.hasNext() && chosenHosts.size() <= maxHosts - threshold) { - chosenHosts.add(hostIt.next()); - } - - this.delegate.init(cluster, new ArrayList(chosenHosts)); + private final int maxHosts; + private final int threshold; + private final Set liveHosts = + Collections.newSetFromMap(new ConcurrentHashMap()); + private final Set chosenHosts = + Collections.newSetFromMap(new ConcurrentHashMap()); + private final Lock updateLock = new ReentrantLock(); + + private volatile Cluster cluster; + + /** + * @param delegate the underlying policy that will be fed with the chosen nodes + * @param maxHosts the maximum number of chosen nodes + * @param threshold how many chosen nodes we accept to lose before we start picking new ones + */ + public LimitingLoadBalancingPolicy(LoadBalancingPolicy delegate, int maxHosts, int threshold) { + super(delegate); + this.maxHosts = maxHosts; + this.threshold = threshold; + } + + @Override + public void init(Cluster cluster, Collection hosts) { + this.cluster = cluster; + + Iterator hostIt = hosts.iterator(); + while (hostIt.hasNext() && chosenHosts.size() <= maxHosts - threshold) { + chosenHosts.add(hostIt.next()); } - private void updateChosenHosts() { - if (chosenHosts.size() > maxHosts - threshold || liveHosts.size() == 0) - return; - - // We lock to prevent two events from triggering this simultaneously. - updateLock.lock(); - try { - int missing = maxHosts - chosenHosts.size(); - if (missing < threshold || liveHosts.size() == 0) - return; - Set newlyChosen = new HashSet(); - - for (Host host : liveHosts) { - // Note that this picks hosts whatever their distance is. - // We can't reliably call childPolicy.distance() here, because the childPolicy - // might require hosts to be already added to compute their distance properly - // (this is the case for DCAware policy). - newlyChosen.add(host); - missing -= 1; - if (missing == 0) - break; - } - - chosenHosts.addAll(newlyChosen); - liveHosts.removeAll(newlyChosen); - for (Host host : newlyChosen) { - delegate.onAdd(host); - - // delegate should have updated the distance, inform the driver so that it can - // recreate the pool. 
- cluster.getConfiguration().getPoolingOptions().refreshConnectedHost(host); - } - } finally { - updateLock.unlock(); - } - } - - @Override - public HostDistance distance(Host host) { - if (chosenHosts.contains(host)) - return delegate.distance(host); - else - return HostDistance.IGNORED; - } - - @Override - public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { - // Since we only add chosen nodes to the child policy, its query plan will only contain chosen nodes - return delegate.newQueryPlan(loggedKeyspace, statement); - } - - @Override - public void onAdd(Host host) { - liveHosts.add(host); - // update in case we didn't have enough chosen hosts before the addition - updateChosenHosts(); - } - - @Override - public void onUp(Host host) { - onAdd(host); - } - - @Override - public void onDown(Host host) { - delegate.onDown(host); - - liveHosts.remove(host); - chosenHosts.remove(host); - updateChosenHosts(); - } - - @Override - public void onRemove(Host host) { - delegate.onRemove(host); - - liveHosts.remove(host); - chosenHosts.remove(host); - updateChosenHosts(); + this.delegate.init(cluster, new ArrayList(chosenHosts)); + } + + private void updateChosenHosts() { + if (chosenHosts.size() > maxHosts - threshold || liveHosts.size() == 0) return; + + // We lock to prevent two events from triggering this simultaneously. + updateLock.lock(); + try { + int missing = maxHosts - chosenHosts.size(); + if (missing < threshold || liveHosts.size() == 0) return; + Set newlyChosen = new HashSet(); + + for (Host host : liveHosts) { + // Note that this picks hosts whatever their distance is. + // We can't reliably call childPolicy.distance() here, because the childPolicy + // might require hosts to be already added to compute their distance properly + // (this is the case for DCAware policy). + newlyChosen.add(host); + missing -= 1; + if (missing == 0) break; + } + + chosenHosts.addAll(newlyChosen); + liveHosts.removeAll(newlyChosen); + for (Host host : newlyChosen) { + delegate.onAdd(host); + + // delegate should have updated the distance, inform the driver so that it can + // recreate the pool. 
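The class javadoc above describes the behavior: at most maxHosts nodes are "chosen", the rest are IGNORED, and once threshold chosen nodes are lost, ignored nodes are promoted to replace them. Since this is a sample policy that lives in the driver's test sources, an application would copy the class rather than import it from the driver jar; a rough usage sketch (not part of this patch), assuming a hypothetical local contact point and a copy of the class on the classpath:

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.LimitingLoadBalancingPolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;

public class LimitingPolicyExample {
  public static void main(String[] args) {
    // At most 3 hosts are "chosen"; losing 1 of them triggers promotion of an ignored host.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withLoadBalancingPolicy(
                new LimitingLoadBalancingPolicy(new RoundRobinPolicy(), 3, 1))
            .build();
    try {
      cluster.connect();
    } finally {
      cluster.close();
    }
  }
}
```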
+ cluster.getConfiguration().getPoolingOptions().refreshConnectedHost(host); + } + } finally { + updateLock.unlock(); } + } + + @Override + public HostDistance distance(Host host) { + if (chosenHosts.contains(host)) return delegate.distance(host); + else return HostDistance.IGNORED; + } + + @Override + public Iterator newQueryPlan(String loggedKeyspace, Statement statement) { + // Since we only add chosen nodes to the child policy, its query plan will only contain chosen + // nodes + return delegate.newQueryPlan(loggedKeyspace, statement); + } + + @Override + public void onAdd(Host host) { + liveHosts.add(host); + // update in case we didn't have enough chosen hosts before the addition + updateChosenHosts(); + } + + @Override + public void onUp(Host host) { + onAdd(host); + } + + @Override + public void onDown(Host host) { + delegate.onDown(host); + + liveHosts.remove(host); + chosenHosts.remove(host); + updateChosenHosts(); + } + + @Override + public void onRemove(Host host) { + delegate.onRemove(host); + + liveHosts.remove(host); + chosenHosts.remove(host); + updateChosenHosts(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/LoggingRetryPolicyIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/LoggingRetryPolicyIntegrationTest.java index 147f278a05e..4a5f4617e4e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/LoggingRetryPolicyIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/LoggingRetryPolicyIntegrationTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,30 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ConsistencyLevel.LOCAL_ONE; +import static com.datastax.driver.core.WriteType.SIMPLE; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.IGNORING_READ_TIMEOUT; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.IGNORING_REQUEST_ERROR; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.IGNORING_UNAVAILABLE; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.IGNORING_WRITE_TIMEOUT; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.RETRYING_ON_READ_TIMEOUT; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.RETRYING_ON_REQUEST_ERROR; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.RETRYING_ON_UNAVAILABLE; +import static com.datastax.driver.core.policies.LoggingRetryPolicy.RETRYING_ON_WRITE_TIMEOUT; +import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.ignore; +import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.tryNextHost; +import static org.apache.log4j.Level.INFO; +import static org.scassandra.http.client.Result.read_request_timeout; +import static org.scassandra.http.client.Result.server_error; +import static org.scassandra.http.client.Result.unavailable; +import static org.scassandra.http.client.Result.write_request_timeout; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.MemoryAppender; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.exceptions.ServerError; import org.apache.log4j.Level; @@ -27,180 +52,208 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ConsistencyLevel.LOCAL_ONE; -import static com.datastax.driver.core.WriteType.SIMPLE; -import static com.datastax.driver.core.policies.LoggingRetryPolicy.*; -import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.ignore; -import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.tryNextHost; -import static org.apache.log4j.Level.INFO; -import static org.scassandra.http.client.Result.*; - -/** - * Integration tests with LoggingRetryPolicy. - */ +/** Integration tests with LoggingRetryPolicy. 
*/ public class LoggingRetryPolicyIntegrationTest extends AbstractRetryPolicyIntegrationTest { - private volatile RetryPolicy.RetryDecision retryDecision; - - private Logger logger = Logger.getLogger(LoggingRetryPolicy.class.getName()); - - private MemoryAppender appender; - - private Level originalLevel; - private ConsistencyLevel defaultCL; - - @BeforeClass(groups = {"short", "unit"}) - public void setUpRetryPolicy() { - setRetryPolicy(new LoggingRetryPolicy(new CustomRetryPolicy())); - } - - @BeforeMethod(groups = {"short"}) - public void storeDefaultCL() { - defaultCL = cluster.getConfiguration().getQueryOptions().getConsistencyLevel(); - } - - @BeforeMethod(groups = {"short", "unit"}) - public void startCapturingLogs() { - originalLevel = logger.getLevel(); - logger.setLevel(INFO); - logger.addAppender(appender = new MemoryAppender()); - } - - @AfterMethod(groups = {"short", "unit"}) - public void stopCapturingLogs() { - logger.setLevel(originalLevel); - logger.removeAppender(appender); - } - - @Test(groups = "short") - public void should_log_ignored_read_timeout() throws InterruptedException { - simulateError(1, read_request_timeout); - retryDecision = ignore(); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(IGNORING_READ_TIMEOUT, defaultCL, 1, 0, false, 0)); - } - - @Test(groups = "short") - public void should_log_retried_read_timeout() throws InterruptedException { - simulateError(1, read_request_timeout); - retryDecision = tryNextHost(LOCAL_ONE); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(RETRYING_ON_READ_TIMEOUT, "next host", LOCAL_ONE, defaultCL, 1, 0, false, 0)); - } - - @Test(groups = "short") - public void should_log_ignored_write_timeout() throws InterruptedException { - simulateError(1, write_request_timeout); - retryDecision = ignore(); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(IGNORING_WRITE_TIMEOUT, defaultCL, SIMPLE, 1, 0, 0)); - } - - @Test(groups = "short") - public void should_log_retried_write_timeout() throws InterruptedException { - simulateError(1, write_request_timeout); - retryDecision = tryNextHost(LOCAL_ONE); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(RETRYING_ON_WRITE_TIMEOUT, "next host", LOCAL_ONE, defaultCL, SIMPLE, 1, 0, 0)); + private volatile RetryPolicy.RetryDecision retryDecision; + + private Logger logger = Logger.getLogger(LoggingRetryPolicy.class.getName()); + + private MemoryAppender appender; + + private Level originalLevel; + private ConsistencyLevel defaultCL; + + @BeforeClass(groups = {"short", "unit"}) + public void setUpRetryPolicy() { + setRetryPolicy(new LoggingRetryPolicy(new CustomRetryPolicy())); + } + + @BeforeMethod(groups = {"short"}) + public void storeDefaultCL() { + defaultCL = cluster.getConfiguration().getQueryOptions().getConsistencyLevel(); + } + + @BeforeMethod(groups = {"short", "unit"}) + public void startCapturingLogs() { + originalLevel = logger.getLevel(); + logger.setLevel(INFO); + logger.addAppender(appender = new MemoryAppender()); + } + + @AfterMethod(groups = {"short", "unit"}) + public void stopCapturingLogs() { + logger.setLevel(originalLevel); + logger.removeAppender(appender); + } + + @Test(groups = "short") + public void should_log_ignored_read_timeout() throws InterruptedException { + simulateError(1, read_request_timeout); + retryDecision = ignore(); + query(); + 
String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo(expectedMessage(IGNORING_READ_TIMEOUT, defaultCL, 1, 0, false, 0)); + } + + @Test(groups = "short") + public void should_log_retried_read_timeout() throws InterruptedException { + simulateError(1, read_request_timeout); + retryDecision = tryNextHost(LOCAL_ONE); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo( + expectedMessage( + RETRYING_ON_READ_TIMEOUT, "next host", LOCAL_ONE, defaultCL, 1, 0, false, 0)); + } + + @Test(groups = "short") + public void should_log_ignored_write_timeout() throws InterruptedException { + simulateError(1, write_request_timeout); + retryDecision = ignore(); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo(expectedMessage(IGNORING_WRITE_TIMEOUT, defaultCL, SIMPLE, 1, 0, 0)); + } + + @Test(groups = "short") + public void should_log_retried_write_timeout() throws InterruptedException { + simulateError(1, write_request_timeout); + retryDecision = tryNextHost(LOCAL_ONE); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo( + expectedMessage( + RETRYING_ON_WRITE_TIMEOUT, "next host", LOCAL_ONE, defaultCL, SIMPLE, 1, 0, 0)); + } + + @Test(groups = "short") + public void should_log_ignored_unavailable() throws InterruptedException { + simulateError(1, unavailable); + retryDecision = ignore(); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()).isEqualTo(expectedMessage(IGNORING_UNAVAILABLE, defaultCL, 1, 0, 0)); + } + + @Test(groups = "short") + public void should_log_retried_unavailable() throws InterruptedException { + simulateError(1, unavailable); + retryDecision = tryNextHost(LOCAL_ONE); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo( + expectedMessage(RETRYING_ON_UNAVAILABLE, "next host", LOCAL_ONE, defaultCL, 1, 0, 0)); + } + + @Test(groups = "short") + public void should_log_ignored_request_error() throws InterruptedException { + simulateError(1, server_error); + retryDecision = ignore(); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo( + expectedMessage( + IGNORING_REQUEST_ERROR, + defaultCL, + 0, + new ServerError(host1.getEndPoint(), "Server Error").toString())); + } + + @Test(groups = "short") + public void should_log_retried_request_error() throws InterruptedException { + simulateError(1, server_error); + retryDecision = tryNextHost(LOCAL_ONE); + query(); + String line = appender.waitAndGet(5000); + assertThat(line.trim()) + .isEqualTo( + expectedMessage( + RETRYING_ON_REQUEST_ERROR, + "next host", + LOCAL_ONE, + defaultCL, + 0, + new ServerError(host1.getEndPoint(), "Server Error").toString())); + } + + @Test(groups = "short") + public void should_call_init_method_on_inner_policy() { + RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); + + new LoggingRetryPolicy(innerPolicyMock).init(cluster); + + Mockito.verify(innerPolicyMock).init(cluster); + } + + @Test(groups = "unit") + public void should_call_close_method_on_inner_policy() { + RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); + + new LoggingRetryPolicy(innerPolicyMock).close(); + + Mockito.verify(innerPolicyMock).close(); + } + + private String expectedMessage(String template, Object... args) { + return MessageFormatter.arrayFormat(template, args).getMessage(); + } + + /** Dynamically modifiable retry policy. 
*/ + class CustomRetryPolicy implements RetryPolicy { + + @Override + public RetryPolicy.RetryDecision onReadTimeout( + Statement statement, + ConsistencyLevel cl, + int requiredResponses, + int receivedResponses, + boolean dataRetrieved, + int nbRetry) { + return retryDecision; } - @Test(groups = "short") - public void should_log_ignored_unavailable() throws InterruptedException { - simulateError(1, unavailable); - retryDecision = ignore(); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(IGNORING_UNAVAILABLE, defaultCL, 1, 0, 0)); + @Override + public RetryPolicy.RetryDecision onWriteTimeout( + Statement statement, + ConsistencyLevel cl, + WriteType writeType, + int requiredAcks, + int receivedAcks, + int nbRetry) { + return retryDecision; } - @Test(groups = "short") - public void should_log_retried_unavailable() throws InterruptedException { - simulateError(1, unavailable); - retryDecision = tryNextHost(LOCAL_ONE); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(RETRYING_ON_UNAVAILABLE, "next host", LOCAL_ONE, defaultCL, 1, 0, 0)); + @Override + public RetryPolicy.RetryDecision onUnavailable( + Statement statement, + ConsistencyLevel cl, + int requiredReplica, + int aliveReplica, + int nbRetry) { + return retryDecision; } - @Test(groups = "short") - public void should_log_ignored_request_error() throws InterruptedException { - simulateError(1, server_error); - retryDecision = ignore(); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(IGNORING_REQUEST_ERROR, defaultCL, 0, new ServerError(host1.getSocketAddress(), "Server Error").toString())); + @Override + public RetryPolicy.RetryDecision onRequestError( + Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { + return retryDecision; } - @Test(groups = "short") - public void should_log_retried_request_error() throws InterruptedException { - simulateError(1, server_error); - retryDecision = tryNextHost(LOCAL_ONE); - query(); - String line = appender.waitAndGet(5000); - assertThat(line.trim()).isEqualTo(expectedMessage(RETRYING_ON_REQUEST_ERROR, "next host", LOCAL_ONE, defaultCL, 0, new ServerError(host1.getSocketAddress(), "Server Error").toString())); + @Override + public void init(Cluster cluster) { + // nothing to do } - @Test(groups = "short") - public void should_call_init_method_on_inner_policy() { - RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); - - new LoggingRetryPolicy(innerPolicyMock).init(cluster); - - Mockito.verify(innerPolicyMock).init(cluster); - } - - @Test(groups = "unit") - public void should_call_close_method_on_inner_policy() { - RetryPolicy innerPolicyMock = Mockito.mock(RetryPolicy.class); - - new LoggingRetryPolicy(innerPolicyMock).close(); - - Mockito.verify(innerPolicyMock).close(); - } - - private String expectedMessage(String template, Object... args) { - return MessageFormatter.arrayFormat(template, args).getMessage(); - } - - /** - * Dynamically modifiable retry policy. 
- */ - class CustomRetryPolicy implements RetryPolicy { - - @Override - public RetryPolicy.RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return retryDecision; - } - - @Override - public RetryPolicy.RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return retryDecision; - } - - @Override - public RetryPolicy.RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return retryDecision; - } - - @Override - public RetryPolicy.RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return retryDecision; - } - - @Override - public void init(Cluster cluster) { - // nothing to do - } - - @Override - public void close() { - // nothing to do - } - + @Override + public void close() { + // nothing to do } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/RetryDecisionTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/RetryDecisionTest.java index 00e192b146a..221aa9b4f05 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/RetryDecisionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/RetryDecisionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,47 +17,37 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.policies.RetryPolicy.RetryDecision; -import org.testng.annotations.Test; - import static com.datastax.driver.core.ConsistencyLevel.ONE; -import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type.*; +import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type.IGNORE; +import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type.RETHROW; +import static com.datastax.driver.core.policies.RetryPolicy.RetryDecision.Type.RETRY; import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.driver.core.policies.RetryPolicy.RetryDecision; +import org.testng.annotations.Test; + public class RetryDecisionTest { - @Test(groups = "unit") - public void should_expose_decision_properties() throws Throwable { - RetryDecision retryAtOne = RetryDecision.retry(ONE); - assertThat(retryAtOne.getType()) - .isEqualTo(RETRY); - assertThat(retryAtOne.getRetryConsistencyLevel()) - .isEqualTo(ONE); - assertThat(retryAtOne.isRetryCurrent()) - .isTrue(); - assertThat(retryAtOne.toString()) - .isEqualTo("Retry at ONE on same host."); + @Test(groups = "unit") + public void should_expose_decision_properties() throws Throwable { + RetryDecision retryAtOne = RetryDecision.retry(ONE); + assertThat(retryAtOne.getType()).isEqualTo(RETRY); + assertThat(retryAtOne.getRetryConsistencyLevel()).isEqualTo(ONE); + assertThat(retryAtOne.isRetryCurrent()).isTrue(); + assertThat(retryAtOne.toString()).isEqualTo("Retry at ONE on same host."); - RetryDecision tryNextAtOne = RetryDecision.tryNextHost(ONE); - assertThat(tryNextAtOne.getType()) - .isEqualTo(RETRY); - assertThat(tryNextAtOne.getRetryConsistencyLevel()) - .isEqualTo(ONE); - assertThat(tryNextAtOne.isRetryCurrent()) - .isFalse(); - assertThat(tryNextAtOne.toString()) - .isEqualTo("Retry at ONE on next host."); + RetryDecision tryNextAtOne = RetryDecision.tryNextHost(ONE); + assertThat(tryNextAtOne.getType()).isEqualTo(RETRY); + assertThat(tryNextAtOne.getRetryConsistencyLevel()).isEqualTo(ONE); + assertThat(tryNextAtOne.isRetryCurrent()).isFalse(); + assertThat(tryNextAtOne.toString()).isEqualTo("Retry at ONE on next host."); - RetryDecision rethrow = RetryDecision.rethrow(); - assertThat(rethrow.getType()) - .isEqualTo(RETHROW); - assertThat(rethrow.toString()) - .isEqualTo("Rethrow"); + RetryDecision rethrow = RetryDecision.rethrow(); + assertThat(rethrow.getType()).isEqualTo(RETHROW); + assertThat(rethrow.toString()).isEqualTo("Rethrow"); - RetryDecision ignore = RetryDecision.ignore(); - assertThat(ignore.getType()) - .isEqualTo(IGNORE); - assertThat(ignore.toString()) - .isEqualTo("Ignore"); - } + RetryDecision ignore = RetryDecision.ignore(); + assertThat(ignore.getType()).isEqualTo(IGNORE); + assertThat(ignore.toString()).isEqualTo("Ignore"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/RollingCountTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/RollingCountTest.java index 5db8a8011be..a2eb3e0b5cc 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/RollingCountTest.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/policies/RollingCountTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,135 +17,133 @@ */ package com.datastax.driver.core.policies; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.util.concurrent.TimeUnit; - import static com.datastax.driver.core.Assertions.assertThat; import static java.util.concurrent.TimeUnit.NANOSECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.mockito.Mockito.when; -public class RollingCountTest { - @Mock - Clock clock; - - RollingCount rollingCount; - - @BeforeMethod(groups = "unit") - public void setup() { - MockitoAnnotations.initMocks(this); - assertThat(clock.nanoTime()).isEqualTo(0); - rollingCount = new RollingCount(clock); - } - - @Test(groups = "unit") - public void should_record_adds_in_first_interval() { - // t = 0 - rollingCount.increment(); - setTime(1, SECONDS); - rollingCount.increment(); - setTime(2, SECONDS); - rollingCount.increment(); - - // the count does not update in real time... - assertThat(rollingCount.get()).isEqualTo(0); - - // but only at the end of each 5-second interval - setTime(5, SECONDS); - assertThat(rollingCount.get()).isEqualTo(3); - } - - @Test(groups = "unit") - public void should_record_adds_over_two_intervals() { - rollingCount.add(2); - - setTime(5, SECONDS); // 2nd interval - rollingCount.add(3); - - setTime(10, SECONDS); - assertThat(rollingCount.get()).isEqualTo(5); - } - - @Test(groups = "unit") - public void should_record_adds_separated_by_idle_intervals() { - rollingCount.add(2); - - setTime(10, SECONDS); // 3rd interval - rollingCount.add(3); +import java.util.concurrent.TimeUnit; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; - setTime(15, SECONDS); - assertThat(rollingCount.get()).isEqualTo(5); +public class RollingCountTest { + @Mock Clock clock; + + RollingCount rollingCount; + + @BeforeMethod(groups = "unit") + public void setup() { + MockitoAnnotations.initMocks(this); + assertThat(clock.nanoTime()).isEqualTo(0); + rollingCount = new RollingCount(clock); + } + + @Test(groups = "unit") + public void should_record_adds_in_first_interval() { + // t = 0 + rollingCount.increment(); + setTime(1, SECONDS); + rollingCount.increment(); + setTime(2, SECONDS); + rollingCount.increment(); + + // the count does not update in real time... 
+ assertThat(rollingCount.get()).isEqualTo(0); + + // but only at the end of each 5-second interval + setTime(5, SECONDS); + assertThat(rollingCount.get()).isEqualTo(3); + } + + @Test(groups = "unit") + public void should_record_adds_over_two_intervals() { + rollingCount.add(2); + + setTime(5, SECONDS); // 2nd interval + rollingCount.add(3); + + setTime(10, SECONDS); + assertThat(rollingCount.get()).isEqualTo(5); + } + + @Test(groups = "unit") + public void should_record_adds_separated_by_idle_intervals() { + rollingCount.add(2); + + setTime(10, SECONDS); // 3rd interval + rollingCount.add(3); + + setTime(15, SECONDS); + assertThat(rollingCount.get()).isEqualTo(5); + } + + @Test(groups = "unit") + public void should_rotate() { + // 2 in [0,5[, then 1 every 5 seconds in [5,60[ + rollingCount.add(2); + for (int i = 1; i < 12; i++) { + setTime(i * 5, SECONDS); + rollingCount.add(1); } - - @Test(groups = "unit") - public void should_rotate() { - // 2 in [0,5[, then 1 every 5 seconds in [5,60[ - rollingCount.add(2); - for (int i = 1; i < 12; i++) { - setTime(i * 5, SECONDS); - rollingCount.add(1); - } - setTime(60, SECONDS); - // the previous minute is now [0,60[ - assertThat(rollingCount.get()).isEqualTo(13); - - rollingCount.add(1); - setTime(65, SECONDS); - // the previous minute is now [5,65[, so the 2 events from [0,5[ should be forgotten - assertThat(rollingCount.get()).isEqualTo(12); + setTime(60, SECONDS); + // the previous minute is now [0,60[ + assertThat(rollingCount.get()).isEqualTo(13); + + rollingCount.add(1); + setTime(65, SECONDS); + // the previous minute is now [5,65[, so the 2 events from [0,5[ should be forgotten + assertThat(rollingCount.get()).isEqualTo(12); + } + + @Test(groups = "unit") + public void should_rotate_with_idle_intervals() { + // 1 every 5 seconds in [0,60[ + for (int i = 0; i < 12; i++) { + setTime(i * 5, SECONDS); + rollingCount.add(1); } - - @Test(groups = "unit") - public void should_rotate_with_idle_intervals() { - // 1 every 5 seconds in [0,60[ - for (int i = 0; i < 12; i++) { - setTime(i * 5, SECONDS); - rollingCount.add(1); - } - // idle in [60, 75[, then 1 in [75,80[ - setTime(75, SECONDS); - rollingCount.add(1); - - setTime(80, SECONDS); - // the last minute is [20,80[, with 1 every 5 seconds except during 15 seconds - assertThat(rollingCount.get()).isEqualTo(9); + // idle in [60, 75[, then 1 in [75,80[ + setTime(75, SECONDS); + rollingCount.add(1); + + setTime(80, SECONDS); + // the last minute is [20,80[, with 1 every 5 seconds except during 15 seconds + assertThat(rollingCount.get()).isEqualTo(9); + } + + @Test(groups = "unit") + public void should_rotate_when_idle_for_full_period() { + // 1 every 5 seconds in [0,60[ + for (int i = 0; i < 12; i++) { + setTime(i * 5, SECONDS); + rollingCount.add(1); } - - @Test(groups = "unit") - public void should_rotate_when_idle_for_full_period() { - // 1 every 5 seconds in [0,60[ - for (int i = 0; i < 12; i++) { - setTime(i * 5, SECONDS); - rollingCount.add(1); - } - // idle for the next minute [60,120[, then 1 in [120,125[ - setTime(120, SECONDS); - rollingCount.add(1); - - setTime(125, SECONDS); - assertThat(rollingCount.get()).isEqualTo(1); + // idle for the next minute [60,120[, then 1 in [120,125[ + setTime(120, SECONDS); + rollingCount.add(1); + + setTime(125, SECONDS); + assertThat(rollingCount.get()).isEqualTo(1); + } + + @Test(groups = "unit") + public void should_rotate_when_idle_for_more_than_full_period() { + // 1 every 5 seconds in [0,60[ + for (int i = 0; i < 12; i++) { + setTime(i * 5, 
SECONDS); + rollingCount.add(1); } + // idle for the next minute and 5 seconds [60,125[, then 1 in [125,130[ + setTime(125, SECONDS); + rollingCount.add(1); - @Test(groups = "unit") - public void should_rotate_when_idle_for_more_than_full_period() { - // 1 every 5 seconds in [0,60[ - for (int i = 0; i < 12; i++) { - setTime(i * 5, SECONDS); - rollingCount.add(1); - } - // idle for the next minute and 5 seconds [60,125[, then 1 in [125,130[ - setTime(125, SECONDS); - rollingCount.add(1); - - setTime(130, SECONDS); - assertThat(rollingCount.get()).isEqualTo(1); - } + setTime(130, SECONDS); + assertThat(rollingCount.get()).isEqualTo(1); + } - private void setTime(long time, TimeUnit unit) { - when(clock.nanoTime()).thenReturn(NANOSECONDS.convert(time, unit)); - } + private void setTime(long time, TimeUnit unit) { + when(clock.nanoTime()).thenReturn(NANOSECONDS.convert(time, unit)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/RoundRobinPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/RoundRobinPolicyTest.java index 073b313ab75..c1c133396b5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/RoundRobinPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/RoundRobinPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,153 +17,168 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.DataProviders; +import com.datastax.driver.core.MemoryAppender; +import com.datastax.driver.core.QueryTracker; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; - public class RoundRobinPolicyTest { - Logger policyLogger = Logger.getLogger(RoundRobinPolicy.class); - MemoryAppender logs; - QueryTracker queryTracker; - Level originalLevel; - - @BeforeMethod(groups = "short") - public void setUp() { - queryTracker = new QueryTracker(); - originalLevel = policyLogger.getLevel(); - policyLogger.setLevel(Level.WARN); - logs = new MemoryAppender(); - policyLogger.addAppender(logs); - } - - @AfterMethod(groups = "short", alwaysRun = true) - public void tearDown() { - policyLogger.setLevel(originalLevel); - policyLogger.removeAppender(logs); - } - - /** - * Ensures that when used {@link RoundRobinPolicy} properly round robins requests within - * nodes in a single datacenter. - * - * @test_category load_balancing:round_robin - */ - @Test(groups = "short") - public void should_round_robin_within_single_datacenter() { - // given: a 5 node cluster using RoundRobinPolicy. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(new RoundRobinPolicy()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - // when: a query is executed 50 times. - queryTracker.query(session, 50); - - // then: all nodes should be queried equally. - for (int i = 1; i <= 5; i++) { - queryTracker.assertQueried(sCluster, 1, i, 10); - } - } finally { - cluster.close(); - sCluster.stop(); - } - + Logger policyLogger = Logger.getLogger(RoundRobinPolicy.class); + MemoryAppender logs; + QueryTracker queryTracker; + Level originalLevel; + + @BeforeMethod(groups = "short") + public void setUp() { + queryTracker = new QueryTracker(); + originalLevel = policyLogger.getLevel(); + policyLogger.setLevel(Level.WARN); + logs = new MemoryAppender(); + policyLogger.addAppender(logs); + } + + @AfterMethod(groups = "short", alwaysRun = true) + public void tearDown() { + policyLogger.setLevel(originalLevel); + policyLogger.removeAppender(logs); + } + + /** + * Ensures that when used {@link RoundRobinPolicy} properly round robins requests within nodes in + * a single datacenter. 
+ * + * @test_category load_balancing:round_robin + */ + @Test(groups = "short") + public void should_round_robin_within_single_datacenter() { + // given: a 5 node cluster using RoundRobinPolicy. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(new RoundRobinPolicy()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + // when: a query is executed 50 times. + queryTracker.query(session, 50); + + // then: all nodes should be queried equally. + for (int i = 1; i <= 5; i++) { + queryTracker.assertQueried(sCluster, 1, i, 10); + } + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that when used {@link RoundRobinPolicy} properly round robins requests to nodes irrespective - * of cluster topology by ensuring nodes in different data centers are queried equally to others. - * - * @test_category load_balancing:round_robin - */ - @Test(groups = "short") - public void should_round_robin_irrespective_of_topology() { - // given: a 10 node, 5 DC cluster using RoundRobinPolicy. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2, 2, 2).build(); - - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(new RoundRobinPolicy()) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - // when: a query is executed 50 times. - queryTracker.query(session, 50); - - // then: all nodes should be queried equally. - for (int dc = 1; dc <= 5; dc++) { - for (int i = 1; i <= 2; i++) { - queryTracker.assertQueried(sCluster, dc, i, 5); - } - } - } finally { - cluster.close(); - sCluster.stop(); + } + + /** + * Ensures that when used {@link RoundRobinPolicy} properly round robins requests to nodes + * irrespective of cluster topology by ensuring nodes in different data centers are queried + * equally to others. + * + * @test_category load_balancing:round_robin + */ + @Test(groups = "short") + public void should_round_robin_irrespective_of_topology() { + // given: a 10 node, 5 DC cluster using RoundRobinPolicy. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(2, 2, 2, 2, 2).build(); + + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(new RoundRobinPolicy()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + // when: a query is executed 50 times. + queryTracker.query(session, 50); + + // then: all nodes should be queried equally. + for (int dc = 1; dc <= 5; dc++) { + for (int i = 1; i <= 2; i++) { + queryTracker.assertQueried(sCluster, dc, i, 5); } + } + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that when used {@link RoundRobinPolicy} generates a warning if a consistency level is used - * that is data center local (i.e. LOCAL_QUORUM) and nodes from multiple data centers are in the cluster - * and that warning is only generated once. Also validates that is a non-Dc local consistency level is - * used (i.e. ONE) that no such warning is generated. 
- *
- * @test_category load_balancing:round_robin
- */
- @Test(groups = "short", dataProvider = "consistencyLevels", dataProviderClass = DataProviders.class)
- public void should_warn_if_using_dc_local_consistency_level(ConsistencyLevel cl) {
- // given: a 2 node, 2 DC cluster using RoundRobinPolicy.
- ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(1, 1).build();
-
- Cluster cluster = Cluster.builder()
- .addContactPoints(sCluster.address(1).getAddress())
- .withPort(sCluster.getBinaryPort())
- .withLoadBalancingPolicy(new RoundRobinPolicy())
- .withNettyOptions(nonQuietClusterCloseOptions)
- .build();
-
- String expectedLogMessage = "Detected request at Consistency Level " + cl + " but the non-DC aware RoundRobinPolicy is in use.";
-
- try {
- sCluster.init();
-
- Session session = cluster.connect();
- // when: a query is executed 50 times.
- queryTracker.query(session, 50, cl);
-
- // then: all nodes should be queried equally.
- queryTracker.assertQueried(sCluster, 1, 1, 25);
- queryTracker.assertQueried(sCluster, 2, 1, 25);
-
- // Should get a warning if using a local DC cl.
- if (cl.isDCLocal()) {
- assertThat(logs.get()).containsOnlyOnce(expectedLogMessage);
- } else {
- assertThat(logs.get()).doesNotContain(expectedLogMessage);
- }
- } finally {
- cluster.close();
- sCluster.stop();
- }
+ }
+
+ /**
+ * Ensures that {@link RoundRobinPolicy} generates a warning if a consistency level is used
+ * that is data center local (i.e. LOCAL_QUORUM) and nodes from multiple data centers are in
+ * the cluster, and that the warning is only generated once. Also validates that if a
+ * non-DC-local consistency level is used (i.e. ONE), no such warning is generated.
+ *
+ * @test_category load_balancing:round_robin
+ */
+ @Test(
+ groups = "short",
+ dataProvider = "consistencyLevels",
+ dataProviderClass = DataProviders.class)
+ public void should_warn_if_using_dc_local_consistency_level(ConsistencyLevel cl) {
+ // given: a 2 node, 2 DC cluster using RoundRobinPolicy.
+ ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(1, 1).build();
+
+ Cluster cluster =
+ Cluster.builder()
+ .addContactPoints(sCluster.address(1).getAddress())
+ .withPort(sCluster.getBinaryPort())
+ .withLoadBalancingPolicy(new RoundRobinPolicy())
+ .withNettyOptions(nonQuietClusterCloseOptions)
+ .build();
+
+ String expectedLogMessage =
+ "Detected request at Consistency Level "
+ + cl
+ + " but the non-DC aware RoundRobinPolicy is in use.";
+
+ try {
+ sCluster.init();
+
+ Session session = cluster.connect();
+ // when: a query is executed 50 times.
+ queryTracker.query(session, 50, cl);
+
+ // then: all nodes should be queried equally.
+ queryTracker.assertQueried(sCluster, 1, 1, 25);
+ queryTracker.assertQueried(sCluster, 2, 1, 25);
+
+ // Should get a warning if using a local DC cl.
+ if (cl.isDCLocal()) {
+ assertThat(logs.get()).containsOnlyOnce(expectedLogMessage);
+ } else {
+ assertThat(logs.get()).doesNotContain(expectedLogMessage);
+ }
+ } finally {
+ cluster.close();
+ sCluster.stop();
}
+ }
}
diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java
index 5a5810caf6d..21f7f50118e 100644
--- a/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java
+++ b/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java
@@ -1,11 +1,13 @@
/*
- * Copyright (C) 2012-2017 DataStax Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,419 +17,540 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import static com.datastax.driver.core.policies.TokenAwarePolicy.ReplicaOrdering.NEUTRAL; +import static com.datastax.driver.core.policies.TokenAwarePolicy.ReplicaOrdering.RANDOM; +import static com.datastax.driver.core.policies.TokenAwarePolicy.ReplicaOrdering.TOPOLOGICAL; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.CCMBridge; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.Configuration; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ProtocolOptions; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.QueryTracker; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.SortingLoadBalancingPolicy; +import com.datastax.driver.core.TestUtils; +import com.datastax.driver.core.TypeCodec; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.List; +import org.assertj.core.util.Sets; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.nio.ByteBuffer; -import java.util.List; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT; -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; - -@CreateCCM(PER_METHOD) -@CCMConfig(createCcm = false) -public class TokenAwarePolicyTest extends CCMTestsSupport { - - QueryTracker queryTracker; - - @BeforeMethod(groups = "short") - public void setUp() { - queryTracker = new QueryTracker(); +public class TokenAwarePolicyTest { + + private ByteBuffer routingKey 
= ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
+
+ private RegularStatement statement = new SimpleStatement("irrelevant").setRoutingKey(routingKey);
+
+ private Host host1 = mock(Host.class);
+ private Host host2 = mock(Host.class);
+ private Host host3 = mock(Host.class);
+ private Host host4 = mock(Host.class);
+
+ private LoadBalancingPolicy childPolicy;
+ private Cluster cluster;
+
+ @BeforeMethod(groups = "unit")
+ public void initMocks() {
+ CodecRegistry codecRegistry = new CodecRegistry();
+ cluster = mock(Cluster.class);
+ Configuration configuration = mock(Configuration.class);
+ ProtocolOptions protocolOptions = mock(ProtocolOptions.class);
+ Metadata metadata = mock(Metadata.class);
+ childPolicy = mock(LoadBalancingPolicy.class);
+ when(cluster.getConfiguration()).thenReturn(configuration);
+ when(configuration.getCodecRegistry()).thenReturn(codecRegistry);
+ when(configuration.getProtocolOptions()).thenReturn(protocolOptions);
+ when(protocolOptions.getProtocolVersion()).thenReturn(ProtocolVersion.NEWEST_SUPPORTED);
+ when(cluster.getMetadata()).thenReturn(metadata);
+ when(metadata.getReplicas(Metadata.quote("keyspace"), routingKey))
+ .thenReturn(Sets.newLinkedHashSet(host1, host2));
+ when(childPolicy.newQueryPlan("keyspace", statement))
+ .thenReturn(Sets.newLinkedHashSet(host4, host3, host2, host1).iterator());
+ when(childPolicy.distance(any(Host.class))).thenReturn(HostDistance.LOCAL);
+ when(host1.isUp()).thenReturn(true);
+ when(host2.isUp()).thenReturn(true);
+ when(host3.isUp()).thenReturn(true);
+ when(host4.isUp()).thenReturn(true);
+ }
+
+ @DataProvider(name = "shuffleProvider")
+ public Object[][] shuffleProvider() {
+ return new Object[][] {
+ {TokenAwarePolicy.ReplicaOrdering.TOPOLOGICAL},
+ {TokenAwarePolicy.ReplicaOrdering.RANDOM},
+ {TokenAwarePolicy.ReplicaOrdering.NEUTRAL}
+ };
+ }
+
+ @Test(groups = "unit")
+ public void should_respect_topological_order() {
+ // given
+ TokenAwarePolicy policy = new TokenAwarePolicy(childPolicy, TOPOLOGICAL);
+ policy.init(cluster, null);
+ // when
+ Iterator queryPlan = policy.newQueryPlan("keyspace", statement);
+ // then
+ assertThat(queryPlan).containsExactly(host1, host2, host4, host3);
+ }
+
+ @Test(groups = "unit")
+ public void should_respect_child_policy_order() {
+ // given
+ TokenAwarePolicy policy = new TokenAwarePolicy(childPolicy, NEUTRAL);
+ policy.init(cluster, null);
+ // when
+ Iterator queryPlan = policy.newQueryPlan("keyspace", statement);
+ // then
+ assertThat(queryPlan).containsExactly(host2, host1, host4, host3);
+ }
+
+ @Test(groups = "unit")
+ public void should_create_random_order() {
+ // given
+ TokenAwarePolicy policy = new TokenAwarePolicy(childPolicy, RANDOM);
+ policy.init(cluster, null);
+ // when
+ Iterator queryPlan = policy.newQueryPlan("keyspace", statement);
+ // then
+ assertThat(queryPlan).containsOnlyOnce(host1, host2, host3, host4).endsWith(host4, host3);
+ }
+
+ /**
+ * Ensures that {@link TokenAwarePolicy} will order discovered replicas depending on the {@link
+ * TokenAwarePolicy.ReplicaOrdering} used when constructing with {@link
+ * TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, TokenAwarePolicy.ReplicaOrdering)}.
+ * + * @test_category load_balancing:token_aware + */ + @Test(groups = "short", dataProvider = "shuffleProvider") + public void should_order_replicas_based_on_configuration( + TokenAwarePolicy.ReplicaOrdering ordering) { + // given: an 8 node cluster using TokenAwarePolicy and some shuffle replica configuration with a + // keyspace with replication factor of 3. + ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(8).withSimpleKeyspace("keyspace", 3).build(); + + LoadBalancingPolicy loadBalancingPolicy = + new TokenAwarePolicy(new SortingLoadBalancingPolicy(), ordering); + + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .withLoadBalancingPolicy(loadBalancingPolicy) + .build(); + + try { + sCluster.init(); + + // given: A routing key that falls in the token range of node 6. + + // Encodes into murmur hash '4874351301193663061' which should be owned by node 6 with + // replicas 7 and 8. + ByteBuffer routingKey = + TypeCodec.varchar() + .serialize("This is some sample text", ProtocolVersion.NEWEST_SUPPORTED); + + // then: The replicas resolved from the cluster metadata must match node 6 and its replicas. + List replicas = + Lists.newArrayList(cluster.getMetadata().getReplicas("keyspace", routingKey)); + assertThat(replicas) + .containsExactly( + sCluster.host(cluster, 1, 6), + sCluster.host(cluster, 1, 7), + sCluster.host(cluster, 1, 8)); + + // then: generating a query plan on a statement using that routing key should properly + // prioritize node 6 and its replicas. + // Actual query does not matter, only the keyspace and routing key will be used + SimpleStatement statement = new SimpleStatement("select * from table where k=5"); + statement.setRoutingKey(routingKey); + statement.setKeyspace("keyspace"); + + List queryPlan = Lists.newArrayList(loadBalancingPolicy.newQueryPlan(null, statement)); + assertThat(queryPlan).containsOnlyElementsOf(cluster.getMetadata().getAllHosts()); + + List firstThree = queryPlan.subList(0, 3); + // then: if ordering is RANDOM, the first three hosts returned should be 6,7,8 in any order. + // if ordering is TOPOLOGICAL or NEUTRAL, the first three hosts returned should be 6,7,8 + // in that order. + if (ordering == RANDOM) { + assertThat(firstThree).containsOnlyElementsOf(replicas); + } else { + assertThat(firstThree).containsExactlyElementsOf(replicas); + } + } finally { + cluster.close(); + sCluster.stop(); } - - @DataProvider(name = "shuffleProvider") - public Object[][] shuffleProvider() { - return new Object[][]{ - {true}, - {false}, - {null} - }; - } - - /** - * Ensures that {@link TokenAwarePolicy} will shuffle discovered replicas depending on the value of shuffleReplicas - * used when constructing with {@link TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, boolean)} and that if not - * provided replicas are shuffled by default when using {@link TokenAwarePolicy#TokenAwarePolicy(LoadBalancingPolicy, boolean)}. - * - * @test_category load_balancing:token_aware - */ - @Test(groups = "short", dataProvider = "shuffleProvider") - public void should_shuffle_replicas_based_on_configuration(Boolean shuffleReplicas) { - // given: an 8 node cluster using TokenAwarePolicy and some shuffle replica configuration with a keyspace with replication factor of 3. 
- ScassandraCluster sCluster = ScassandraCluster.builder() - .withNodes(8) - .withSimpleKeyspace("keyspace", 3) - .build(); - - LoadBalancingPolicy loadBalancingPolicy; - if (shuffleReplicas == null) { - loadBalancingPolicy = new TokenAwarePolicy(new RoundRobinPolicy()); - shuffleReplicas = true; - } else { - loadBalancingPolicy = new TokenAwarePolicy(new RoundRobinPolicy(), shuffleReplicas); - } - - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .withLoadBalancingPolicy(loadBalancingPolicy) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - - // given: A routing key that falls in the token range of node 6. - - // Encodes into murmur hash '4874351301193663061' which should belong be owned by node 6 with replicas 7 and 8. - ByteBuffer routingKey = TypeCodec.varchar().serialize("This is some sample text", ProtocolVersion.NEWEST_SUPPORTED); - - // then: The replicas resolved from the cluster metadata must match node 6 and its replicas. - List replicas = Lists.newArrayList(cluster.getMetadata().getReplicas("keyspace", routingKey)); - assertThat(replicas).containsExactly( - sCluster.host(cluster, 1, 6), - sCluster.host(cluster, 1, 7), - sCluster.host(cluster, 1, 8)); - - // then: generating a query plan on a statement using that routing key should properly prioritize node 6 and its replicas. - // Actual query does not matter, only the keyspace and routing key will be used - SimpleStatement statement = new SimpleStatement("select * from table where k=5"); - statement.setRoutingKey(routingKey); - statement.setKeyspace("keyspace"); - - boolean shuffledAtLeastOnce = false; - for (int i = 0; i < 1024; i++) { - List queryPlan = Lists.newArrayList(loadBalancingPolicy.newQueryPlan(null, statement)); - assertThat(queryPlan).containsOnlyElementsOf(cluster.getMetadata().getAllHosts()); - - List firstThree = queryPlan.subList(0, 3); - // then: if shuffle replicas was used or using default, the first three hosts returned should be 6,7,8 in any order. - // if shuffle replicas was not used, the first three hosts returned should be 6,7,8 in that order. - if (shuffleReplicas) { - assertThat(firstThree).containsOnlyElementsOf(replicas); - if (!firstThree.equals(replicas)) { - shuffledAtLeastOnce = true; - } - } else { - assertThat(firstThree).containsExactlyElementsOf(replicas); - } - } - - // then: given 1024 query plans, the replicas should be shuffled at least once. - assertThat(shuffledAtLeastOnce).isEqualTo(shuffleReplicas); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link TokenAwarePolicy} will properly prioritize replicas if a provided {@link + * SimpleStatement} is using an explicitly set keyspace and routing key and the keyspace provided + * is using SimpleStrategy with a replication factor of 1. + * + * @test_category load_balancing:token_aware + */ + @Test(groups = "short") + public void should_choose_proper_host_based_on_routing_key() { + // given: A 3 node cluster using TokenAwarePolicy with a replication factor of 1. 
+ ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(3).withSimpleKeyspace("keyspace", 1).build(); + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())) + .build(); + + // when: A query is made with a routing key + try { + sCluster.init(); + + Session session = cluster.connect(); + + // Encodes into murmur hash '4557949199137838892' which should be owned by node 3. + ByteBuffer routingKey = + TypeCodec.varchar() + .serialize( + "should_choose_proper_host_based_on_routing_key", + ProtocolVersion.NEWEST_SUPPORTED); + SimpleStatement statement = + new SimpleStatement("select * from table where k=5") + .setRoutingKey(routingKey) + .setKeyspace("keyspace"); + + QueryTracker queryTracker = new QueryTracker(); + queryTracker.query(session, 10, statement); + + // then: The host having that token should be queried. + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 10); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link TokenAwarePolicy} will properly prioritize replicas if a provided - * {@link SimpleStatement} is using an explicitly set keyspace and routing key and the - * keyspace provided is using SimpleStrategy with a replication factor of 1. - * - * @test_category load_balancing:token_aware - */ - @Test(groups = "short") - public void should_choose_proper_host_based_on_routing_key() { - // given: A 3 node cluster using TokenAwarePolicy with a replication factor of 1. - ScassandraCluster sCluster = ScassandraCluster.builder() - .withNodes(3) - .withSimpleKeyspace("keyspace", 1) - .build(); - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())) - .build(); - - // when: A query is made with a routing key - try { - sCluster.init(); - - Session session = cluster.connect(); - - // Encodes into murmur hash '4557949199137838892' which should belong be owned by node 3. - ByteBuffer routingKey = TypeCodec.varchar().serialize("should_choose_proper_host_based_on_routing_key", ProtocolVersion.NEWEST_SUPPORTED); - SimpleStatement statement = new SimpleStatement("select * from table where k=5") - .setRoutingKey(routingKey) - .setKeyspace("keyspace"); - - queryTracker.query(session, 10, statement); - - // then: The host having that token should be queried. - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 10); - } finally { - cluster.close(); - sCluster.stop(); - } - } - - /** - * Ensures that {@link TokenAwarePolicy} will properly prioritize replicas in the local datacenter - * if a provided {@link SimpleStatement} is using an explicitly set keyspace and routing key and - * the keyspace provided is using NetworkTopologyStrategy with an RF of 1:1. - * - * @test_category load_balancing:token_aware - */ - @Test(groups = "short") - public void should_choose_host_in_local_dc_when_using_network_topology_strategy_and_dc_aware() { - // given: A 6 node, 2 DC cluster with RF 1:1, using TokenAwarePolicy wrapping DCAwareRoundRobinPolicy with remote hosts. 
- ScassandraCluster sCluster = ScassandraCluster.builder() - .withNodes(3, 3) - .withNetworkTopologyKeyspace("keyspace", ImmutableMap.of(1, 1, 2, 1)) - .build(); - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(1).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - .withLoadBalancingPolicy(new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder() + } + + /** + * Ensures that {@link TokenAwarePolicy} will properly prioritize replicas in the local datacenter + * if a provided {@link SimpleStatement} is using an explicitly set keyspace and routing key and + * the keyspace provided is using NetworkTopologyStrategy with an RF of 1:1. + * + * @test_category load_balancing:token_aware + */ + @Test(groups = "short") + public void should_choose_host_in_local_dc_when_using_network_topology_strategy_and_dc_aware() { + // given: A 6 node, 2 DC cluster with RF 1:1, using TokenAwarePolicy wrapping + // DCAwareRoundRobinPolicy with remote hosts. + ScassandraCluster sCluster = + ScassandraCluster.builder() + .withNodes(3, 3) + .withNetworkTopologyKeyspace("keyspace", ImmutableMap.of(1, 1, 2, 1)) + .build(); + @SuppressWarnings("deprecation") + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .withLoadBalancingPolicy( + new TokenAwarePolicy( + DCAwareRoundRobinPolicy.builder() .withLocalDc(ScassandraCluster.datacenter(2)) .withUsedHostsPerRemoteDc(3) .build())) - .build(); - - // when: A query is made with a routing key - try { - sCluster.init(); - - Session session = cluster.connect(); - - // Encodes into murmur hash '-8124212968526248339' which should belong to 1:1 in DC1 and 2:1 in DC2. - ByteBuffer routingKey = TypeCodec.varchar().serialize("should_choose_host_in_local_dc_when_using_network_topology_strategy_and_dc_aware", ProtocolVersion.NEWEST_SUPPORTED); - SimpleStatement statement = new SimpleStatement("select * from table where k=5") - .setRoutingKey(routingKey) - .setKeyspace("keyspace"); - - queryTracker.query(session, 10, statement); - - // then: The local replica (2:1) should be queried and never the remote one. - queryTracker.assertQueried(sCluster, 2, 1, 10); - queryTracker.assertQueried(sCluster, 1, 1, 0); - } finally { - cluster.close(); - sCluster.stop(); - } + .build(); + + // when: A query is made with a routing key + try { + sCluster.init(); + + Session session = cluster.connect(); + + // Encodes into murmur hash '-8124212968526248339' which should belong to 1:1 in DC1 and 2:1 + // in DC2. + ByteBuffer routingKey = + TypeCodec.varchar() + .serialize( + "should_choose_host_in_local_dc_when_using_network_topology_strategy_and_dc_aware", + ProtocolVersion.NEWEST_SUPPORTED); + SimpleStatement statement = + new SimpleStatement("select * from table where k=5") + .setRoutingKey(routingKey) + .setKeyspace("keyspace"); + + QueryTracker queryTracker = new QueryTracker(); + queryTracker.query(session, 10, statement); + + // then: The local replica (2:1) should be queried and never the remote one. 
+ queryTracker.assertQueried(sCluster, 2, 1, 10); + queryTracker.assertQueried(sCluster, 1, 1, 0); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link TokenAwarePolicy} will properly handle unavailability of replicas - * matching with routing keys by falling back on its child policy and that when those - * replicas become available the policy uses those replicas once again. - * - * @test_category load_balancing:token_aware - */ - @Test(groups = "short") - public void should_use_other_nodes_when_replicas_having_token_are_down() { - // given: A 4 node cluster using TokenAwarePolicy with a replication factor of 2. - ScassandraCluster sCluster = ScassandraCluster.builder() - .withNodes(4) - .withSimpleKeyspace("keyspace", 2) - .build(); - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(2).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - // Don't shuffle replicas just to keep test deterministic. - .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy(), false)) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - - // when: A query is made with a routing key and both hosts having that key's token are down. - // Encodes into murmur hash '6444339665561646341' which should belong to node 4. - ByteBuffer routingKey = TypeCodec.varchar().serialize("should_use_other_nodes_when_replicas_having_token_are_down", ProtocolVersion.NEWEST_SUPPORTED); - SimpleStatement statement = new SimpleStatement("select * from table where k=5") - .setRoutingKey(routingKey) - .setKeyspace("keyspace"); - - queryTracker.query(session, 10, statement); - - // then: The node that is the primary for that key's hash is chosen. - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 10); - - // when: The primary node owning that key goes down and a query is made. - queryTracker.reset(); - sCluster.stop(cluster, 4); - queryTracker.query(session, 10, statement); - - // then: The next replica having that data should be chosen (node 1). - queryTracker.assertQueried(sCluster, 1, 1, 10); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 0); - - // when: All nodes having that token are down and a query is made. - queryTracker.reset(); - sCluster.stop(cluster, 1); - queryTracker.query(session, 10, statement); - - // then: The remaining nodes which are non-replicas of that token should be used - // delegating to the child policy (RoundRobin). - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 5); - queryTracker.assertQueried(sCluster, 1, 3, 5); - queryTracker.assertQueried(sCluster, 1, 4, 0); - - // when: A replica having that key becomes up and a query is made. - queryTracker.reset(); - sCluster.start(cluster, 1); - queryTracker.query(session, 10, statement); - - // then: The newly up replica should be queried. - queryTracker.assertQueried(sCluster, 1, 1, 10); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 0); - - // when: The primary replicas becomes up and a query is made. 
- queryTracker.reset(); - sCluster.start(cluster, 4); - queryTracker.query(session, 10, statement); - - // then: The primary replica which is now up should be queried. - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 10); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link TokenAwarePolicy} will properly handle unavailability of replicas matching + * with routing keys by falling back on its child policy and that when those replicas become + * available the policy uses those replicas once again. + * + * @test_category load_balancing:token_aware + */ + @Test(groups = "short") + public void should_use_other_nodes_when_replicas_having_token_are_down() { + // given: A 4 node cluster using TokenAwarePolicy with a replication factor of 2. + ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(4).withSimpleKeyspace("keyspace", 2).build(); + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(2).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + // Don't shuffle replicas just to keep test deterministic. + .withLoadBalancingPolicy( + new TokenAwarePolicy(new SortingLoadBalancingPolicy(), NEUTRAL)) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + + // when: A query is made with a routing key and both hosts having that key's token are down. + // Encodes into murmur hash '6444339665561646341' which should belong to node 4. + ByteBuffer routingKey = + TypeCodec.varchar() + .serialize( + "should_use_other_nodes_when_replicas_having_token_are_down", + ProtocolVersion.NEWEST_SUPPORTED); + SimpleStatement statement = + new SimpleStatement("select * from table where k=5") + .setRoutingKey(routingKey) + .setKeyspace("keyspace"); + + QueryTracker queryTracker = new QueryTracker(); + queryTracker.query(session, 10, statement); + + // then: primary replica is 4, secondary is 1; since the child policy returns [1,2,3,4], the + // TAP reorders the plan to [1,4,2,3]. Only 1 should be queried + queryTracker.assertQueried(sCluster, 1, 1, 10); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 0); + + // when: The secondary node owning that key (1) goes down and a query is made. + queryTracker.reset(); + sCluster.stop(cluster, 1); + queryTracker.query(session, 10, statement); + + // then: The next replica having that data should be chosen (node 4 - primary replica). + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 10); + + // when: All nodes having that token are down and a query is made. + queryTracker.reset(); + sCluster.stop(cluster, 4); + queryTracker.query(session, 10, statement); + + // then: The remaining nodes which are non-replicas of that token should be used + // delegating to the child policy. + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 10); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 0); + + // when: A replica having that key (4) becomes up and a query is made. 
+ queryTracker.reset(); + sCluster.start(cluster, 4); + queryTracker.query(session, 10, statement); + + // then: The newly up replica should be queried. + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 10); + + // when: The other replica becomes up and a query is made. + queryTracker.reset(); + sCluster.start(cluster, 1); + queryTracker.query(session, 10, statement); + + // then: The secondary replica (1) which is now up should be queried. + queryTracker.assertQueried(sCluster, 1, 1, 10); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 0); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Validates that when overriding a routing key on a {@link BoundStatement} - * using {@link BoundStatement#setRoutingKey(ByteBuffer...)} and - * {@link BoundStatement#setRoutingKey(ByteBuffer)} that this routing key is used to determine - * which hosts to route queries to. - * - * @test_category load_balancing:token_aware - */ - @Test(groups = "short") - public void should_use_provided_routing_key_boundstatement() { - // given: A 4 node cluster using TokenAwarePolicy with a replication factor of 1. - ScassandraCluster sCluster = ScassandraCluster.builder() - .withNodes(4) - .withSimpleKeyspace("keyspace", 1) - .build(); - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(2).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withNettyOptions(nonQuietClusterCloseOptions) - // Don't shuffle replicas just to keep test deterministic. - .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy(), false)) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect("keyspace"); - - PreparedStatement preparedStatement = session.prepare("insert into tbl (k0, v) values (?, ?)"); - // bind text values since scassandra defaults to use varchar if not primed. - // this is inconsequential in this case since we are explicitly providing the routing key. - BoundStatement bs = preparedStatement.bind("a", "b"); - - // Derive a routing key for single routing key component, this should resolve to - // '4891967783720036163' - ByteBuffer routingKey = TypeCodec.bigint().serialize(33L, ProtocolVersion.NEWEST_SUPPORTED); - bs.setRoutingKey(routingKey); - - queryTracker.query(session, 10, bs); - - // Expect only node 3 to have been queried, give it has ownership of that partition - // (token range is (4611686018427387902, 6917529027641081853]) - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 10); - - // reset counts. 
- queryTracker.reset(); - - // Derive a routing key for multiple routing key components, this should resolve to - // '3735658072872431718' - bs = preparedStatement.bind("a", "b"); - ByteBuffer routingKeyK0Part = TypeCodec.bigint().serialize(42L, ProtocolVersion.NEWEST_SUPPORTED); - ByteBuffer routingKeyK1Part = TypeCodec.varchar().serialize("hello_world", ProtocolVersion.NEWEST_SUPPORTED); - bs.setRoutingKey(routingKeyK0Part, routingKeyK1Part); - - queryTracker.query(session, 10, bs); - - // Expect only node 3 to have been queried, give it has ownership of that partition - // (token range is (2305843009213693951, 4611686018427387902]) - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 10); - queryTracker.assertQueried(sCluster, 1, 4, 0); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Validates that when overriding a routing key on a {@link BoundStatement} using {@link + * BoundStatement#setRoutingKey(ByteBuffer...)} and {@link + * BoundStatement#setRoutingKey(ByteBuffer)} that this routing key is used to determine which + * hosts to route queries to. + * + * @test_category load_balancing:token_aware + */ + @Test(groups = "short") + public void should_use_provided_routing_key_boundstatement() { + // given: A 4 node cluster using TokenAwarePolicy with a replication factor of 1. + ScassandraCluster sCluster = + ScassandraCluster.builder().withNodes(4).withSimpleKeyspace("keyspace", 1).build(); + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(2).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + // Don't shuffle replicas just to keep test deterministic. + .withLoadBalancingPolicy( + new TokenAwarePolicy(new SortingLoadBalancingPolicy(), NEUTRAL)) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect("keyspace"); + + PreparedStatement preparedStatement = + session.prepare("insert into tbl (k0, v) values (?, ?)"); + // bind text values since scassandra defaults to use varchar if not primed. + // this is inconsequential in this case since we are explicitly providing the routing key. + BoundStatement bs = preparedStatement.bind("a", "b"); + + // Derive a routing key for single routing key component, this should resolve to + // '4891967783720036163' + ByteBuffer routingKey = TypeCodec.bigint().serialize(33L, ProtocolVersion.NEWEST_SUPPORTED); + bs.setRoutingKey(routingKey); + + QueryTracker queryTracker = new QueryTracker(); + queryTracker.query(session, 10, bs); + + // Expect only node 3 to have been queried, give it has ownership of that partition + // (token range is (4611686018427387902, 6917529027641081853]) + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 10); + + // reset counts. 
+ queryTracker.reset(); + + // Derive a routing key for multiple routing key components, this should resolve to + // '3735658072872431718' + bs = preparedStatement.bind("a", "b"); + ByteBuffer routingKeyK0Part = + TypeCodec.bigint().serialize(42L, ProtocolVersion.NEWEST_SUPPORTED); + ByteBuffer routingKeyK1Part = + TypeCodec.varchar().serialize("hello_world", ProtocolVersion.NEWEST_SUPPORTED); + bs.setRoutingKey(routingKeyK0Part, routingKeyK1Part); + + queryTracker.query(session, 10, bs); + + // Expect only node 3 to have been queried, give it has ownership of that partition + // (token range is (2305843009213693951, 4611686018427387902]) + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 10); + queryTracker.assertQueried(sCluster, 1, 4, 0); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Ensures that {@link TokenAwarePolicy} will properly handle a routing key for a {@link PreparedStatement} - * whose table uses multiple columns for its partition key. - * - * @test_category load_balancing:token_aware - * @jira_ticket JAVA-123 (to ensure routing key buffers are not destroyed). - */ - @CCMConfig(createCcm = true, numberOfNodes = 3, createCluster = false) - @Test(groups = "long") - public void should_properly_generate_and_use_routing_key_for_composite_partition_key() { - // given: a 3 node cluster with a keyspace with RF 1. - Cluster cluster = register(Cluster.builder() - .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())) - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); - Session session = cluster.connect(); - - String table = "composite"; - String ks = TestUtils.generateIdentifier("ks_"); - session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ks, 1)); - session.execute("USE " + ks); - session.execute(String.format("CREATE TABLE %s (k1 int, k2 int, i int, PRIMARY KEY ((k1, k2)))", table)); - - // (1,2) resolves to token '4881097376275569167' which belongs to node 1 so all queries should go to that node. - PreparedStatement insertPs = session.prepare("INSERT INTO " + table + "(k1, k2, i) VALUES (?, ?, ?)"); - BoundStatement insertBs = insertPs.bind(1, 2, 3); - - PreparedStatement selectPs = session.prepare("SELECT * FROM " + table + " WHERE k1=? and k2=?"); - BoundStatement selectBs = selectPs.bind(1, 2); - - // when: executing a prepared statement with a composite partition key. - // then: should query the correct node (1) in for both insert and select queries. - for (int i = 0; i < 10; i++) { - ResultSet rs = session.execute(insertBs); - assertThat(rs.getExecutionInfo().getQueriedHost()).isEqualTo(TestUtils.findHost(cluster, 1)); - - rs = session.execute(selectBs); - assertThat(rs.getExecutionInfo().getQueriedHost()).isEqualTo(TestUtils.findHost(cluster, 1)); - assertThat(rs.isExhausted()).isFalse(); - Row r = rs.one(); - assertThat(rs.isExhausted()).isTrue(); - - assertThat(r.getInt("i")).isEqualTo(3); - } + } + + /** + * Ensures that {@link TokenAwarePolicy} will properly handle a routing key for a {@link + * PreparedStatement} whose table uses multiple columns for its partition key. + * + * @test_category load_balancing:token_aware + * @jira_ticket JAVA-123 (to ensure routing key buffers are not destroyed). + */ + @Test(groups = "long") + public void should_properly_generate_and_use_routing_key_for_composite_partition_key() { + // given: a 3 node cluster with a keyspace with RF 1. 
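The two cases above set the routing key explicitly on a BoundStatement, while the composite partition key test that continues below lets the driver derive it from the bound values. A minimal, hypothetical application-side sketch of the explicit form (contact point, keyspace, and table names are invented; it assumes a reachable cluster whose partition key is a single bigint column):

```
import java.nio.ByteBuffer;

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

public class ExplicitRoutingKeyExample {
  public static void main(String[] args) {
    // Token awareness only helps if the statement carries a routing key.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()))
            .build();
    try {
      Session session = cluster.connect("ks");
      PreparedStatement ps = session.prepare("INSERT INTO tbl (k0, v) VALUES (?, ?)");
      BoundStatement bs = ps.bind("a", "b");
      // Override the routing key instead of letting the driver compute it from the bound values.
      ByteBuffer key = TypeCodec.bigint().serialize(33L, ProtocolVersion.NEWEST_SUPPORTED);
      bs.setRoutingKey(key);
      session.execute(bs);
    } finally {
      cluster.close();
    }
  }
}
```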
+ CCMBridge ccm = CCMBridge.builder().withNodes(3).build(); + + ccm.start(); + + Cluster cluster = + TestUtils.configureClusterBuilder(Cluster.builder(), ccm) + .withNettyOptions(nonQuietClusterCloseOptions) + .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())) + .build(); + + try { + + Session session = cluster.connect(); + + String ks = TestUtils.generateIdentifier("ks_"); + session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, ks, 1)); + session.execute("USE " + ks); + session.execute("CREATE TABLE composite (k1 int, k2 int, i int, PRIMARY KEY ((k1, k2)))"); + + // (1,2) resolves to token '4881097376275569167' which belongs to node 1 so all queries should + // go to that node. + PreparedStatement insertPs = + session.prepare("INSERT INTO composite(k1, k2, i) VALUES (?, ?, ?)"); + BoundStatement insertBs = insertPs.bind(1, 2, 3); + + PreparedStatement selectPs = session.prepare("SELECT * FROM composite WHERE k1=? and k2=?"); + BoundStatement selectBs = selectPs.bind(1, 2); + + // when: executing a prepared statement with a composite partition key. + // then: should query the correct node (1) in for both insert and select queries. + Host host1 = TestUtils.findHost(cluster, 1); + for (int i = 0; i < 10; i++) { + ResultSet rs = session.execute(insertBs); + assertThat(rs.getExecutionInfo().getQueriedHost()).isEqualTo(host1); + + rs = session.execute(selectBs); + assertThat(rs.getExecutionInfo().getQueriedHost()).isEqualTo(host1); + assertThat(rs.isExhausted()).isFalse(); + Row r = rs.one(); + assertThat(rs.isExhausted()).isTrue(); + + assertThat(r.getInt("i")).isEqualTo(3); + } + } finally { + cluster.close(); + ccm.remove(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/WhiteListPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/WhiteListPolicyTest.java index 537dd8bf010..ce4efca5c3c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/WhiteListPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/WhiteListPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,113 +17,191 @@ */ package com.datastax.driver.core.policies; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.QueryTracker; +import com.datastax.driver.core.ScassandraCluster; +import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.google.common.collect.Lists; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - import java.net.InetSocketAddress; import java.util.List; - -import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; public class WhiteListPolicyTest { - QueryTracker queryTracker; - - @BeforeMethod(groups = "short") - public void setUp() { - queryTracker = new QueryTracker(); + QueryTracker queryTracker; + + @BeforeMethod(groups = "short") + public void setUp() { + queryTracker = new QueryTracker(); + } + + /** + * Provides basic validation of {@link WhiteListPolicy}. + * + *

+   * <p>Ensures that:
+   *
+   * <ol>
+   *   <li>Only addresses provided in the whitelist are ever used for querying.
+   *   <li>If no nodes present in the whitelist are available, queries fail with a {@link
+   *       NoHostAvailableException}
+   * </ol>
    + * + * @test_category load_balancing:white_list + */ + @Test(groups = "short") + public void should_only_query_hosts_in_white_list() throws Exception { + // given: a 5 node cluster with a WhiteListPolicy targeting nodes 3 and 5. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); + List whiteList = + Lists.newArrayList(sCluster.address(3), sCluster.address(5)); + + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(5).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), whiteList)) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + // when: a query is executed 50 times. + queryTracker.query(session, 50); + + // then: only nodes 3 and 5 should have been queried. + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 25); + queryTracker.assertQueried(sCluster, 1, 4, 0); + queryTracker.assertQueried(sCluster, 1, 5, 25); + + queryTracker.reset(); + + // when: the only nodes in the whitelist are stopped. + sCluster.stop(cluster, 3); + sCluster.stop(cluster, 5); + + // then: all queries should raise a NHAE. + queryTracker.query(session, 50, ConsistencyLevel.ONE, NoHostAvailableException.class); + + queryTracker.assertQueried(sCluster, 1, 1, 0); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 0); + queryTracker.assertQueried(sCluster, 1, 5, 0); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Provides basic validation of {@link WhiteListPolicy}. - *

- * <p/>
- * Ensures that:
- * <p/>
- * <ol>
- * <li>Only addresses provided in the whitelist are ever used for querying.</li>
- * <li>If no nodes present in the whitelist are available, queries fail with a {@link NoHostAvailableException}</li>
- * </ol>
    - * - * @test_category load_balancing:white_list - */ - @Test(groups = "short") - public void should_only_query_hosts_in_white_list() throws Exception { - // given: a 5 node cluster with a WhiteListPolicy targeting nodes 3 and 5. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); - List whiteList = Lists.newArrayList(sCluster.address(3), sCluster.address(5)); - - Cluster cluster = Cluster.builder() - .addContactPoints(sCluster.address(5).getAddress()) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), whiteList)) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - sCluster.init(); - - Session session = cluster.connect(); - // when: a query is executed 50 times. - queryTracker.query(session, 50); - - // then: only nodes 3 and 5 should have been queried. - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 25); - queryTracker.assertQueried(sCluster, 1, 4, 0); - queryTracker.assertQueried(sCluster, 1, 5, 25); - - queryTracker.reset(); - - // when: the only nodes in the whitelist are stopped. - sCluster.stop(cluster, 3); - sCluster.stop(cluster, 5); - - // then: all queries should raise a NHAE. - queryTracker.query(session, 50, ConsistencyLevel.ONE, NoHostAvailableException.class); - - queryTracker.assertQueried(sCluster, 1, 1, 0); - queryTracker.assertQueried(sCluster, 1, 2, 0); - queryTracker.assertQueried(sCluster, 1, 3, 0); - queryTracker.assertQueried(sCluster, 1, 4, 0); - queryTracker.assertQueried(sCluster, 1, 5, 0); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Ensures that {@link WhiteListPolicy#ofHosts(LoadBalancingPolicy, String...)} throws an {@link + * IllegalArgumentException} if a name could not be resolved. + * + * @test_category load_balancing:white_list + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_throw_IAE_if_name_could_not_be_resolved() { + WhiteListPolicy.ofHosts(new RoundRobinPolicy(), "a.b.c.d.e.f.UNRESOLVEABLE"); + } + + /** + * Ensures that {@link WhiteListPolicy#ofHosts(LoadBalancingPolicy, String...)} throws a {@link + * NullPointerException} if a name provided is null. + * + * @test_category load_balancing:white_list + */ + @Test(groups = "unit", expectedExceptions = NullPointerException.class) + public void should_throw_NPE_if_null_provided() { + WhiteListPolicy.ofHosts(new RoundRobinPolicy(), null, null); + } + + /** + * Ensures that {@link WhiteListPolicy#ofHosts(LoadBalancingPolicy, String...)} appropriately + * choses hosts based on their resolved ip addresses. + * + * @test_category load_balancing:white_list + */ + @Test(groups = "short") + public void should_only_query_hosts_in_white_list_from_hosts() throws Exception { + // given: a 5 node cluster with a WhiteListPolicy targeting nodes 1 and 4 + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); + + // In this case, we can't rely on DNS. However, node 1 should be 127.0.0.1 which depending on + // /etc/hosts configuration is likely to resolve the name of the machine running the test. 
+ WhiteListPolicy policy = + WhiteListPolicy.ofHosts( + new RoundRobinPolicy(), + sCluster.address(1).getHostName(), + sCluster.address(4).getHostName()); + + Cluster cluster = + Cluster.builder() + .addContactPoints(sCluster.address(1).getAddress()) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(policy) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + sCluster.init(); + + Session session = cluster.connect(); + // when: a query is executed 50 times. + queryTracker.query(session, 50); + + // then: only nodes 1 and 4 should have been queried. + queryTracker.assertQueried(sCluster, 1, 1, 25); + queryTracker.assertQueried(sCluster, 1, 2, 0); + queryTracker.assertQueried(sCluster, 1, 3, 0); + queryTracker.assertQueried(sCluster, 1, 4, 25); + queryTracker.assertQueried(sCluster, 1, 5, 0); + } finally { + cluster.close(); + sCluster.stop(); } - - /** - * Validates that a {@link Cluster} cannot be initiated if using a {@link WhiteListPolicy} and - * none of the specified contact point addresses are present in the white list. - * - * @test_category load_balancing:white_list - */ - @Test(groups = "short", expectedExceptions = {IllegalArgumentException.class}) - public void should_require_contact_point_in_white_list() throws Exception { - // given: a 5 node cluster with a WhiteListPolicy targeting node2. - ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); - - // when: using a Cluster instance with none of the contact points in the declared - // WhiteListPolicy. - List whiteList = Lists.newArrayList(sCluster.address(2)); - Cluster cluster = Cluster.builder() - .addContactPointsWithPorts(sCluster.address(3)) - .withPort(sCluster.getBinaryPort()) - .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), whiteList)) - .withNettyOptions(nonQuietClusterCloseOptions) - .build(); - - try { - sCluster.init(); - // then: The cluster instance should fail to initialize as none of the contact - // points is present in the white list. - cluster.init(); - } finally { - cluster.close(); - sCluster.stop(); - } + } + + /** + * Validates that a {@link Cluster} cannot be initiated if using a {@link WhiteListPolicy} and + * none of the specified contact point addresses are present in the white list. + * + * @test_category load_balancing:white_list + */ + @Test( + groups = "short", + expectedExceptions = {IllegalArgumentException.class}) + public void should_require_contact_point_in_white_list() throws Exception { + // given: a 5 node cluster with a WhiteListPolicy targeting node2. + ScassandraCluster sCluster = ScassandraCluster.builder().withNodes(5).build(); + + // when: using a Cluster instance with none of the contact points in the declared + // WhiteListPolicy. + List whiteList = Lists.newArrayList(sCluster.address(2)); + Cluster cluster = + Cluster.builder() + .addContactPointsWithPorts(sCluster.address(3)) + .withPort(sCluster.getBinaryPort()) + .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), whiteList)) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + + try { + sCluster.init(); + // then: The cluster instance should fail to initialize as none of the contact + // points is present in the white list. 
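Taken together, these tests pin down the WhiteListPolicy contract: only whitelisted hosts are ever queried, ofHosts resolves host names (rejecting unresolvable or null ones), and, as the test below checks, at least one contact point must itself be whitelisted. A rough, hypothetical configuration sketch (addresses invented):

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.WhiteListPolicy;

public class WhiteListExample {
  public static void main(String[] args) {
    // The contact point below is also part of the whitelist; otherwise
    // Cluster initialization fails with an IllegalArgumentException.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("10.0.0.3")
            .withLoadBalancingPolicy(
                WhiteListPolicy.ofHosts(new RoundRobinPolicy(), "10.0.0.3", "10.0.0.5"))
            .build();
    try {
      Session session = cluster.connect();
      // Only 10.0.0.3 and 10.0.0.5 will ever be queried by this session.
      session.execute("SELECT release_version FROM system.local");
    } finally {
      cluster.close();
    }
  }
}
```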
+ cluster.init(); + } finally { + cluster.close(); + sCluster.stop(); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/BatchIdempotencyTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/BatchIdempotencyTest.java index 76b4ff99b36..2d48e143c77 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/BatchIdempotencyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/BatchIdempotencyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,28 +22,28 @@ public class BatchIdempotencyTest extends AbstractBatchIdempotencyTest { - @Override - protected AbstractBatchIdempotencyTest.TestBatch createBatch() { - return new TestBatchWrapper(); - } + @Override + protected AbstractBatchIdempotencyTest.TestBatch createBatch() { + return new TestBatchWrapper(); + } - static class TestBatchWrapper implements TestBatch { + static class TestBatchWrapper implements TestBatch { - private final Batch batch = QueryBuilder.batch(); + private final Batch batch = QueryBuilder.batch(); - @Override - public void add(RegularStatement statement) { - batch.add(statement); - } + @Override + public void add(RegularStatement statement) { + batch.add(statement); + } - @Override - public Boolean isIdempotent() { - return batch.isIdempotent(); - } + @Override + public Boolean isIdempotent() { + return batch.isIdempotent(); + } - @Override - public void setIdempotent(boolean idempotent) { - batch.setIdempotent(idempotent); - } + @Override + public void setIdempotent(boolean idempotent) { + batch.setIdempotent(idempotent); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilder21ExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilder21ExecutionTest.java index dd73519d5a7..85859dc3056 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilder21ExecutionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilder21ExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,12 @@ */ package com.datastax.driver.core.querybuilder; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.contains; +import static com.datastax.driver.core.querybuilder.QueryBuilder.containsKey; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + import com.datastax.driver.core.CCMTestsSupport; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.ResultSet; @@ -22,85 +30,93 @@ import com.datastax.driver.core.utils.CassandraVersion; import org.testng.annotations.Test; -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - @CassandraVersion("2.1.0") public class QueryBuilder21ExecutionTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - // Taken from http://www.datastax.com/dev/blog/cql-in-2-1 - execute( - "CREATE TABLE products (id int PRIMARY KEY, description text, price int, categories set, buyers list, features_keys map, features_values map)", - "CREATE INDEX cat_index ON products(categories)", - "CREATE INDEX buyers_index ON products(buyers)", - "CREATE INDEX feat_index ON products(features_values)", - "CREATE INDEX feat_key_index ON products(KEYS(features_keys))", - "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + - "VALUES (34134, '120-inch 1080p 3D plasma TV', 9999, {'tv', '3D', 'hdtv'}, [1], {'screen' : '120-inch', 'refresh-rate' : '400hz', 'techno' : 'plasma'}, {'screen' : '120-inch', 'refresh-rate' : '400hz', 'techno' : 'plasma'})", - "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + - "VALUES (29412, '32-inch LED HDTV (black)', 929, {'tv', 'hdtv'}, [1,2,3], {'screen' : '32-inch', 'techno' : 'LED'}, {'screen' : '32-inch', 'techno' : 'LED'})", - "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + - "VALUES (38471, '32-inch LCD TV', 110, {'tv', 'used'}, [2,4], {'screen' : '32-inch', 'techno' : 'LCD'}, {'screen' : '32-inch', 'techno' : 'LCD'})" - ); - } - - @Test(groups = "short") - public void should_handle_contains_on_set_with_index() { - PreparedStatement byCategory = session().prepare(select("id", "description", "categories") - .from("products") - .where(contains("categories", bindMarker("category")))); - - ResultSet results = session().execute(byCategory.bind().setString("category", "hdtv")); - - assertThat(results.getAvailableWithoutFetching()).isEqualTo(2); - for (Row row : results) { - assertThat(row.getSet("categories", String.class)).contains("hdtv"); - } + @Override + public void onTestContextInitialized() { + // Taken from http://www.datastax.com/dev/blog/cql-in-2-1 + execute( + "CREATE TABLE products (id int PRIMARY KEY, description text, price int, categories set, buyers list, features_keys map, features_values map)", + "CREATE INDEX 
cat_index ON products(categories)", + "CREATE INDEX buyers_index ON products(buyers)", + "CREATE INDEX feat_index ON products(features_values)", + "CREATE INDEX feat_key_index ON products(KEYS(features_keys))", + "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + + "VALUES (34134, '120-inch 1080p 3D plasma TV', 9999, {'tv', '3D', 'hdtv'}, [1], {'screen' : '120-inch', 'refresh-rate' : '400hz', 'techno' : 'plasma'}, {'screen' : '120-inch', 'refresh-rate' : '400hz', 'techno' : 'plasma'})", + "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + + "VALUES (29412, '32-inch LED HDTV (black)', 929, {'tv', 'hdtv'}, [1,2,3], {'screen' : '32-inch', 'techno' : 'LED'}, {'screen' : '32-inch', 'techno' : 'LED'})", + "INSERT INTO products(id, description, price, categories, buyers, features_keys, features_values) " + + "VALUES (38471, '32-inch LCD TV', 110, {'tv', 'used'}, [2,4], {'screen' : '32-inch', 'techno' : 'LCD'}, {'screen' : '32-inch', 'techno' : 'LCD'})"); + } + + @Test(groups = "short") + public void should_handle_contains_on_set_with_index() { + PreparedStatement byCategory = + session() + .prepare( + select("id", "description", "categories") + .from("products") + .where(contains("categories", bindMarker("category")))); + + ResultSet results = session().execute(byCategory.bind().setString("category", "hdtv")); + + assertThat(results.getAvailableWithoutFetching()).isEqualTo(2); + for (Row row : results) { + assertThat(row.getSet("categories", String.class)).contains("hdtv"); } - - @Test(groups = "short") - public void should_handle_contains_on_list_with_index() { - PreparedStatement byBuyer = session().prepare(select("id", "description", "buyers") - .from("products") - .where(contains("buyers", bindMarker("buyer")))); - - ResultSet results = session().execute(byBuyer.bind().setInt("buyer", 4)); - - Row row = results.one(); - assertThat(row).isNotNull(); - assertThat(row.getInt("id")).isEqualTo(38471); - assertThat(row.getList("buyers", Integer.class)).contains(4); - } - - @Test(groups = "short") - public void should_handle_contains_on_map_with_index() { - PreparedStatement byFeatures = session().prepare(select("id", "description", "features_values") - .from("products") - .where(contains("features_values", bindMarker("feature")))); - - ResultSet results = session().execute(byFeatures.bind().setString("feature", "LED")); - - Row row = results.one(); - assertThat(row).isNotNull(); - assertThat(row.getInt("id")).isEqualTo(29412); - assertThat(row.getMap("features_values", String.class, String.class)).containsEntry("techno", "LED"); - } - - - @Test(groups = "short") - public void should_handle_contains_key_on_map_with_index() { - PreparedStatement byFeatures = session().prepare(select("id", "description", "features_keys") - .from("products") - .where(containsKey("features_keys", bindMarker("feature")))); - - ResultSet results = session().execute(byFeatures.bind().setString("feature", "refresh-rate")); - - Row row = results.one(); - assertThat(row).isNotNull(); - assertThat(row.getInt("id")).isEqualTo(34134); - assertThat(row.getMap("features_keys", String.class, String.class)).containsEntry("refresh-rate", "400hz"); - } - + } + + @Test(groups = "short") + public void should_handle_contains_on_list_with_index() { + PreparedStatement byBuyer = + session() + .prepare( + select("id", "description", "buyers") + .from("products") + .where(contains("buyers", bindMarker("buyer")))); + + ResultSet results = 
session().execute(byBuyer.bind().setInt("buyer", 4)); + + Row row = results.one(); + assertThat(row).isNotNull(); + assertThat(row.getInt("id")).isEqualTo(38471); + assertThat(row.getList("buyers", Integer.class)).contains(4); + } + + @Test(groups = "short") + public void should_handle_contains_on_map_with_index() { + PreparedStatement byFeatures = + session() + .prepare( + select("id", "description", "features_values") + .from("products") + .where(contains("features_values", bindMarker("feature")))); + + ResultSet results = session().execute(byFeatures.bind().setString("feature", "LED")); + + Row row = results.one(); + assertThat(row).isNotNull(); + assertThat(row.getInt("id")).isEqualTo(29412); + assertThat(row.getMap("features_values", String.class, String.class)) + .containsEntry("techno", "LED"); + } + + @Test(groups = "short") + public void should_handle_contains_key_on_map_with_index() { + PreparedStatement byFeatures = + session() + .prepare( + select("id", "description", "features_keys") + .from("products") + .where(containsKey("features_keys", bindMarker("feature")))); + + ResultSet results = session().execute(byFeatures.bind().setString("feature", "refresh-rate")); + + Row row = results.one(); + assertThat(row).isNotNull(); + assertThat(row.getInt("id")).isEqualTo(34134); + assertThat(row.getMap("features_keys", String.class, String.class)) + .containsEntry("refresh-rate", "400hz"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java index 5fb775d685c..f7b0d2690ec 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,614 +17,961 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.Assertions.assertThat; +import static com.datastax.driver.core.ResultSetAssert.row; +import static com.datastax.driver.core.querybuilder.QueryBuilder.batch; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.cast; +import static com.datastax.driver.core.querybuilder.QueryBuilder.column; +import static com.datastax.driver.core.querybuilder.QueryBuilder.count; +import static com.datastax.driver.core.querybuilder.QueryBuilder.delete; +import static com.datastax.driver.core.querybuilder.QueryBuilder.desc; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fcall; +import static com.datastax.driver.core.querybuilder.QueryBuilder.gt; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.like; +import static com.datastax.driver.core.querybuilder.QueryBuilder.max; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.token; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createTable; +import static java.util.concurrent.TimeUnit.MINUTES; +import static org.assertj.core.data.MapEntry.entry; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import com.datastax.driver.core.CCMTestsSupport; +import com.datastax.driver.core.ConditionChecker; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.MaterializedViewMetadata; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.TestUtils; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.utils.CassandraVersion; -import org.assertj.core.api.iterable.Extractor; -import org.testng.annotations.Test; - import java.util.Date; import java.util.List; import java.util.Map; import java.util.Set; - -import static com.datastax.driver.core.Assertions.assertThat; -import static com.datastax.driver.core.ResultSetAssert.row; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createTable; -import static org.assertj.core.data.MapEntry.entry; -import static org.testng.Assert.*; +import java.util.concurrent.Callable; +import org.assertj.core.api.iterable.Extractor; +import org.testng.annotations.Test; public class QueryBuilderExecutionTest extends CCMTestsSupport { - private static final 
String TABLE1 = TestUtils.generateIdentifier("test1"); - private static final String TABLE2 = TestUtils.generateIdentifier("test2"); - - @Override - public void onTestContextInitialized() { - execute( - String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE1), - String.format("CREATE TABLE %s (k text, t text, i int, f float, PRIMARY KEY (k, t))", TABLE2), - "CREATE TABLE dateTest (t timestamp PRIMARY KEY)", - "CREATE TABLE test_coll (k int PRIMARY KEY, a list, b map, c set)", - "CREATE TABLE test_ppl (a int, b int, c int, PRIMARY KEY (a, b))", - insertInto(TABLE2).value("k", "cast_t").value("t", "a").value("i", 1).value("f", 1.1).toString(), - insertInto(TABLE2).value("k", "cast_t").value("t", "b").value("i", 2).value("f", 2.5).toString(), - insertInto(TABLE2).value("k", "cast_t").value("t", "c").value("i", 3).value("f", 3.7).toString(), - insertInto(TABLE2).value("k", "cast_t").value("t", "d").value("i", 4).value("f", 5.0).toString() - ); - // for per partition limit tests - for (int i = 0; i < 5; i++) { - for (int j = 0; j < 5; j++) { - session().execute(String.format("INSERT INTO test_ppl (a, b, c) VALUES (%d, %d, %d)", i, j, j)); - } - } + private static final String TABLE1 = TestUtils.generateIdentifier("test1"); + private static final String TABLE2 = TestUtils.generateIdentifier("test2"); + + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE1), + String.format( + "CREATE TABLE %s (k text, t text, i int, f float, PRIMARY KEY (k, t))", TABLE2), + "CREATE TABLE dateTest (t timestamp PRIMARY KEY)", + "CREATE TABLE test_coll (k int PRIMARY KEY, a list, b map, c set)", + "CREATE TABLE test_ppl (a int, b int, c int, PRIMARY KEY (a, b))", + insertInto(TABLE2) + .value("k", "cast_t") + .value("t", "a") + .value("i", 1) + .value("f", 1.1) + .toString(), + insertInto(TABLE2) + .value("k", "cast_t") + .value("t", "b") + .value("i", 2) + .value("f", 2.5) + .toString(), + insertInto(TABLE2) + .value("k", "cast_t") + .value("t", "c") + .value("i", 3) + .value("f", 3.7) + .toString(), + insertInto(TABLE2) + .value("k", "cast_t") + .value("t", "d") + .value("i", 4) + .value("f", 5.0) + .toString()); + // for per partition limit tests + for (int i = 0; i < 5; i++) { + for (int j = 0; j < 5; j++) { + session() + .execute(String.format("INSERT INTO test_ppl (a, b, c) VALUES (%d, %d, %d)", i, j, j)); + } } - - @Test(groups = "short") - public void executeTest() throws Exception { - - session().execute(insertInto(TABLE1).value("k", "k1").value("t", "This is a test").value("i", 3).value("f", 0.42)); - session().execute(update(TABLE1).with(set("t", "Another test")).where(eq("k", "k2"))); - - List rows = session().execute(select().from(TABLE1).where(in("k", "k1", "k2"))).all(); - - assertEquals(2, rows.size()); - - Row r1 = rows.get(0); - assertEquals("k1", r1.getString("k")); - assertEquals("This is a test", r1.getString("t")); - assertEquals(3, r1.getInt("i")); - assertFalse(r1.isNull("f")); - - Row r2 = rows.get(1); - assertEquals("k2", r2.getString("k")); - assertEquals("Another test", r2.getString("t")); - assertTrue(r2.isNull("i")); - assertTrue(r2.isNull("f")); + } + + @Test(groups = "short") + public void executeTest() throws Exception { + + session() + .execute( + insertInto(TABLE1) + .value("k", "k1") + .value("t", "This is a test") + .value("i", 3) + .value("f", 0.42)); + session().execute(update(TABLE1).with(set("t", "Another test")).where(eq("k", "k2"))); + + List rows 
= session().execute(select().from(TABLE1).where(in("k", "k1", "k2"))).all(); + + assertEquals(2, rows.size()); + + Row r1 = rows.get(0); + assertEquals("k1", r1.getString("k")); + assertEquals("This is a test", r1.getString("t")); + assertEquals(3, r1.getInt("i")); + assertFalse(r1.isNull("f")); + + Row r2 = rows.get(1); + assertEquals("k2", r2.getString("k")); + assertEquals("Another test", r2.getString("t")); + assertTrue(r2.isNull("i")); + assertTrue(r2.isNull("f")); + } + + @Test(groups = "short") + public void dateHandlingTest() throws Exception { + + Date d = new Date(); + session().execute(insertInto("dateTest").value("t", d)); + String query = select().from("dateTest").where(eq(token("t"), fcall("token", d))).toString(); + List rows = session().execute(query).all(); + + assertEquals(1, rows.size()); + + Row r1 = rows.get(0); + assertEquals(d, r1.getTimestamp("t")); + } + + @Test(groups = "short") + public void prepareTest() throws Exception { + // Just check we correctly avoid values when there is a bind marker + String query = "INSERT INTO foo (a,b,c,d) VALUES ('foo','bar',?,0);"; + BuiltStatement stmt = + insertInto("foo") + .value("a", "foo") + .value("b", "bar") + .value("c", bindMarker()) + .value("d", 0); + assertEquals(stmt.getQueryString(), query); + + query = "INSERT INTO foo (a,b,c,d) VALUES ('foo','bar',:c,0);"; + stmt = + insertInto("foo") + .value("a", "foo") + .value("b", "bar") + .value("c", bindMarker("c")) + .value("d", 0); + assertEquals(stmt.getQueryString(), query); + } + + @Test(groups = "short") + public void batchNonBuiltStatementTest() throws Exception { + SimpleStatement simple = + new SimpleStatement("INSERT INTO " + TABLE1 + " (k, t) VALUES ('batchTest1', 'val1')"); + RegularStatement built = insertInto(TABLE1).value("k", "batchTest2").value("t", "val2"); + session().execute(batch().add(simple).add(built)); + + List rows = + session().execute(select().from(TABLE1).where(in("k", "batchTest1", "batchTest2"))).all(); + assertEquals(2, rows.size()); + + Row r1 = rows.get(0); + assertEquals("batchTest1", r1.getString("k")); + assertEquals("val1", r1.getString("t")); + + Row r2 = rows.get(1); + assertEquals("batchTest2", r2.getString("k")); + assertEquals("val2", r2.getString("t")); + } + + @Test(groups = "short") + public void should_delete_list_element() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, a, b) VALUES (1, [1,2,3], null)"); + // when + BuiltStatement statement = delete().listElt("a", 1).from("test_coll").where(eq("k", 1)); + session().execute(statement); + // then + List actual = + session().execute("SELECT a FROM test_coll WHERE k = 1").one().getList("a", Integer.class); + assertThat(actual).containsExactly(1, 3); + } + + @Test(groups = "short") + public void should_delete_list_element_with_bind_marker() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, a) VALUES (1, [1,2,3])"); + // when + BuiltStatement statement = + delete().listElt("a", bindMarker()).from("test_coll").where(eq("k", 1)); + PreparedStatement ps = session().prepare(statement); + session().execute(ps.bind(1)); + // then + List actual = + session().execute("SELECT a FROM test_coll WHERE k = 1").one().getList("a", Integer.class); + assertThat(actual).containsExactly(1, 3); + } + + @Test(groups = "short") + public void should_delete_set_element() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, c) VALUES (1, {'foo','bar','qix'})"); + // when + BuiltStatement statement = delete().setElt("c", 
"foo").from("test_coll").where(eq("k", 1)); + session().execute(statement); + // then + Set actual = + session().execute("SELECT c FROM test_coll WHERE k = 1").one().getSet("c", String.class); + assertThat(actual).containsOnly("bar", "qix"); + } + + @Test(groups = "short") + public void should_delete_set_element_with_bind_marker() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, c) VALUES (1, {'foo','bar','qix'})"); + // when + BuiltStatement statement = + delete().setElt("c", bindMarker()).from("test_coll").where(eq("k", 1)); + PreparedStatement ps = session().prepare(statement); + session().execute(ps.bind("foo")); + // then + Set actual = + session().execute("SELECT c FROM test_coll WHERE k = 1").one().getSet("c", String.class); + assertThat(actual).containsOnly("bar", "qix"); + } + + @Test(groups = "short") + public void should_delete_map_entry() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, b) VALUES (1, {1:'foo', 2:'bar'})"); + // when + BuiltStatement statement = delete().mapElt("b", 1).from("test_coll").where(eq("k", 1)); + session().execute(statement); + // then + Map actual = + session() + .execute("SELECT b FROM test_coll WHERE k = 1") + .one() + .getMap("b", Integer.class, String.class); + assertThat(actual).containsExactly(entry(2, "bar")); + } + + @Test(groups = "short") + public void should_delete_map_entry_with_bind_marker() throws Exception { + // given + session().execute("INSERT INTO test_coll (k, a, b) VALUES (1, null, {1:'foo', 2:'bar'})"); + // when + BuiltStatement statement = + delete().mapElt("b", bindMarker()).from("test_coll").where(eq("k", 1)); + PreparedStatement ps = session().prepare(statement); + session().execute(ps.bind().setInt(0, 1)); + // then + Map actual = + session() + .execute("SELECT b FROM test_coll WHERE k = 1") + .one() + .getMap("b", Integer.class, String.class); + assertThat(actual).containsExactly(entry(2, "bar")); + } + + /** + * Validates that {@link QueryBuilder} may be used to create a query that casts a column from one + * type to another, i.e.: + * + *

+   * <pre>select CAST(f as int) as fint, i from table2 where k='cast_t'</pre>
+   *

    and validates that the query executes successfully with the anticipated results. + * + * @jira_ticket JAVA-1086 + * @test_category queries:builder + * @since 3.0.1 + */ + @Test(groups = "short") + @CassandraVersion("3.2") + public void should_support_cast_function_on_column() { + // when + ResultSet r = + session() + .execute( + select() + .cast("f", DataType.cint()) + .as("fint") + .column("i") + .from(TABLE2) + .where(eq("k", "cast_t"))); + // then + assertThat(r.getAvailableWithoutFetching()).isEqualTo(4); + for (Row row : r) { + Integer i = row.getInt("i"); + assertThat(row.getColumnDefinitions().getType("fint")).isEqualTo(DataType.cint()); + Integer f = row.getInt("fint"); + switch (i) { + case 1: + assertThat(f).isEqualTo(1); + break; + case 2: + assertThat(f).isEqualTo(2); + break; + case 3: + assertThat(f).isEqualTo(3); + break; + case 4: + assertThat(f).isEqualTo(5); + break; + default: + fail("Unexpected values: " + i + "," + f); + } } - - @Test(groups = "short") - public void dateHandlingTest() throws Exception { - - Date d = new Date(); - session().execute(insertInto("dateTest").value("t", d)); - String query = select().from("dateTest").where(eq(token("t"), fcall("token", d))).toString(); - List rows = session().execute(query).all(); - - assertEquals(1, rows.size()); - - Row r1 = rows.get(0); - assertEquals(d, r1.getTimestamp("t")); - } - - @Test(groups = "short") - public void prepareTest() throws Exception { - // Just check we correctly avoid values when there is a bind marker - String query = "INSERT INTO foo (a,b,c,d) VALUES ('foo','bar',?,0);"; - BuiltStatement stmt = insertInto("foo").value("a", "foo").value("b", "bar").value("c", bindMarker()).value("d", 0); - assertEquals(stmt.getQueryString(), query); - - query = "INSERT INTO foo (a,b,c,d) VALUES ('foo','bar',:c,0);"; - stmt = insertInto("foo").value("a", "foo").value("b", "bar").value("c", bindMarker("c")).value("d", 0); - assertEquals(stmt.getQueryString(), query); - } - - @Test(groups = "short") - public void batchNonBuiltStatementTest() throws Exception { - SimpleStatement simple = new SimpleStatement("INSERT INTO " + TABLE1 + " (k, t) VALUES ('batchTest1', 'val1')"); - RegularStatement built = insertInto(TABLE1).value("k", "batchTest2").value("t", "val2"); - session().execute(batch().add(simple).add(built)); - - List rows = session().execute(select().from(TABLE1).where(in("k", "batchTest1", "batchTest2"))).all(); - assertEquals(2, rows.size()); - - Row r1 = rows.get(0); - assertEquals("batchTest1", r1.getString("k")); - assertEquals("val1", r1.getString("t")); - - Row r2 = rows.get(1); - assertEquals("batchTest2", r2.getString("k")); - assertEquals("val2", r2.getString("t")); - } - - @Test(groups = "short") - public void should_delete_list_element() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, a, b) VALUES (1, [1,2,3], null)"); - //when - BuiltStatement statement = delete().listElt("a", 1).from("test_coll").where(eq("k", 1)); - session().execute(statement); - //then - List actual = session().execute("SELECT a FROM test_coll WHERE k = 1").one().getList("a", Integer.class); - assertThat(actual).containsExactly(1, 3); - } - - @Test(groups = "short") - public void should_delete_list_element_with_bind_marker() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, a) VALUES (1, [1,2,3])"); - //when - BuiltStatement statement = delete().listElt("a", bindMarker()).from("test_coll").where(eq("k", 1)); - PreparedStatement ps = session().prepare(statement); - 
session().execute(ps.bind(1)); - //then - List actual = session().execute("SELECT a FROM test_coll WHERE k = 1").one().getList("a", Integer.class); - assertThat(actual).containsExactly(1, 3); - } - - @Test(groups = "short") - public void should_delete_set_element() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, c) VALUES (1, {'foo','bar','qix'})"); - //when - BuiltStatement statement = delete().setElt("c", "foo").from("test_coll").where(eq("k", 1)); - session().execute(statement); - //then - Set actual = session().execute("SELECT c FROM test_coll WHERE k = 1").one().getSet("c", String.class); - assertThat(actual).containsOnly("bar", "qix"); - } - - @Test(groups = "short") - public void should_delete_set_element_with_bind_marker() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, c) VALUES (1, {'foo','bar','qix'})"); - //when - BuiltStatement statement = delete().setElt("c", bindMarker()).from("test_coll").where(eq("k", 1)); - PreparedStatement ps = session().prepare(statement); - session().execute(ps.bind("foo")); - //then - Set actual = session().execute("SELECT c FROM test_coll WHERE k = 1").one().getSet("c", String.class); - assertThat(actual).containsOnly("bar", "qix"); - } - - @Test(groups = "short") - public void should_delete_map_entry() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, b) VALUES (1, {1:'foo', 2:'bar'})"); - //when - BuiltStatement statement = delete().mapElt("b", 1).from("test_coll").where(eq("k", 1)); - session().execute(statement); - //then - Map actual = session().execute("SELECT b FROM test_coll WHERE k = 1").one().getMap("b", Integer.class, String.class); - assertThat(actual).containsExactly(entry(2, "bar")); - } - - @Test(groups = "short") - public void should_delete_map_entry_with_bind_marker() throws Exception { - //given - session().execute("INSERT INTO test_coll (k, a, b) VALUES (1, null, {1:'foo', 2:'bar'})"); - //when - BuiltStatement statement = delete().mapElt("b", bindMarker()).from("test_coll").where(eq("k", 1)); - PreparedStatement ps = session().prepare(statement); - session().execute(ps.bind().setInt(0, 1)); - //then - Map actual = session().execute("SELECT b FROM test_coll WHERE k = 1").one().getMap("b", Integer.class, String.class); - assertThat(actual).containsExactly(entry(2, "bar")); - } - - /** - * Validates that {@link QueryBuilder} may be used to create a query that casts a column from one type to another, - * i.e.: - *

- * <pre>
- * select CAST(f as int) as fint, i from table2 where k='cast_t'
- * </pre>

    - * and validates that the query executes successfully with the anticipated results. - * - * @jira_ticket JAVA-1086 - * @test_category queries:builder - * @since 3.0.1 - */ - @Test(groups = "short") - @CassandraVersion("3.2") - public void should_support_cast_function_on_column() { - //when - ResultSet r = session().execute(select().cast("f", DataType.cint()).as("fint").column("i").from(TABLE2).where(eq("k", "cast_t"))); - //then - assertThat(r.getAvailableWithoutFetching()).isEqualTo(4); - for (Row row : r) { - Integer i = row.getInt("i"); - assertThat(row.getColumnDefinitions().getType("fint")).isEqualTo(DataType.cint()); - Integer f = row.getInt("fint"); - switch (i) { - case 1: - assertThat(f).isEqualTo(1); - break; - case 2: - assertThat(f).isEqualTo(2); - break; - case 3: - assertThat(f).isEqualTo(3); - break; - case 4: - assertThat(f).isEqualTo(5); - break; - default: - fail("Unexpected values: " + i + "," + f); - } - } - } - - /** - * Validates that {@link QueryBuilder} may be used to create a query that makes an aggregate function call, casting - * the column(s) that the function operates on from one type to another. - * i.e.: - *

- * <pre>
- * select avg(CAST(i as float)) as iavg from table2 where k='cast_t'
- * </pre>

    - * and validates that the query executes successfully with the anticipated results. - * - * @jira_ticket JAVA-1086 - * @test_category queries:builder - * @since 3.0.1 - */ - @Test(groups = "short") - @CassandraVersion("3.2") - public void should_support_fcall_on_cast_column() { - //when - ResultSet ar = session().execute(select().fcall("avg", cast(column("i"), DataType.cfloat())).as("iavg").from(TABLE2).where(eq("k", "cast_t"))); - //then - assertThat(ar.getAvailableWithoutFetching()).isEqualTo(1); - Row row = ar.one(); - assertThat(row.getColumnDefinitions().getType("iavg")).isEqualTo(DataType.cfloat()); - Float f = row.getFloat("iavg"); - // (1.0+2.0+3.0+4.0) / 4 = 2.5 - assertThat(f).isEqualTo(2.5f); - } - - /** - * Validates that {@link QueryBuilder} can construct a query using the 'LIKE' operator to retrieve data from a - * table on a column that has a SASI index, i.e.: - *

- * <pre>
- * select n from s_table where n like 'Hello%'
- * </pre>

    - * - * @test_category queries:builder - * @jira_ticket JAVA-1113 - * @since 3.0.1 - */ - @Test(groups = "short") - @CassandraVersion("3.6") - public void should_retrieve_using_like_operator_on_table_with_sasi_index() { - //given - String table = "s_table"; - session().execute(createTable(table).addPartitionKey("k", DataType.text()) + } + + /** + * Validates that {@link QueryBuilder} may be used to create a query that makes an aggregate + * function call, casting the column(s) that the function operates on from one type to another. + * i.e.: + * + *

+   * <pre>select avg(CAST(i as float)) as iavg from table2 where k='cast_t'</pre>
+   *

    and validates that the query executes successfully with the anticipated results. + * + * @jira_ticket JAVA-1086 + * @test_category queries:builder + * @since 3.0.1 + */ + @Test(groups = "short") + @CassandraVersion("3.2") + public void should_support_fcall_on_cast_column() { + // when + ResultSet ar = + session() + .execute( + select() + .fcall("avg", cast(column("i"), DataType.cfloat())) + .as("iavg") + .from(TABLE2) + .where(eq("k", "cast_t"))); + // then + assertThat(ar.getAvailableWithoutFetching()).isEqualTo(1); + Row row = ar.one(); + assertThat(row.getColumnDefinitions().getType("iavg")).isEqualTo(DataType.cfloat()); + Float f = row.getFloat("iavg"); + // (1.0+2.0+3.0+4.0) / 4 = 2.5 + assertThat(f).isEqualTo(2.5f); + } + + /** + * Validates that {@link QueryBuilder} can construct a query using the 'LIKE' operator to retrieve + * data from a table on a column that has a SASI index, i.e.: + * + *

+   * <pre>select n from s_table where n like 'Hello%'</pre>

    + * + * @test_category queries:builder + * @jira_ticket JAVA-1113 + * @since 3.0.1 + */ + @Test(groups = "short") + @CassandraVersion("3.6") + public void should_retrieve_using_like_operator_on_table_with_sasi_index() { + // given + String table = "s_table"; + session() + .execute( + createTable(table) + .addPartitionKey("k", DataType.text()) .addClusteringColumn("cc", DataType.cint()) - .addColumn("n", DataType.text()) - ); - session().execute(String.format( - "CREATE CUSTOM INDEX on %s (n) USING 'org.apache.cassandra.index.sasi.SASIIndex';", table)); - session().execute(insertInto(table).value("k", "a").value("cc", 0).value("n", "Hello World")); - session().execute(insertInto(table).value("k", "a").value("cc", 1).value("n", "Goodbye World")); - session().execute(insertInto(table).value("k", "b").value("cc", 2).value("n", "Hello Moon")); - - //when - BuiltStatement query = select("n").from(table).where(like("n", "Hello%")); - ResultSet r = session().execute(query); - - //then - assertThat(r.getAvailableWithoutFetching()).isEqualTo(2); - assertThat(r.all()).extracting(new Extractor() { - @Override - public String extract(Row input) { + .addColumn("n", DataType.text())); + session() + .execute( + String.format( + "CREATE CUSTOM INDEX on %s (n) USING 'org.apache.cassandra.index.sasi.SASIIndex';", + table)); + session().execute(insertInto(table).value("k", "a").value("cc", 0).value("n", "Hello World")); + session().execute(insertInto(table).value("k", "a").value("cc", 1).value("n", "Goodbye World")); + session().execute(insertInto(table).value("k", "b").value("cc", 2).value("n", "Hello Moon")); + + // when + BuiltStatement query = select("n").from(table).where(like("n", "Hello%")); + ResultSet r = session().execute(query); + + // then + assertThat(r.getAvailableWithoutFetching()).isEqualTo(2); + assertThat(r.all()) + .extracting( + new Extractor() { + @Override + public String extract(Row input) { return input.getString("n"); - } - }).containsOnly("Hello World", "Hello Moon"); - } - - /** - * Validates that {@link QueryBuilder} can construct a query using the 'PER PARTITION LIMIT' operator to restrict - * the number of rows returned per partition in a query, i.e.: - *

- * <pre>
- * SELECT * FROM test_ppl PER PARTITION LIMIT 2
- * </pre>

    - * - * @test_category queries:builder - * @jira_ticket JAVA-1153 - * @since 3.1.0 - */ - @CassandraVersion(value = "3.6", description = "Support for PER PARTITION LIMIT was added to C* 3.6 (CASSANDRA-7017)") - @Test(groups = "short") - public void should_support_per_partition_limit() throws Exception { - assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2))) - .contains( - row(0, 0, 0), - row(0, 1, 1), - row(1, 0, 0), - row(1, 1, 1), - row(2, 0, 0), - row(2, 1, 1), - row(3, 0, 0), - row(3, 1, 1), - row(4, 0, 0), - row(4, 1, 1)); - // Combined Per Partition and "global" limit - assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(6))).hasSize(6); - // odd amount of results - assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(5))) - .contains( - row(0, 0, 0), - row(0, 1, 1), - row(1, 0, 0), - row(1, 1, 1), - row(2, 0, 0)); - // IN query - assertThat(session().execute(select().all().from("test_ppl").where(in("a", 2, 3)).perPartitionLimit(2))) - .contains( - row(2, 0, 0), - row(2, 1, 1), - row(3, 0, 0), - row(3, 1, 1)); - assertThat(session().execute(select().all().from("test_ppl").where(in("a", 2, 3)) - .perPartitionLimit(bindMarker()).limit(3).getQueryString(), 2)) - .hasSize(3); - assertThat(session().execute(select().all().from("test_ppl").where(in("a", 1, 2, 3)) - .perPartitionLimit(bindMarker()).limit(3).getQueryString(), 2)) - .hasSize(3); - // with restricted partition key - assertThat(session().execute(select().all().from("test_ppl").where(eq("a", bindMarker())) - .perPartitionLimit(bindMarker()).getQueryString(), 2, 3)) - .containsExactly( - row(2, 0, 0), - row(2, 1, 1), - row(2, 2, 2)); - // with ordering - assertThat(session().execute(select().all().from("test_ppl").where(eq("a", bindMarker())) - .orderBy(desc("b")).perPartitionLimit(bindMarker()).getQueryString(), 2, 3)) - .containsExactly( - row(2, 4, 4), - row(2, 3, 3), - row(2, 2, 2)); - // with filtering - assertThat(session().execute(select().all().from("test_ppl").where(eq("a", bindMarker())) - .and(gt("b", bindMarker())).perPartitionLimit(bindMarker()).allowFiltering().getQueryString(), 2, 0, 2)) - .containsExactly( - row(2, 1, 1), - row(2, 2, 2)); - assertThat(session().execute(select().all().from("test_ppl").where(eq("a", bindMarker())) - .and(gt("b", bindMarker())).orderBy(desc("b")).perPartitionLimit(bindMarker()).allowFiltering().getQueryString(), 2, 2, 2)) - .containsExactly( - row(2, 4, 4), - row(2, 3, 3)); + } + }) + .containsOnly("Hello World", "Hello Moon"); + } + + /** + * Validates that {@link QueryBuilder} can construct a query using the 'PER PARTITION LIMIT' + * operator to restrict the number of rows returned per partition in a query, i.e.: + * + *

    + * SELECT * FROM test_ppl PER PARTITION LIMIT 2 + * + *
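For orientation, the builder calls that the following test exercises reduce to the pattern below. This is an illustrative sketch only, not part of the patch; it assumes the test_ppl table used above, an open session(), and the static QueryBuilder imports already present in this file.

    // Limit results to 2 rows per partition, combined with an overall LIMIT.
    // Builds roughly: SELECT * FROM test_ppl PER PARTITION LIMIT 2 LIMIT 6;
    BuiltStatement twoRowsPerPartition =
        select().all().from("test_ppl").perPartitionLimit(2).limit(6);
    session().execute(twoRowsPerPartition);

    // The per-partition limit can also be a bind marker, bound at execution time:
    session().execute(
        select().all().from("test_ppl").where(in("a", 2, 3))
            .perPartitionLimit(bindMarker()).limit(3).getQueryString(),
        2);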

    + * + * @test_category queries:builder + * @jira_ticket JAVA-1153 + * @since 3.1.0 + */ + @CassandraVersion( + value = "3.6", + description = "Support for PER PARTITION LIMIT was added to C* 3.6 (CASSANDRA-7017)") + @Test(groups = "short") + public void should_support_per_partition_limit() throws Exception { + assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2))) + .contains( + row(0, 0, 0), + row(0, 1, 1), + row(1, 0, 0), + row(1, 1, 1), + row(2, 0, 0), + row(2, 1, 1), + row(3, 0, 0), + row(3, 1, 1), + row(4, 0, 0), + row(4, 1, 1)); + // Combined Per Partition and "global" limit + assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(6))) + .hasSize(6); + // odd amount of results + assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(5))) + .contains(row(0, 0, 0), row(0, 1, 1), row(1, 0, 0), row(1, 1, 1), row(2, 0, 0)); + // IN query + assertThat( + session() + .execute(select().all().from("test_ppl").where(in("a", 2, 3)).perPartitionLimit(2))) + .contains(row(2, 0, 0), row(2, 1, 1), row(3, 0, 0), row(3, 1, 1)); + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(in("a", 2, 3)) + .perPartitionLimit(bindMarker()) + .limit(3) + .getQueryString(), + 2)) + .hasSize(3); + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(in("a", 1, 2, 3)) + .perPartitionLimit(bindMarker()) + .limit(3) + .getQueryString(), + 2)) + .hasSize(3); + // with restricted partition key + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(eq("a", bindMarker())) + .perPartitionLimit(bindMarker()) + .getQueryString(), + 2, + 3)) + .containsExactly(row(2, 0, 0), row(2, 1, 1), row(2, 2, 2)); + // with ordering + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(eq("a", bindMarker())) + .orderBy(desc("b")) + .perPartitionLimit(bindMarker()) + .getQueryString(), + 2, + 3)) + .containsExactly(row(2, 4, 4), row(2, 3, 3), row(2, 2, 2)); + // with filtering + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(eq("a", bindMarker())) + .and(gt("b", bindMarker())) + .perPartitionLimit(bindMarker()) + .allowFiltering() + .getQueryString(), + 2, + 0, + 2)) + .containsExactly(row(2, 1, 1), row(2, 2, 2)); + assertThat( + session() + .execute( + select() + .all() + .from("test_ppl") + .where(eq("a", bindMarker())) + .and(gt("b", bindMarker())) + .orderBy(desc("b")) + .perPartitionLimit(bindMarker()) + .allowFiltering() + .getQueryString(), + 2, + 2, + 2)) + .containsExactly(row(2, 4, 4), row(2, 3, 3)); + } + + /** + * Validates that {@link QueryBuilder} can construct an INSERT INTO ... JSON query using the + * 'DEFAULT UNSET/NULL' clause. 
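In short, DEFAULT UNSET leaves columns that are absent from the JSON payload untouched, whereas DEFAULT NULL overwrites them with null. Below is a minimal sketch of the two builder calls verified by the test that follows, for illustration only and not part of the patch; my_table(k int PRIMARY KEY, v1 int, v2 int) is a hypothetical stand-in and the usual QueryBuilder static imports are assumed.

    // v1 is absent from the payload, so DEFAULT UNSET keeps its previous value.
    // Builds roughly: INSERT INTO my_table JSON '{"k": 0, "v2": 2}' DEFAULT UNSET;
    session().execute(insertInto("my_table").json("{\"k\": 0, \"v2\": 2}").defaultUnset());

    // Same payload with DEFAULT NULL: v1 is explicitly set to null instead.
    session().execute(insertInto("my_table").json("{\"k\": 0, \"v2\": 2}").defaultNull());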
+ * + * @test_category queries:builder + * @jira_ticket JAVA-1446 + * @since 3.3.0 + */ + @CassandraVersion( + value = "3.10", + description = "Support for DEFAULT UNSET/NULL was added to C* 3.10 (CASSANDRA-11424)") + @Test(groups = "short") + public void should_support_insert_json_with_default_unset_and_default_null() throws Throwable { + + String table = TestUtils.generateIdentifier("table"); + execute( + String.format("CREATE TABLE %s (k int primary key, v1 int, v2 int)", table), + String.format("INSERT INTO %s JSON '{\"k\": 0, \"v1\": 0, \"v2\": 0}'", table)); + + // leave v1 unset + session() + .execute( + session() + .prepare(insertInto(table).json(bindMarker()).defaultUnset()) + .bind("{\"k\": 0, \"v2\": 2}")); + assertThat(session().execute(select().from(table))).containsExactly(row(0, 0, 2)); + + // explicit specification DEFAULT NULL + session() + .execute( + session() + .prepare(insertInto(table).json(bindMarker()).defaultNull()) + .bind("{\"k\": 0, \"v2\": 2}")); + assertThat(session().execute(select().from(table))).containsExactly(row(0, null, 2)); + + // implicitly setting v2 to null + session() + .execute( + session() + .prepare(insertInto(table).json(bindMarker()).defaultNull()) + .bind("{\"k\": 0}")); + assertThat(session().execute(select().from(table))).containsExactly(row(0, null, null)); + + // mix setting null explicitly with default unset: + // set values for all fields + session() + .execute( + session() + .prepare(insertInto(table).json(bindMarker())) + .bind("{\"k\": 1, \"v1\": 1, \"v2\": 1}")); + // explicitly set v1 to null while leaving v2 unset which retains its value + session() + .execute( + session() + .prepare(insertInto(table).json(bindMarker()).defaultUnset()) + .bind("{\"k\": 1, \"v1\": null}")); + assertThat(session().execute(select().from(table).where(eq("k", 1)))) + .containsExactly(row(1, null, 1)); + + // test string literal instead of bind marker + session().execute(insertInto(table).json("{\"k\": 2, \"v1\": 2, \"v2\": 2}")); + // explicitly set v1 to null while leaving v2 unset which retains its value + session().execute(insertInto(table).json("{\"k\": 2, \"v1\": null}").defaultUnset()); + assertThat(session().execute(select().from(table).where(eq("k", 2)))) + .containsExactly(row(2, null, 2)); + session().execute(insertInto(table).json("{\"k\": 2}").defaultNull()); + assertThat(session().execute(select().from(table).where(eq("k", 2)))) + .containsExactly(row(2, null, null)); + } + + /** + * Validates that {@link QueryBuilder} can construct a query using the 'GROUP BY' clause. 
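As a quick reference for the GROUP BY support verified below: grouped columns must follow the primary key order, and grouping on only part of a composite partition key is rejected, which is what the negative cases at the end of the test assert. An illustrative sketch, not part of the patch, assuming a table with PRIMARY KEY (a, b, c, d) and the usual static imports:

    // Group rows by partition key 'a' and first clustering column 'b',
    // aggregating within each group.
    // Builds roughly: SELECT a,b,max(e) FROM my_table GROUP BY a,b;
    session().execute(select("a", "b", max("e")).from("my_table").groupBy("a", "b"));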
+ * + * @test_category queries:builder + * @jira_ticket JAVA-1443 + * @since 3.3.0 + */ + @CassandraVersion( + value = "3.10", + description = "Support for GROUP BY was added to C* 3.10 (CASSANDRA-10707)") + @Test(groups = "short") + public void should_support_group_by() throws Exception { + String table = TestUtils.generateIdentifier("table"); + execute( + String.format( + "CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))", + table)); + + execute( + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)", table)); + + // Make sure that we have some tombstones + execute( + String.format("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12", table), + String.format("DELETE FROM %s WHERE a = 3", table)); + + // Range queries + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("b", 2)) + .groupBy("a", "b") + .allowFiltering())) + .containsExactly(row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6)); + + // Range query with LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .groupBy("a", "b") + .limit(2))) + .containsExactly(row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24)); + + // Range queries with PER PARTITION LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .groupBy("a", "b") + .perPartitionLimit(1))) + .containsExactly(row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24)); + + // Range query with PER PARTITION LIMIT and LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .groupBy("a", "b") + .perPartitionLimit(1) + .limit(2))) + .containsExactly(row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6)); + + // Range query with DISTINCT + assertThat(session().execute(select("a", count("a")).distinct().from(table).groupBy("a"))) + .containsExactly(row(1, 1L), row(2, 1L), row(4, 1L)); + + // Range query with DISTINCT and LIMIT + assertThat( + session().execute(select("a", count("a")).distinct().from(table).groupBy("a").limit(2))) + .containsExactly(row(1, 1L), row(2, 1L)); + + // Single partition queries + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c"))) + .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24)); + + // Single partition queries with DISTINCT + assertThat( + session() + .execute( + select("a", count("a")).distinct().from(table).where(eq("a", 1)).groupBy("a"))) + .containsExactly(row(1, 1L)); + + // Single partition queries with LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c") + .limit(2))) + .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12)); 
+ + // Single partition queries with PER PARTITION LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c") + .perPartitionLimit(2))) + .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12)); + + // Single partition queries with ORDER BY + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c") + .orderBy(desc("b"), desc("c")))) + .containsExactly(row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12), row(1, 2, 6, 1L, 6)); + + // Single partition queries with ORDER BY and PER PARTITION LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c") + .orderBy(desc("b"), desc("c")) + .perPartitionLimit(1))) + .containsExactly(row(1, 4, 24, 2L, 24)); + + // Single partition queries with ORDER BY and LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "c") + .orderBy(desc("b"), desc("c")) + .limit(2))) + .containsExactly(row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12)); + + // Multi-partitions queries + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(in("a", 1, 2, 4)) + .and(eq("b", 2)) + .groupBy("a", "b", "c"))) + .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6)); + + // Multi-partitions query with DISTINCT + assertThat( + session() + .execute( + select("a", count("a")) + .distinct() + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a"))) + .containsExactly(row(1, 1L), row(2, 1L), row(4, 1L)); + + // Multi-partitions query with DISTINCT and LIMIT + assertThat( + session() + .execute( + select("a", count("a")) + .distinct() + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a") + .limit(2))) + .containsExactly(row(1, 1L), row(2, 1L)); + + // Multi-partitions queries with PER PARTITION LIMIT + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a", "b", "c") + .perPartitionLimit(1))) + .containsExactly(row(1, 2, 6, 1L, 6), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24)); + + assertThat( + session() + .execute( + select("a", "b", "e", count("b"), max("e")) + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a", "b", "c") + .perPartitionLimit(2))) + .containsExactly( + row(1, 2, 6, 1L, 6), + row(1, 2, 12, 1L, 12), + row(2, 2, 6, 1L, 6), + row(2, 4, 12, 1L, 12), + row(4, 8, 24, 1L, 24)); + + // Multi-partitions queries with ORDER BY + assertThat( + session() + .execute( + select("a", "b", "c", count("b"), max("e")) + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a", "b") + .orderBy(desc("b"), desc("c")) + .setFetchSize(Integer.MAX_VALUE))) + .containsExactly( + row(4, 8, 2, 1L, 24), + row(2, 4, 3, 1L, 12), + row(1, 4, 2, 2L, 24), + row(2, 2, 3, 1L, 6), + row(1, 2, 2, 2L, 12)); + + // Multi-partitions queries with ORDER BY and LIMIT + assertThat( + session() + .execute( + select("a", "b", "c", "d") + .from(table) + .where(in("a", 1, 2, 4)) + .groupBy("a", "b") + .orderBy(desc("b"), desc("c")) + .limit(3) + .setFetchSize(Integer.MAX_VALUE))) + .containsExactly(row(4, 8, 2, 12), row(2, 4, 3, 6), row(1, 4, 2, 12)); + + try { + session() + .execute( + select() + .column("a") + .column("b") + .as("clustering1") + .max("c") + .from(table) + .where(eq("a", 1)) + 
.groupBy("a", "clustering1")); + fail("Expecting IQE"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()).startsWith("Undefined column name clustering1"); } - /** - * Validates that {@link QueryBuilder} can construct an INSERT INTO ... JSON query using the 'DEFAULT UNSET/NULL' clause. - * - * @test_category queries:builder - * @jira_ticket JAVA-1446 - * @since 3.3.0 - */ - @CassandraVersion(value = "3.10", description = "Support for DEFAULT UNSET/NULL was added to C* 3.10 (CASSANDRA-11424)") - @Test(groups = "short") - public void should_support_insert_json_with_default_unset_and_default_null() throws Throwable { - - String table = TestUtils.generateIdentifier("table"); - execute( - String.format("CREATE TABLE %s (k int primary key, v1 int, v2 int)", table), - String.format("INSERT INTO %s JSON '{\"k\": 0, \"v1\": 0, \"v2\": 0}'", table) - ); - - // leave v1 unset - session().execute(session().prepare(insertInto(table).json(bindMarker()).defaultUnset()).bind("{\"k\": 0, \"v2\": 2}")); - assertThat(session().execute(select().from(table))).containsExactly( - row(0, 0, 2) - ); - - // explicit specification DEFAULT NULL - session().execute(session().prepare(insertInto(table).json(bindMarker()).defaultNull()).bind("{\"k\": 0, \"v2\": 2}")); - assertThat(session().execute(select().from(table))).containsExactly( - row(0, null, 2) - ); - - // implicitly setting v2 to null - session().execute(session().prepare(insertInto(table).json(bindMarker()).defaultNull()).bind("{\"k\": 0}")); - assertThat(session().execute(select().from(table))).containsExactly( - row(0, null, null) - ); - - // mix setting null explicitly with default unset: - // set values for all fields - session().execute(session().prepare(insertInto(table).json(bindMarker())).bind("{\"k\": 1, \"v1\": 1, \"v2\": 1}")); - // explicitly set v1 to null while leaving v2 unset which retains its value - session().execute(session().prepare(insertInto(table).json(bindMarker()).defaultUnset()).bind("{\"k\": 1, \"v1\": null}")); - assertThat(session().execute(select().from(table).where(eq("k", 1)))).containsExactly( - row(1, null, 1) - ); - - // test string literal instead of bind marker - session().execute(insertInto(table).json("{\"k\": 2, \"v1\": 2, \"v2\": 2}")); - // explicitly set v1 to null while leaving v2 unset which retains its value - session().execute(insertInto(table).json("{\"k\": 2, \"v1\": null}").defaultUnset()); - assertThat(session().execute(select().from(table).where(eq("k", 2)))).containsExactly( - row(2, null, 2) - ); - session().execute(insertInto(table).json("{\"k\": 2}").defaultNull()); - assertThat(session().execute(select().from(table).where(eq("k", 2)))).containsExactly( - row(2, null, null) - ); + try { + session() + .execute( + select() + .column("a") + .column("b") + .max("c") + .from(table) + .where(eq("a", 1)) + .groupBy("a", "b", "z")); + fail("Expecting IQE"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()).startsWith("Undefined column name z"); } - /** - * Validates that {@link QueryBuilder} can construct a query using the 'GROUP BY' clause. 
- * - * @test_category queries:builder - * @jira_ticket JAVA-1443 - * @since 3.3.0 - */ - @CassandraVersion(value = "3.10", description = "Support for GROUP BY was added to C* 3.10 (CASSANDRA-10707)") - @Test(groups = "short") - public void should_support_group_by() throws Exception { - String table = TestUtils.generateIdentifier("table"); - execute(String.format("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))", table)); - - execute( - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)", table) - ); - - // Make sure that we have some tombstones - execute( - String.format("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12", table), - String.format("DELETE FROM %s WHERE a = 3", table)); - - // Range queries - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("b", 2)).groupBy("a", "b").allowFiltering())).containsExactly( - row(1, 2, 6, 2L, 12), - row(2, 2, 6, 1L, 6)); - - // Range query with LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).groupBy("a", "b").limit(2))).containsExactly( - row(1, 2, 6, 2L, 12), - row(1, 4, 12, 2L, 24)); - - // Range queries with PER PARTITION LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).groupBy("a", "b").perPartitionLimit(1))).containsExactly( - row(1, 2, 6, 2L, 12), - row(2, 2, 6, 1L, 6), - row(4, 8, 24, 1L, 24)); - - // Range query with PER PARTITION LIMIT and LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).groupBy("a", "b").perPartitionLimit(1).limit(2))).containsExactly( - row(1, 2, 6, 2L, 12), - row(2, 2, 6, 1L, 6)); - - // Range query with DISTINCT - assertThat(session().execute(select("a", count("a")).distinct().from(table).groupBy("a"))).containsExactly( - row(1, 1L), - row(2, 1L), - row(4, 1L)); - - // Range query with DISTINCT and LIMIT - assertThat(session().execute(select("a", count("a")).distinct().from(table).groupBy("a").limit(2))).containsExactly( - row(1, 1L), - row(2, 1L)); - - // Single partition queries - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c"))).containsExactly( - row(1, 2, 6, 1L, 6), - row(1, 2, 12, 1L, 12), - row(1, 4, 12, 2L, 24)); - - // Single partition queries with DISTINCT - assertThat(session().execute(select("a", count("a")).distinct().from(table).where(eq("a", 1)).groupBy("a"))).containsExactly( - row(1, 1L)); - - // Single partition queries with LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c").limit(2))).containsExactly( - row(1, 2, 6, 1L, 6), - row(1, 2, 12, 1L, 12)); - - // Single partition queries with PER PARTITION LIMIT - assertThat(session().execute(select("a", "b", "e", 
count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c").perPartitionLimit(2))).containsExactly( - row(1, 2, 6, 1L, 6), - row(1, 2, 12, 1L, 12)); - - // Single partition queries with ORDER BY - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c").orderBy(desc("b"), desc("c")))).containsExactly( - row(1, 4, 24, 2L, 24), - row(1, 2, 12, 1L, 12), - row(1, 2, 6, 1L, 6)); - - // Single partition queries with ORDER BY and PER PARTITION LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c").orderBy(desc("b"), desc("c")).perPartitionLimit(1))).containsExactly( - row(1, 4, 24, 2L, 24)); - - // Single partition queries with ORDER BY and LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(eq("a", 1)).groupBy("a", "b", "c").orderBy(desc("b"), desc("c")).limit(2))).containsExactly( - row(1, 4, 24, 2L, 24), - row(1, 2, 12, 1L, 12)); - - // Multi-partitions queries - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(in("a", 1, 2, 4)).and(eq("b", 2)).groupBy("a", "b", "c"))).containsExactly( - row(1, 2, 6, 1L, 6), - row(1, 2, 12, 1L, 12), - row(2, 2, 6, 1L, 6)); - - // Multi-partitions query with DISTINCT - assertThat(session().execute(select("a", count("a")).distinct().from(table).where(in("a", 1, 2, 4)).groupBy("a"))).containsExactly( - row(1, 1L), - row(2, 1L), - row(4, 1L)); - - // Multi-partitions query with DISTINCT and LIMIT - assertThat(session().execute(select("a", count("a")).distinct().from(table).where(in("a", 1, 2, 4)).groupBy("a").limit(2))).containsExactly( - row(1, 1L), - row(2, 1L)); - - // Multi-partitions queries with PER PARTITION LIMIT - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(in("a", 1, 2, 4)).groupBy("a", "b", "c").perPartitionLimit(1))).containsExactly( - row(1, 2, 6, 1L, 6), - row(2, 2, 6, 1L, 6), - row(4, 8, 24, 1L, 24)); - - assertThat(session().execute(select("a", "b", "e", count("b"), max("e")).from(table).where(in("a", 1, 2, 4)).groupBy("a", "b", "c").perPartitionLimit(2))).containsExactly( - row(1, 2, 6, 1L, 6), - row(1, 2, 12, 1L, 12), - row(2, 2, 6, 1L, 6), - row(2, 4, 12, 1L, 12), - row(4, 8, 24, 1L, 24)); - - // Multi-partitions queries with ORDER BY - assertThat(session().execute(select("a", "b", "c", count("b"), max("e")).from(table).where(in("a", 1, 2, 4)).groupBy("a", "b").orderBy(desc("b"), desc("c")).setFetchSize(Integer.MAX_VALUE))).containsExactly( - row(4, 8, 2, 1L, 24), - row(2, 4, 3, 1L, 12), - row(1, 4, 2, 2L, 24), - row(2, 2, 3, 1L, 6), - row(1, 2, 2, 2L, 12)); - - // Multi-partitions queries with ORDER BY and LIMIT - assertThat(session().execute(select("a", "b", "c", "d").from(table).where(in("a", 1, 2, 4)).groupBy("a", "b").orderBy(desc("b"), desc("c")).limit(3).setFetchSize(Integer.MAX_VALUE))).containsExactly( - row(4, 8, 2, 12), - row(2, 4, 3, 6), - row(1, 4, 2, 12)); - - try { - session().execute(select().column("a").column("b").as("clustering1").max("c").from(table).where(eq("a", 1)).groupBy("a", "clustering1")); - fail("Expecting IQE"); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()).isEqualTo("Undefined column name clustering1"); - } - - try { - session().execute(select().column("a").column("b").max("c").from(table).where(eq("a", 1)).groupBy("a", "b", "z")); - fail("Expecting IQE"); - } catch (InvalidQueryException e) { - 
assertThat(e.getMessage()).isEqualTo("Undefined column name z"); - } - - // Test with composite partition key - table = TestUtils.generateIdentifier("table"); - execute(String.format("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))", table)); - - execute( - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)", table), - String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)", table) - ); - - try { - session().execute(select().column("a").column("b").max("d").from(table).groupBy("a")); - fail("Expecting IQE"); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()).isEqualTo("Group by is not supported on only a part of the partition key"); - } - - assertThat(session().execute(select("a", "b", max("d")).from(table).groupBy("a", "b"))).containsExactly( - row(1, 2, 12), - row(1, 1, 12)); - - assertThat(session().execute(select("a", "b", max("d")).from(table).where(eq("a", 1)).and(eq("b", 1)).groupBy("b"))).containsExactly( - row(1, 1, 12)); + // Test with composite partition key + table = TestUtils.generateIdentifier("table"); + execute( + String.format( + "CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))", + table)); + + execute( + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)", table), + String.format("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)", table)); + + try { + session().execute(select().column("a").column("b").max("d").from(table).groupBy("a")); + fail("Expecting IQE"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()) + .isEqualTo("Group by is not supported on only a part of the partition key"); } + assertThat(session().execute(select("a", "b", max("d")).from(table).groupBy("a", "b"))) + .containsExactly(row(1, 2, 12), row(1, 1, 12)); + + assertThat( + session() + .execute( + select("a", "b", max("d")) + .from(table) + .where(eq("a", 1)) + .and(eq("b", 1)) + .groupBy("b"))) + .containsExactly(row(1, 1, 12)); + } + + /** + * Validates that {@link QueryBuilder} can construct a SELECT query for a materialized view. 
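The from(...) overload exercised below accepts the view's metadata object directly, so the generated query reads from the materialized view rather than its base table. A minimal sketch, for illustration only and not part of the patch; the keyspace and view names are hypothetical and the metadata lookup mirrors the test code.

    MaterializedViewMetadata view =
        session().getCluster().getMetadata().getKeyspace("my_ks").getMaterializedView("my_mv");
    // Builds roughly: SELECT cc FROM my_ks.my_mv;
    session().execute(select().column("cc").from(view));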
+ * + * @test_category queries:builder + * @jira_ticket JAVA-2123 + * @since 3.7.0 + */ + @CassandraVersion( + value = "3.0", + description = "Support for materialized views was added to C* 3.0") + @Test(groups = "short") + public void should_select_from_materialized_view() { + + String table = TestUtils.generateIdentifier("table"); + final String mv = TestUtils.generateIdentifier("mv"); + + execute( + String.format("CREATE TABLE %s (pk int, cc int, v int, PRIMARY KEY (pk, cc))", table), + String.format("INSERT INTO %s (pk, cc, v) VALUES (0,0,0)", table), + String.format("INSERT INTO %s (pk, cc, v) VALUES (0,1,1)", table), + String.format("INSERT INTO %s (pk, cc, v) VALUES (0,2,2)", table), + String.format( + "CREATE MATERIALIZED VIEW %s AS SELECT pk, cc FROM %s WHERE cc IS NOT NULL AND pk IS NOT NULL PRIMARY KEY (pk, cc)", + mv, table)); + + // Wait until the MV is fully constructed + ConditionChecker.check() + .that( + new Callable() { + @Override + public Boolean call() { + return session().execute("SELECT * FROM " + mv).all().size() == 3; + } + }) + .before(1, MINUTES) + .becomesTrue(); + + MaterializedViewMetadata materializedView = + session().getCluster().getMetadata().getKeyspace(keyspace).getMaterializedView(mv); + + assertThat(session().execute(select().column("cc").as("mycc").from(materializedView))) + .containsExactly(row(0), row(1), row(2)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderITest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderITest.java index c2fd60405ef..a1f4d8a8bbe 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderITest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderITest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,117 +17,166 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.delete; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.CCMConfig; +import com.datastax.driver.core.CCMTestsSupport; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TableMetadata; import com.datastax.driver.core.exceptions.SyntaxError; import com.datastax.driver.core.utils.CassandraVersion; import org.testng.annotations.Test; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.testng.Assert.*; - @CCMConfig(clusterProvider = "createClusterBuilderNoDebouncing") public class QueryBuilderITest extends CCMTestsSupport { - private static final String TABLE_TEXT = "test_text"; - private static final String TABLE_INT = "test_int"; - - @Override - public void onTestContextInitialized() { - execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, a int, b int)", TABLE_TEXT), - String.format("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)", TABLE_INT)); - } - - @Test(groups = "short") - public void remainingDeleteTests() throws Exception { - - Statement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_TEXT); - assertNotNull(table); - - String expected = String.format("DELETE k FROM %s.test_text;", keyspace); - query = delete("k").from(table); - assertEquals(query.toString(), expected); - try { - session().execute(query); - fail(); - } catch (SyntaxError e) { - // Missing WHERE clause - } - } - - @Test(groups = "short") - public void selectInjectionTests() throws Exception { - - String query; - Statement select; - PreparedStatement ps; - BoundStatement bs; - - session().execute("CREATE TABLE foo ( k ascii PRIMARY KEY , i int, s ascii )"); - - query = "SELECT * FROM foo WHERE k=?;"; - select = select().all().from("foo").where(eq("k", bindMarker())); - ps = session().prepare(select.toString()); - bs = ps.bind(); - assertEquals(select.toString(), query); - session().execute(bs.setString("k", "4 AND c=5")); - } - - @Test(groups = "short") - @CassandraVersion(value = "2.0.7", description = "DELETE..IF EXISTS only supported in 2.0.7+ (CASSANDRA-5708)") - public void conditionalDeletesTest() throws Exception { - session().execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); - - Statement delete; - Row row; - delete = delete().from(keyspace, TABLE_INT).where(eq("k", 2)).ifExists(); - row = 
session().execute(delete).one(); - assertFalse(row.getBool("[applied]")); - - delete = delete().from(keyspace, TABLE_INT).where(eq("k", 1)).ifExists(); - row = session().execute(delete).one(); - assertTrue(row.getBool("[applied]")); - - session().execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); - - delete = delete().from(keyspace, TABLE_INT).where(eq("k", 1)).onlyIf(eq("a", 1)).and(eq("b", 2)); - row = session().execute(delete).one(); - assertFalse(row.getBool("[applied]")); - - delete = delete().from(keyspace, TABLE_INT).where(eq("k", 1)).onlyIf(eq("a", 1)).and(eq("b", 1)); - row = session().execute(delete).one(); - assertTrue(row.getBool("[applied]")); - } - - @Test(groups = "short") - @CassandraVersion(value = "2.0.13", description = "Allow IF EXISTS for UPDATE statements (CASSANDRA-8610)") - public void conditionalUpdatesTest() throws Exception { - session().execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); - - Statement update; - Row row; - update = update(TABLE_INT).with(set("a", 2)).and(set("b", 2)).where(eq("k", 2)).ifExists(); - row = session().execute(update).one(); - assertFalse(row.getBool("[applied]")); - - update = update(TABLE_INT).with(set("a", 2)).and(set("b", 2)).where(eq("k", 1)).ifExists(); - row = session().execute(update).one(); - assertTrue(row.getBool("[applied]")); - - update = update(TABLE_INT).with(set("a", 2)).and(set("b", 2)).where(eq("k", 2)).onlyIf(eq("a", 1)).and(eq("b", 2)); - row = session().execute(update).one(); - assertFalse(row.getBool("[applied]")); - - update = update(TABLE_INT).with(set("a", 3)).and(set("b", 3)).where(eq("k", 1)).onlyIf(eq("a", 2)).and(eq("b", 2)); - row = session().execute(update).one(); - assertTrue(row.getBool("[applied]")); - - update = update(TABLE_INT).with(set("a", 4)).and(set("b", 4)).onlyIf(eq("a", 2)).and(eq("b", 2)).where(eq("k", 1)); - row = session().execute(update).one(); - assertFalse(row.getBool("[applied]")); - - update = update(TABLE_INT).with(set("a", 4)).and(set("b", 4)).onlyIf(eq("a", 3)).and(eq("b", 3)).where(eq("k", 1)); - row = session().execute(update).one(); - assertTrue(row.getBool("[applied]")); + private static final String TABLE_TEXT = "test_text"; + private static final String TABLE_INT = "test_int"; + + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TABLE %s (k text PRIMARY KEY, a int, b int)", TABLE_TEXT), + String.format("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)", TABLE_INT)); + } + + @Test(groups = "short") + public void remainingDeleteTests() throws Exception { + + Statement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_TEXT); + assertNotNull(table); + + String expected = String.format("DELETE k FROM %s.test_text;", keyspace); + query = delete("k").from(table); + assertEquals(query.toString(), expected); + try { + session().execute(query); + fail(); + } catch (SyntaxError e) { + // Missing WHERE clause } + } + + @Test(groups = "short") + public void selectInjectionTests() throws Exception { + + String query; + Statement select; + PreparedStatement ps; + BoundStatement bs; + + session().execute("CREATE TABLE foo ( k ascii PRIMARY KEY , i int, s ascii )"); + + query = "SELECT * FROM foo WHERE k=?;"; + select = select().all().from("foo").where(eq("k", bindMarker())); + ps = session().prepare(select.toString()); + bs = ps.bind(); + assertEquals(select.toString(), query); + session().execute(bs.setString("k", "4 AND c=5")); + } + + 
@Test(groups = "short") + @CassandraVersion( + value = "2.0.7", + description = "DELETE..IF EXISTS only supported in 2.0.7+ (CASSANDRA-5708)") + public void conditionalDeletesTest() throws Exception { + session() + .execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); + + Statement delete; + Row row; + delete = delete().from(keyspace, TABLE_INT).where(eq("k", 2)).ifExists(); + row = session().execute(delete).one(); + assertFalse(row.getBool("[applied]")); + + delete = delete().from(keyspace, TABLE_INT).where(eq("k", 1)).ifExists(); + row = session().execute(delete).one(); + assertTrue(row.getBool("[applied]")); + + session() + .execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); + + delete = + delete().from(keyspace, TABLE_INT).where(eq("k", 1)).onlyIf(eq("a", 1)).and(eq("b", 2)); + row = session().execute(delete).one(); + assertFalse(row.getBool("[applied]")); + + delete = + delete().from(keyspace, TABLE_INT).where(eq("k", 1)).onlyIf(eq("a", 1)).and(eq("b", 1)); + row = session().execute(delete).one(); + assertTrue(row.getBool("[applied]")); + } + + @Test(groups = "short") + @CassandraVersion( + value = "2.0.13", + description = "Allow IF EXISTS for UPDATE statements (CASSANDRA-8610)") + public void conditionalUpdatesTest() throws Exception { + session() + .execute(String.format("INSERT INTO %s.test_int (k, a, b) VALUES (1, 1, 1)", keyspace)); + + Statement update; + Row row; + update = update(TABLE_INT).with(set("a", 2)).and(set("b", 2)).where(eq("k", 2)).ifExists(); + row = session().execute(update).one(); + assertFalse(row.getBool("[applied]")); + + update = update(TABLE_INT).with(set("a", 2)).and(set("b", 2)).where(eq("k", 1)).ifExists(); + row = session().execute(update).one(); + assertTrue(row.getBool("[applied]")); + + update = + update(TABLE_INT) + .with(set("a", 2)) + .and(set("b", 2)) + .where(eq("k", 2)) + .onlyIf(eq("a", 1)) + .and(eq("b", 2)); + row = session().execute(update).one(); + assertFalse(row.getBool("[applied]")); + + update = + update(TABLE_INT) + .with(set("a", 3)) + .and(set("b", 3)) + .where(eq("k", 1)) + .onlyIf(eq("a", 2)) + .and(eq("b", 2)); + row = session().execute(update).one(); + assertTrue(row.getBool("[applied]")); + + update = + update(TABLE_INT) + .with(set("a", 4)) + .and(set("b", 4)) + .onlyIf(eq("a", 2)) + .and(eq("b", 2)) + .where(eq("k", 1)); + row = session().execute(update).one(); + assertFalse(row.getBool("[applied]")); + + update = + update(TABLE_INT) + .with(set("a", 4)) + .and(set("b", 4)) + .onlyIf(eq("a", 3)) + .and(eq("b", 3)) + .where(eq("k", 1)); + row = session().execute(update).one(); + assertTrue(row.getBool("[applied]")); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderRoutingKeyTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderRoutingKeyTest.java index 03bba01621a..bbcc45ca170 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderRoutingKeyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderRoutingKeyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,185 +17,212 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; -import org.testng.annotations.Test; - -import java.nio.ByteBuffer; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.batch; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp; +import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import com.datastax.driver.core.CCMConfig; +import com.datastax.driver.core.CCMTestsSupport; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.TableMetadata; +import java.nio.ByteBuffer; +import org.testng.annotations.Test; + @CCMConfig(clusterProvider = "createClusterBuilderNoDebouncing") public class QueryBuilderRoutingKeyTest extends CCMTestsSupport { - private static final String TABLE_TEXT = "test_text"; - private static final String TABLE_INT = "test_int"; - private static final String TABLE_CASE = "test_case"; - private static final String TABLE_CASE_QUOTED = "test_case_quoted"; - - @Override - public void onTestContextInitialized() { - execute(String.format("CREATE TABLE %s (k text PRIMARY KEY, a int, b int)", TABLE_TEXT), - String.format("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)", TABLE_INT), - String.format("CREATE TABLE %s (theKey int PRIMARY KEY, a int, b int)", TABLE_CASE), - String.format("CREATE TABLE %s (\"theKey\" int PRIMARY KEY, a int, b int, \"tHEkEY\" int)", TABLE_CASE_QUOTED)); - } - - @Test(groups = "short") - public void textRoutingKeyTest() throws Exception { - - BuiltStatement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_TEXT); - assertNotNull(table); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; - - String txt = "If she weighs the same as a duck... 
she's made of wood."; - query = insertInto(table).values(new String[]{"k", "a", "b"}, new Object[]{txt, 1, 2}); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), ByteBuffer.wrap(txt.getBytes())); - session().execute(query); - - query = select().from(table).where(eq("k", txt)); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), ByteBuffer.wrap(txt.getBytes())); - Row row = session().execute(query).one(); - assertEquals(row.getString("k"), txt); - assertEquals(row.getInt("a"), 1); - assertEquals(row.getInt("b"), 2); - } - - @Test(groups = "short") - public void routingKeyColumnCaseSensitivityTest() throws Exception { - - BuiltStatement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_CASE); - assertNotNull(table); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; - - query = insertInto(table).values(new String[]{"theKey", "a", "b"}, new Object[]{42, 1, 2}); - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putInt(0, 42); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - session().execute(query); - - query = select().from(table).where(eq("theKey", 42)); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - Row row = session().execute(query).one(); - assertEquals(row.getInt("theKey"), 42); - assertEquals(row.getInt("a"), 1); - assertEquals(row.getInt("b"), 2); - - query = insertInto(table).values(new String[]{"ThEkEy", "a", "b"}, new Object[]{42, 1, 2}); - bb = ByteBuffer.allocate(4); - bb.putInt(0, 42); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - session().execute(query); - - query = select().from(table).where(eq("ThEkEy", 42)); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - row = session().execute(query).one(); - assertEquals(row.getInt("theKey"), 42); - assertEquals(row.getInt("a"), 1); - assertEquals(row.getInt("b"), 2); - } - - @Test(groups = "short") - public void routingKeyColumnCaseSensitivityForQuotedIdentifiersTest() throws Exception { - - BuiltStatement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_CASE_QUOTED); - assertNotNull(table); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; - - query = insertInto(table).values(new String[]{"\"theKey\"", "a", "b", "\"tHEkEY\""}, new Object[]{42, 1, 2, 3}); - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putInt(0, 42); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - - query = insertInto(table).values(new String[]{"theKey", "a", "b", "\"tHEkEY\""}, new Object[]{42, 1, 2, 3}); - assertNull(query.getRoutingKey(protocolVersion, codecRegistry)); - - query = insertInto(table).values(new String[]{"theKey", "a", "b", "theKey"}, new Object[]{42, 1, 2, 3}); - assertNull(query.getRoutingKey(protocolVersion, codecRegistry)); - - } - - @Test(groups = "short") - public void intRoutingKeyTest() throws Exception { - - BuiltStatement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_INT); - assertNotNull(table); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; - - query = insertInto(table).values(new 
String[]{"k", "a", "b"}, new Object[]{42, 1, 2}); - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putInt(0, 42); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - session().execute(query); - - query = select().from(table).where(eq("k", 42)); - assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); - Row row = session().execute(query).one(); - assertEquals(row.getInt("k"), 42); - assertEquals(row.getInt("a"), 1); - assertEquals(row.getInt("b"), 2); - } - - @Test(groups = "short") - public void intRoutingBatchKeyTest() throws Exception { - - BuiltStatement query; - TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_INT); - assertNotNull(table); - ProtocolVersion protocolVersion = cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); - CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; - - ByteBuffer bb = ByteBuffer.allocate(4); - bb.putInt(0, 42); - - String batch_query; - BuiltStatement batch; - - query = select().from(table).where(eq("k", 42)); - - batch_query = "BEGIN BATCH "; - batch_query += String.format("INSERT INTO %s.test_int (k,a) VALUES (42,1);", keyspace); - batch_query += String.format("UPDATE %s.test_int USING TTL 400;", keyspace); - batch_query += "APPLY BATCH;"; - batch = batch() - .add(insertInto(table).values(new String[]{"k", "a"}, new Object[]{42, 1})) - .add(update(table).using(ttl(400))); - assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), bb); - assertEquals(batch.toString(), batch_query); - // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL - - batch_query = "BEGIN BATCH "; - batch_query += String.format("SELECT * FROM %s.test_int WHERE k=42;", keyspace); - batch_query += "APPLY BATCH;"; - batch = batch(query); - assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), bb); - assertEquals(batch.toString(), batch_query); - // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL - - batch_query = "BEGIN BATCH "; - batch_query += "SELECT * FROM foo WHERE k=42;"; - batch_query += "APPLY BATCH;"; - batch = batch().add(select().from("foo").where(eq("k", 42))); - assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), null); - assertEquals(batch.toString(), batch_query); - // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL - - batch_query = "BEGIN BATCH USING TIMESTAMP 42 "; - batch_query += "INSERT INTO foo.bar (a) VALUES (123);"; - batch_query += "APPLY BATCH;"; - batch = batch().using(timestamp(42)).add(insertInto("foo", "bar").value("a", 123)); - assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), null); - assertEquals(batch.toString(), batch_query); - // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL - } + private static final String TABLE_TEXT = "test_text"; + private static final String TABLE_INT = "test_int"; + private static final String TABLE_CASE = "test_case"; + private static final String TABLE_CASE_QUOTED = "test_case_quoted"; + + @Override + public void onTestContextInitialized() { + execute( + String.format("CREATE TABLE %s (k text PRIMARY KEY, a int, b int)", TABLE_TEXT), + String.format("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)", TABLE_INT), + String.format("CREATE TABLE %s (theKey int PRIMARY KEY, a int, b int)", TABLE_CASE), + String.format( + "CREATE TABLE %s (\"theKey\" int PRIMARY KEY, a int, b int, \"tHEkEY\" int)", + TABLE_CASE_QUOTED)); + } + + @Test(groups = "short") + public void textRoutingKeyTest() 
throws Exception { + + BuiltStatement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_TEXT); + assertNotNull(table); + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; + + String txt = "If she weighs the same as a duck... she's made of wood."; + query = insertInto(table).values(new String[] {"k", "a", "b"}, new Object[] {txt, 1, 2}); + assertEquals( + query.getRoutingKey(protocolVersion, codecRegistry), ByteBuffer.wrap(txt.getBytes())); + session().execute(query); + + query = select().from(table).where(eq("k", txt)); + assertEquals( + query.getRoutingKey(protocolVersion, codecRegistry), ByteBuffer.wrap(txt.getBytes())); + Row row = session().execute(query).one(); + assertEquals(row.getString("k"), txt); + assertEquals(row.getInt("a"), 1); + assertEquals(row.getInt("b"), 2); + } + + @Test(groups = "short") + public void routingKeyColumnCaseSensitivityTest() throws Exception { + + BuiltStatement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_CASE); + assertNotNull(table); + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; + + query = insertInto(table).values(new String[] {"theKey", "a", "b"}, new Object[] {42, 1, 2}); + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + session().execute(query); + + query = select().from(table).where(eq("theKey", 42)); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + Row row = session().execute(query).one(); + assertEquals(row.getInt("theKey"), 42); + assertEquals(row.getInt("a"), 1); + assertEquals(row.getInt("b"), 2); + + query = insertInto(table).values(new String[] {"ThEkEy", "a", "b"}, new Object[] {42, 1, 2}); + bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + session().execute(query); + + query = select().from(table).where(eq("ThEkEy", 42)); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + row = session().execute(query).one(); + assertEquals(row.getInt("theKey"), 42); + assertEquals(row.getInt("a"), 1); + assertEquals(row.getInt("b"), 2); + } + + @Test(groups = "short") + public void routingKeyColumnCaseSensitivityForQuotedIdentifiersTest() throws Exception { + + BuiltStatement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_CASE_QUOTED); + assertNotNull(table); + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; + + query = + insertInto(table) + .values( + new String[] {"\"theKey\"", "a", "b", "\"tHEkEY\""}, new Object[] {42, 1, 2, 3}); + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + + query = + insertInto(table) + .values(new String[] {"theKey", "a", "b", "\"tHEkEY\""}, new Object[] {42, 1, 2, 3}); + assertNull(query.getRoutingKey(protocolVersion, codecRegistry)); + + query = + insertInto(table) + .values(new String[] {"theKey", "a", "b", "theKey"}, new Object[] {42, 1, 2, 3}); + assertNull(query.getRoutingKey(protocolVersion, codecRegistry)); + } + + @Test(groups 
= "short") + public void intRoutingKeyTest() throws Exception { + + BuiltStatement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_INT); + assertNotNull(table); + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; + + query = insertInto(table).values(new String[] {"k", "a", "b"}, new Object[] {42, 1, 2}); + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + session().execute(query); + + query = select().from(table).where(eq("k", 42)); + assertEquals(query.getRoutingKey(protocolVersion, codecRegistry), bb); + Row row = session().execute(query).one(); + assertEquals(row.getInt("k"), 42); + assertEquals(row.getInt("a"), 1); + assertEquals(row.getInt("b"), 2); + } + + @Test(groups = "short") + public void intRoutingBatchKeyTest() throws Exception { + + BuiltStatement query; + TableMetadata table = cluster().getMetadata().getKeyspace(keyspace).getTable(TABLE_INT); + assertNotNull(table); + ProtocolVersion protocolVersion = + cluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + CodecRegistry codecRegistry = CodecRegistry.DEFAULT_INSTANCE; + + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + + String batch_query; + BuiltStatement batch; + + query = select().from(table).where(eq("k", 42)); + + batch_query = "BEGIN BATCH "; + batch_query += String.format("INSERT INTO %s.test_int (k,a) VALUES (42,1);", keyspace); + batch_query += String.format("UPDATE %s.test_int USING TTL 400;", keyspace); + batch_query += "APPLY BATCH;"; + batch = + batch() + .add(insertInto(table).values(new String[] {"k", "a"}, new Object[] {42, 1})) + .add(update(table).using(ttl(400))); + assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), bb); + assertEquals(batch.toString(), batch_query); + // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL + + batch_query = "BEGIN BATCH "; + batch_query += String.format("SELECT * FROM %s.test_int WHERE k=42;", keyspace); + batch_query += "APPLY BATCH;"; + batch = batch(query); + assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), bb); + assertEquals(batch.toString(), batch_query); + // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL + + batch_query = "BEGIN BATCH "; + batch_query += "SELECT * FROM foo WHERE k=42;"; + batch_query += "APPLY BATCH;"; + batch = batch().add(select().from("foo").where(eq("k", 42))); + assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), null); + assertEquals(batch.toString(), batch_query); + // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL + + batch_query = "BEGIN BATCH USING TIMESTAMP 42 "; + batch_query += "INSERT INTO foo.bar (a) VALUES (123);"; + batch_query += "APPLY BATCH;"; + batch = batch().using(timestamp(42)).add(insertInto("foo", "bar").value("a", 123)); + assertEquals(batch.getRoutingKey(protocolVersion, codecRegistry), null); + assertEquals(batch.toString(), batch_query); + // TODO: rs = session().execute(batch); // Not guaranteed to be valid CQL + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTest.java index 176be7abccb..0bfd9493f04 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTest.java 
+++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,67 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.add; +import static com.datastax.driver.core.querybuilder.QueryBuilder.addAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.alias; +import static com.datastax.driver.core.querybuilder.QueryBuilder.append; +import static com.datastax.driver.core.querybuilder.QueryBuilder.appendAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.asc; +import static com.datastax.driver.core.querybuilder.QueryBuilder.batch; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.cast; +import static com.datastax.driver.core.querybuilder.QueryBuilder.column; +import static com.datastax.driver.core.querybuilder.QueryBuilder.contains; +import static com.datastax.driver.core.querybuilder.QueryBuilder.containsKey; +import static com.datastax.driver.core.querybuilder.QueryBuilder.decr; +import static com.datastax.driver.core.querybuilder.QueryBuilder.delete; +import static com.datastax.driver.core.querybuilder.QueryBuilder.desc; +import static com.datastax.driver.core.querybuilder.QueryBuilder.discard; +import static com.datastax.driver.core.querybuilder.QueryBuilder.discardAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fcall; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fromJson; +import static com.datastax.driver.core.querybuilder.QueryBuilder.gt; +import static com.datastax.driver.core.querybuilder.QueryBuilder.gte; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.incr; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.like; +import static com.datastax.driver.core.querybuilder.QueryBuilder.lt; +import static com.datastax.driver.core.querybuilder.QueryBuilder.lte; +import static com.datastax.driver.core.querybuilder.QueryBuilder.ne; +import static com.datastax.driver.core.querybuilder.QueryBuilder.notNull; +import static com.datastax.driver.core.querybuilder.QueryBuilder.path; +import static com.datastax.driver.core.querybuilder.QueryBuilder.prepend; +import static com.datastax.driver.core.querybuilder.QueryBuilder.prependAll; +import static 
com.datastax.driver.core.querybuilder.QueryBuilder.put; +import static com.datastax.driver.core.querybuilder.QueryBuilder.putAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.quote; +import static com.datastax.driver.core.querybuilder.QueryBuilder.raw; +import static com.datastax.driver.core.querybuilder.QueryBuilder.remove; +import static com.datastax.driver.core.querybuilder.QueryBuilder.removeAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.setIdx; +import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp; +import static com.datastax.driver.core.querybuilder.QueryBuilder.toJson; +import static com.datastax.driver.core.querybuilder.QueryBuilder.token; +import static com.datastax.driver.core.querybuilder.QueryBuilder.truncate; +import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.ProtocolVersion; +import com.datastax.driver.core.RegularStatement; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.core.exceptions.CodecNotFoundException; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.exceptions.InvalidTypeException; @@ -24,1271 +86,1606 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.testng.annotations.Test; - import java.io.PrintWriter; import java.io.StringWriter; import java.math.BigDecimal; import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; -import java.util.*; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; +import org.testng.annotations.Test; public class QueryBuilderTest { - @Test(groups = "unit") - public void selectTest() throws Exception { - - String query; - Statement select; - - query = "SELECT * FROM foo WHERE k=4 AND c>'a' AND c<='z';"; - select = select().all().from("foo").where(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); - assertEquals(select.toString(), query); - - // Ensure where() and where(...) 
are equal - select = select().all().from("foo").where().and(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); - assertEquals(select.toString(), query); - - query = "SELECT a,b,\"C\" FROM foo WHERE a IN ('127.0.0.1','127.0.0.3') AND \"C\"='foo' ORDER BY a ASC,b DESC LIMIT 42;"; - select = select("a", "b", quote("C")).from("foo") - .where(in("a", InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.3"))) - .and(eq(quote("C"), "foo")) - .orderBy(asc("a"), desc("b")) - .limit(42); - assertEquals(select.toString(), query); - - query = "SELECT writetime(a),ttl(a) FROM foo ALLOW FILTERING;"; - select = select().writeTime("a").ttl("a").from("foo").allowFiltering(); - assertEquals(select.toString(), query); - - query = "SELECT DISTINCT longName AS a,ttl(longName) AS ttla FROM foo LIMIT :limit;"; - select = select().distinct().column("longName").as("a").ttl("longName").as("ttla").from("foo").limit(bindMarker("limit")); - assertEquals(select.toString(), query); - - query = "SELECT DISTINCT longName AS a,ttl(longName) AS ttla FROM foo WHERE k IN () LIMIT :limit;"; - select = select().distinct().column("longName").as("a").ttl("longName").as("ttla").from("foo").where(in("k")).limit(bindMarker("limit")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE bar=:barmark AND baz=:bazmark LIMIT :limit;"; - select = select().all().from("foo").where().and(eq("bar", bindMarker("barmark"))).and(eq("baz", bindMarker("bazmark"))).limit(bindMarker("limit")); - assertEquals(select.toString(), query); - - query = "SELECT a FROM foo WHERE k IN ();"; - select = select("a").from("foo").where(in("k")); - assertEquals(select.toString(), query); - - query = "SELECT a FROM foo WHERE k IN ?;"; - select = select("a").from("foo").where(in("k", bindMarker())); - assertEquals(select.toString(), query); - - query = "SELECT DISTINCT a FROM foo WHERE k=1;"; - select = select("a").distinct().from("foo").where(eq("k", 1)); - assertEquals(select.toString(), query); - - query = "SELECT DISTINCT a,b FROM foo WHERE k=1;"; - select = select("a", "b").distinct().from("foo").where(eq("k", 1)); - assertEquals(select.toString(), query); - - query = "SELECT count(*) FROM foo;"; - select = select().countAll().from("foo"); - assertEquals(select.toString(), query); - - query = "SELECT intToBlob(b) FROM foo;"; - select = select().fcall("intToBlob", column("b")).from("foo"); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k>42 LIMIT 42;"; - select = select().all().from("foo").where(gt("k", 42)).limit(42); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE token(k)>token(42);"; - select = select().all().from("foo").where(gt(token("k"), fcall("token", 42))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo2 WHERE token(a,b)>token(42,101);"; - select = select().all().from("foo2").where(gt(token("a", "b"), fcall("token", 42, 101))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM words WHERE w='):,ydL ;O,D';"; - select = select().all().from("words").where(eq("w", "):,ydL ;O,D")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM words WHERE w='WA(!:gS)r(UfW';"; - select = select().all().from("words").where(eq("w", "WA(!:gS)r(UfW")); - assertEquals(select.toString(), query); - - Date date = new Date(); - date.setTime(1234325); - query = "SELECT * FROM foo WHERE d=1234325;"; - select = select().all().from("foo").where(eq("d", date)); - assertEquals(select.toString(), query); - - query = 
"SELECT * FROM foo WHERE b=0xcafebabe;"; - select = select().all().from("foo").where(eq("b", Bytes.fromHexString("0xCAFEBABE"))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE e CONTAINS 'text';"; - select = select().from("foo").where(contains("e", "text")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE e CONTAINS KEY 'key1';"; - select = select().from("foo").where(containsKey("e", "key1")); - assertEquals(select.toString(), query); - - query = "SELECT CAST(writetime(country) AS text) FROM artists LIMIT 2;"; - select = select().cast(fcall("writetime", column("country")), DataType.text()).from("artists").limit(2); - assertEquals(select.toString(), query); - - query = "SELECT avg(CAST(v AS float)) FROM e;"; - select = select().fcall("avg", cast(column("v"), DataType.cfloat())).from("e"); - assertEquals(select.toString(), query); - - query = "SELECT CAST(writetime(country) AS text) FROM artists LIMIT 2;"; - select = select().raw("CAST(writetime(country) AS text)").from("artists").limit(2); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE e LIKE 'a%';"; - select = select().from("foo").where(like("e", "a%")); - assertEquals(select.toString(), query); - - try { - select().countAll().from("foo").orderBy(asc("a"), desc("b")).orderBy(asc("a"), desc("b")); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "An ORDER BY clause has already been provided"); - } - - try { - select().column("a").all().from("foo"); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); - } - - try { - select().column("a").countAll().from("foo"); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); - } - - try { - select().all().from("foo").limit(-42); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Invalid LIMIT value, must be strictly positive"); - } - - try { - select().all().from("foo").limit(42).limit(42); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "A LIMIT value has already been provided"); - } - } - - @Test(groups = "unit") - @SuppressWarnings({"serial", "deprecation"}) - public void insertTest() throws Exception { - - String query; - Statement insert; - - query = "INSERT INTO foo (a,b,\"C\",d) VALUES (123,'127.0.0.1','foo''bar',{'x':3,'y':2}) USING TIMESTAMP 42 AND TTL 24;"; - insert = insertInto("foo") - .value("a", 123) - .value("b", InetAddress.getByName("127.0.0.1")) - .value(quote("C"), "foo'bar") - .value("d", new TreeMap() {{ - put("x", 3); - put("y", 2); - }}) - .using(timestamp(42)).and(ttl(24)); - assertEquals(insert.toString(), query); - - query = "INSERT INTO foo (a,b) VALUES (2,null);"; - insert = insertInto("foo") - .value("a", 2) - .value("b", null); - assertEquals(insert.toString(), query); - - query = "INSERT INTO foo (a,b) VALUES ({2,3,4},3.4) USING TTL 24 AND TIMESTAMP 42;"; - insert = insertInto("foo").values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add(2); - add(3); - add(4); - }}, 3.4}).using(ttl(24)).and(timestamp(42)); - assertEquals(insert.toString(), query); - - query = "INSERT INTO foo.bar (a,b) VALUES ({2,3,4},3.4) USING TTL ? 
AND TIMESTAMP ?;"; - insert = insertInto("foo", "bar") - .values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add(2); - add(3); - add(4); - }}, 3.4}) - .using(ttl(bindMarker())) - .and(timestamp(bindMarker())); - assertEquals(insert.toString(), query); - - // commutative result of TIMESTAMP - query = "INSERT INTO foo.bar (a,b,c) VALUES ({2,3,4},3.4,123) USING TIMESTAMP 42;"; - insert = insertInto("foo", "bar") - .using(timestamp(42)) - .values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add(2); - add(3); - add(4); - }}, 3.4}) - .value("c", 123); - assertEquals(insert.toString(), query); - - // commutative result of value() and values() - query = "INSERT INTO foo (c,a,b) VALUES (123,{2,3,4},3.4) USING TIMESTAMP 42;"; - insert = insertInto("foo") - .using(timestamp(42)) - .value("c", 123) - .values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add(2); - add(3); - add(4); - }}, 3.4}); - assertEquals(insert.toString(), query); - - try { - insertInto("foo").values(new String[]{"a", "b"}, new Object[]{1, 2, 3}); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Got 2 names but 3 values"); - } - - // CAS test - query = "INSERT INTO foo (k,x) VALUES (0,1) IF NOT EXISTS;"; - insert = insertInto("foo").value("k", 0).value("x", 1).ifNotExists(); - assertEquals(insert.toString(), query); - - // Tuples: see QueryBuilderTupleExecutionTest - // UDT: see QueryBuilderExecutionTest - } - - @Test(groups = "unit") - @SuppressWarnings("serial") - public void updateTest() throws Exception { - - String query; - Statement update; - - query = "UPDATE foo.bar USING TIMESTAMP 42 SET a=12,b=[3,2,1],c=c+3 WHERE k=2;"; - update = update("foo", "bar").using(timestamp(42)).with(set("a", 12)).and(set("b", Arrays.asList(3, 2, 1))).and(incr("c", 3)).where(eq("k", 2)); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET b=null WHERE k=2;"; - update = update("foo").where().and(eq("k", 2)).with(set("b", null)); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2 AND l='foo' AND m<4 AND n>=1;"; - update = update("foo").with(setIdx("a", 2, "foo")).and(prependAll("b", Arrays.asList(3, 2, 1))).and(remove("c", "a")).where(eq("k", 2)).and(eq("l", "foo")).and(lt("m", 4)).and(gte("n", 1)); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET b=[3]+b,c=c+['a'],d=d+[1,2,3],e=e-[1];"; - update = update("foo").with().and(prepend("b", 3)).and(append("c", "a")).and(appendAll("d", Arrays.asList(1, 2, 3))).and(discard("e", 1)); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET b=b-[1,2,3],c=c+{1},d=d+{2,3,4};"; - update = update("foo").with(discardAll("b", Arrays.asList(1, 2, 3))).and(add("c", 1)).and(addAll("d", new TreeSet() {{ - add(2); - add(3); - add(4); - }})); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET b=b-{2,3,4},c['k']='v',d=d+{'x':3,'y':2};"; - update = update("foo").with(removeAll("b", new TreeSet() {{ - add(2); - add(3); - add(4); - }})) - .and(put("c", "k", "v")) - .and(putAll("d", new TreeMap() {{ - put("x", 3); - put("y", 2); - }})); - assertEquals(update.toString(), query); - - query = "UPDATE foo USING TTL 400;"; - update = update("foo").using(ttl(400)); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET a=" + new BigDecimal(3.2) + ",b=42 WHERE k=2;"; - update = update("foo").with(set("a", new BigDecimal(3.2))).and(set("b", new BigInteger("42"))).where(eq("k", 2)); 
- assertEquals(update.toString(), query); - - query = "UPDATE foo USING TIMESTAMP 42 SET b=[3,2,1]+b WHERE k=2 AND l='foo';"; - update = update("foo").where().and(eq("k", 2)).and(eq("l", "foo")).with(prependAll("b", Arrays.asList(3, 2, 1))).using(timestamp(42)); - assertEquals(update.toString(), query); - - // Test commutative USING - update = update("foo").where().and(eq("k", 2)).and(eq("l", "foo")).using(timestamp(42)).with(prependAll("b", Arrays.asList(3, 2, 1))); - assertEquals(update.toString(), query); - - // Test commutative USING - update = update("foo").using(timestamp(42)).where(eq("k", 2)).and(eq("l", "foo")).with(prependAll("b", Arrays.asList(3, 2, 1))); - assertEquals(update.toString(), query); - - try { - update("foo").using(ttl(-400)); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Invalid ttl, must be positive"); - } - - // CAS test - query = "UPDATE foo SET x=4 WHERE k=0 IF x=1;"; - update = update("foo").with(set("x", 4)).where(eq("k", 0)).onlyIf(eq("x", 1)); - assertEquals(update.toString(), query); - - // IF EXISTS CAS test - update = update("foo").with(set("x", 3)).where(eq("k", 2)).ifExists(); - assertThat(update.toString()).isEqualTo("UPDATE foo SET x=3 WHERE k=2 IF EXISTS;"); - } - - @Test(groups = "unit") - public void deleteTest() throws Exception { - - String query; - Statement delete; - - query = "DELETE a,b,c FROM foo USING TIMESTAMP 0 WHERE k=1;"; - delete = delete("a", "b", "c").from("foo").using(timestamp(0)).where(eq("k", 1)); - assertEquals(delete.toString(), query); - - query = "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; - delete = delete().listElt("a", 3).mapElt("b", "foo").column("c").from("foo").where(eq("k", 1)); - assertEquals(delete.toString(), query); - - query = "DELETE a[?],b[?],c FROM foo WHERE k=1;"; - delete = delete().listElt("a", bindMarker()).mapElt("b", bindMarker()).column("c").from("foo").where(eq("k", 1)); - assertEquals(delete.toString(), query); - - // Invalid CQL, testing edge case - query = "DELETE a,b,c FROM foo;"; - delete = delete("a", "b", "c").from("foo"); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo USING TIMESTAMP 1240003134 WHERE k='value';"; - delete = delete().all().from("foo").using(timestamp(1240003134L)).where(eq("k", "value")); - assertEquals(delete.toString(), query); - delete = delete().from("foo").using(timestamp(1240003134L)).where(eq("k", "value")); - assertEquals(delete.toString(), query); - - query = "DELETE a,b,c FROM foo.bar USING TIMESTAMP 1240003134 WHERE k=1;"; - delete = delete("a", "b", "c").from("foo", "bar").where().and(eq("k", 1)).using(timestamp(1240003134L)); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo.bar WHERE k1='foo' AND k2=1;"; - delete = delete().from("foo", "bar").where(eq("k1", "foo")).and(eq("k2", 1)); - assertEquals(delete.toString(), query); - - try { - delete().column("a").all().from("foo"); - fail("Expected an IllegalStateException"); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); - } - - try { - delete().from("foo").using(timestamp(-1240003134L)); - fail("Expected an IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Invalid timestamp, must be positive"); - } - - query = "DELETE FROM foo.bar WHERE k1='foo' IF EXISTS;"; - delete = delete().from("foo", "bar").where(eq("k1", "foo")).ifExists(); - assertEquals(delete.toString(), query); - - 
query = "DELETE FROM foo.bar WHERE k1='foo' IF a=1 AND b=2;"; - delete = delete().from("foo", "bar").where(eq("k1", "foo")).onlyIf(eq("a", 1)).and(eq("b", 2)); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE k=:key;"; - delete = delete().from("foo").where(eq("k", bindMarker("key"))); - assertEquals(delete.toString(), query); - } - - @Test(groups = "unit") - @SuppressWarnings("serial") - public void batchTest() throws Exception { - String query; - Statement batch; - - query = "BEGIN BATCH USING TIMESTAMP 42 "; - query += "INSERT INTO foo (a,b) VALUES ({2,3,4},3.4);"; - query += "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2;"; - query += "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; - query += "APPLY BATCH;"; - batch = batch() - .add(insertInto("foo").values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add(2); - add(3); - add(4); - }}, 3.4})) - .add(update("foo").with(setIdx("a", 2, "foo")).and(prependAll("b", Arrays.asList(3, 2, 1))).and(remove("c", "a")).where(eq("k", 2))) - .add(delete().listElt("a", 3).mapElt("b", "foo").column("c").from("foo").where(eq("k", 1))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - - // Test passing batch(statement) - query = "BEGIN BATCH "; - query += "DELETE a[3] FROM foo WHERE k=1;"; - query += "APPLY BATCH;"; - batch = batch(delete().listElt("a", 3).from("foo").where(eq("k", 1))); - assertEquals(batch.toString(), query); - - assertEquals(batch().toString(), "BEGIN BATCH APPLY BATCH;"); - } - - @Test(groups = "unit") - public void batchCounterTest() throws Exception { - String query; - Statement batch; - - // Test value increments - query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; - query += "UPDATE foo SET a=a+1;"; - query += "UPDATE foo SET b=b+2;"; - query += "UPDATE foo SET c=c+3;"; - query += "APPLY BATCH;"; - batch = batch() - .add(update("foo").with(incr("a", 1))) - .add(update("foo").with(incr("b", 2))) - .add(update("foo").with(incr("c", 3))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - - // Test single increments - query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; - query += "UPDATE foo SET a=a+1;"; - query += "UPDATE foo SET b=b+1;"; - query += "UPDATE foo SET c=c+1;"; - query += "APPLY BATCH;"; - batch = batch() - .add(update("foo").with(incr("a"))) - .add(update("foo").with(incr("b"))) - .add(update("foo").with(incr("c"))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - - // Test value decrements - query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; - query += "UPDATE foo SET a=a-1;"; - query += "UPDATE foo SET b=b-2;"; - query += "UPDATE foo SET c=c-3;"; - query += "APPLY BATCH;"; - batch = batch() - .add(update("foo").with(decr("a", 1))) - .add(update("foo").with(decr("b", 2))) - .add(update("foo").with(decr("c", 3))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - - // Test single decrements - query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; - query += "UPDATE foo SET a=a-1;"; - query += "UPDATE foo SET b=b-1;"; - query += "UPDATE foo SET c=c-1;"; - query += "APPLY BATCH;"; - batch = batch() - .add(update("foo").with(decr("a"))) - .add(update("foo").with(decr("b"))) - .add(update("foo").with(decr("c"))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - - // Test negative decrements and negative increments - query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; - query += "UPDATE foo SET a=a+1;"; - query += "UPDATE foo SET b=b+-2;"; - query += "UPDATE foo SET c=c-3;"; - query += "APPLY 
BATCH;"; - batch = batch() - .add(update("foo").with(decr("a", -1))) - .add(update("foo").with(incr("b", -2))) - .add(update("foo").with(decr("c", 3))) - .using(timestamp(42)); - assertEquals(batch.toString(), query); - } - - @Test(groups = "unit", expectedExceptions = {IllegalArgumentException.class}) - public void batchMixedCounterTest() throws Exception { - batch() - .add(update("foo").with(incr("a", 1))) - .add(update("foo").with(set("b", 2))) - .add(update("foo").with(incr("c", 3))) - .using(timestamp(42)); + @Test(groups = "unit") + public void selectTest() throws Exception { + + String query; + Statement select; + + query = "SELECT * FROM foo WHERE k=4 AND c>'a' AND c<='z';"; + select = select().all().from("foo").where(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); + assertEquals(select.toString(), query); + + // Ensure where() and where(...) are equal + select = + select().all().from("foo").where().and(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); + assertEquals(select.toString(), query); + + query = + "SELECT a,b,\"C\" FROM foo WHERE a IN ('127.0.0.1','127.0.0.3') AND \"C\"='foo' ORDER BY a ASC,b DESC LIMIT 42;"; + select = + select("a", "b", quote("C")) + .from("foo") + .where(in("a", InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.3"))) + .and(eq(quote("C"), "foo")) + .orderBy(asc("a"), desc("b")) + .limit(42); + assertEquals(select.toString(), query); + + query = "SELECT writetime(a),ttl(a) FROM foo ALLOW FILTERING;"; + select = select().writeTime("a").ttl("a").from("foo").allowFiltering(); + assertEquals(select.toString(), query); + + query = "SELECT DISTINCT longName AS a,ttl(longName) AS ttla FROM foo LIMIT :limit;"; + select = + select() + .distinct() + .column("longName") + .as("a") + .ttl("longName") + .as("ttla") + .from("foo") + .limit(bindMarker("limit")); + assertEquals(select.toString(), query); + + query = + "SELECT DISTINCT longName AS a,ttl(longName) AS ttla FROM foo WHERE k IN () LIMIT :limit;"; + select = + select() + .distinct() + .column("longName") + .as("a") + .ttl("longName") + .as("ttla") + .from("foo") + .where(in("k")) + .limit(bindMarker("limit")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE bar=:barmark AND baz=:bazmark LIMIT :limit;"; + select = + select() + .all() + .from("foo") + .where() + .and(eq("bar", bindMarker("barmark"))) + .and(eq("baz", bindMarker("bazmark"))) + .limit(bindMarker("limit")); + assertEquals(select.toString(), query); + + query = "SELECT a FROM foo WHERE k IN ();"; + select = select("a").from("foo").where(in("k")); + assertEquals(select.toString(), query); + + query = "SELECT a FROM foo WHERE k IN ?;"; + select = select("a").from("foo").where(in("k", bindMarker())); + assertEquals(select.toString(), query); + + query = "SELECT DISTINCT a FROM foo WHERE k=1;"; + select = select("a").distinct().from("foo").where(eq("k", 1)); + assertEquals(select.toString(), query); + + query = "SELECT DISTINCT a,b FROM foo WHERE k=1;"; + select = select("a", "b").distinct().from("foo").where(eq("k", 1)); + assertEquals(select.toString(), query); + + query = "SELECT count(*) FROM foo;"; + select = select().countAll().from("foo"); + assertEquals(select.toString(), query); + + query = "SELECT intToBlob(b) FROM foo;"; + select = select().fcall("intToBlob", column("b")).from("foo"); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k>42 LIMIT 42;"; + select = select().all().from("foo").where(gt("k", 42)).limit(42); + assertEquals(select.toString(), query); + + 
query = "SELECT * FROM foo WHERE token(k)>token(42);"; + select = select().all().from("foo").where(gt(token("k"), fcall("token", 42))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo2 WHERE token(a,b)>token(42,101);"; + select = select().all().from("foo2").where(gt(token("a", "b"), fcall("token", 42, 101))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM words WHERE w='):,ydL ;O,D';"; + select = select().all().from("words").where(eq("w", "):,ydL ;O,D")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM words WHERE w='WA(!:gS)r(UfW';"; + select = select().all().from("words").where(eq("w", "WA(!:gS)r(UfW")); + assertEquals(select.toString(), query); + + Date date = new Date(); + date.setTime(1234325); + query = "SELECT * FROM foo WHERE d=1234325;"; + select = select().all().from("foo").where(eq("d", date)); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE b=0xcafebabe;"; + select = select().all().from("foo").where(eq("b", Bytes.fromHexString("0xCAFEBABE"))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE e CONTAINS 'text';"; + select = select().from("foo").where(contains("e", "text")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE e CONTAINS KEY 'key1';"; + select = select().from("foo").where(containsKey("e", "key1")); + assertEquals(select.toString(), query); + + query = "SELECT CAST(writetime(country) AS text) FROM artists LIMIT 2;"; + select = + select() + .cast(fcall("writetime", column("country")), DataType.text()) + .from("artists") + .limit(2); + assertEquals(select.toString(), query); + + query = "SELECT avg(CAST(v AS float)) FROM e;"; + select = select().fcall("avg", cast(column("v"), DataType.cfloat())).from("e"); + assertEquals(select.toString(), query); + + query = "SELECT CAST(writetime(country) AS text) FROM artists LIMIT 2;"; + select = select().raw("CAST(writetime(country) AS text)").from("artists").limit(2); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE e LIKE 'a%';"; + select = select().from("foo").where(like("e", "a%")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k!=1;"; + select = select().from("foo").where(ne("k", 1)); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE (k1,k2)!=(1,2);"; + select = select().from("foo").where(ne(ImmutableList.of("k1", "k2"), ImmutableList.of(1, 2))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k IS NOT NULL;"; + select = select().from("foo").where(notNull("k")); + assertEquals(select.toString(), query); + + try { + select().countAll().from("foo").orderBy(asc("a"), desc("b")).orderBy(asc("a"), desc("b")); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertEquals(e.getMessage(), "An ORDER BY clause has already been provided"); } - @Test(groups = "unit") - public void markerTest() throws Exception { - String query; - Statement insert; - - query = "INSERT INTO test (k,c) VALUES (0,?);"; - insert = insertInto("test") - .value("k", 0) - .value("c", bindMarker()); - assertEquals(insert.toString(), query); + try { + select().from("foo").orderBy(); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "Invalid ORDER BY argument, the orderings must not be empty."); } - @Test(groups = "unit") - public void rawEscapingTest() throws Exception { - - String 
query; - Statement select; - - query = "SELECT * FROM t WHERE c='C''est la vie!';"; - select = select().from("t").where(eq("c", "C'est la vie!")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM t WHERE c=C'est la vie!;"; - select = select().from("t").where(eq("c", raw("C'est la vie!"))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM t WHERE c=now();"; - select = select().from("t").where(eq("c", fcall("now"))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM t WHERE c='now()';"; - select = select().from("t").where(eq("c", raw("'now()'"))); - assertEquals(select.toString(), query); - } - - @Test(groups = "unit") - public void selectInjectionTests() throws Exception { - - String query; - Statement select; - - query = "SELECT * FROM \"foo WHERE k=4\";"; - select = select().all().from("foo WHERE k=4"); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k='4 AND c=5';"; - select = select().all().from("foo").where(eq("k", "4 AND c=5")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k='4'' AND c=''5';"; - select = select().all().from("foo").where(eq("k", "4' AND c='5")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k='4'' OR ''1''=''1';"; - select = select().all().from("foo").where(eq("k", "4' OR '1'='1")); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k='4; --test comment;';"; - select = select().all().from("foo").where(eq("k", "4; --test comment;")); - assertEquals(select.toString(), query); - - query = "SELECT \"*\" FROM foo;"; - select = select("*").from("foo"); - assertEquals(select.toString(), query); - - query = "SELECT a,b FROM foo WHERE a IN ('b','c''); --comment');"; - select = select("a", "b").from("foo").where(in("a", "b", "c'); --comment")); - assertEquals(select.toString(), query); - - // User Injection? 
- query = "SELECT * FROM bar; --(b) FROM foo;"; - select = select().fcall("* FROM bar; --", column("b")).from("foo"); - assertEquals(select.toString(), query); - - query = "SELECT writetime(\"a) FROM bar; --\"),ttl(a) FROM foo ALLOW FILTERING;"; - select = select().writeTime("a) FROM bar; --").ttl("a").from("foo").allowFiltering(); - assertEquals(select.toString(), query); - - query = "SELECT writetime(a),ttl(\"a) FROM bar; --\") FROM foo ALLOW FILTERING;"; - select = select().writeTime("a").ttl("a) FROM bar; --").from("foo").allowFiltering(); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE \"k=1 OR k\">42 LIMIT 42;"; - select = select().all().from("foo").where(gt("k=1 OR k", 42)).limit(42); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE token(\"k)>0 OR token(k\")>token(42);"; - select = select().all().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42))); - assertEquals(select.toString(), query); - } - - @Test(groups = "unit") - @SuppressWarnings("serial") - public void insertInjectionTest() throws Exception { - - String query; - Statement insert; - - query = "INSERT INTO foo (a) VALUES ('123); --comment');"; - insert = insertInto("foo").value("a", "123); --comment"); - assertEquals(insert.toString(), query); - - query = "INSERT INTO foo (\"a,b\") VALUES (123);"; - insert = insertInto("foo").value("a,b", 123); - assertEquals(insert.toString(), query); - - query = "INSERT INTO foo (a,b) VALUES ({'2''} space','3','4'},3.4) USING TTL 24 AND TIMESTAMP 42;"; - insert = insertInto("foo").values(new String[]{"a", "b"}, new Object[]{new TreeSet() {{ - add("2'} space"); - add("3"); - add("4"); - }}, 3.4}).using(ttl(24)).and(timestamp(42)); - assertEquals(insert.toString(), query); - } - - @Test(groups = "unit") - public void updateInjectionTest() throws Exception { - - String query; - Statement update; - - query = "UPDATE foo.bar USING TIMESTAMP 42 SET a=12 WHERE k='2 OR 1=1';"; - update = update("foo", "bar").using(timestamp(42)).with(set("a", 12)).where(eq("k", "2 OR 1=1")); - assertEquals(update.toString(), query); - - query = "UPDATE foo SET b='null WHERE k=1; --comment' WHERE k=2;"; - update = update("foo").where().and(eq("k", 2)).with(set("b", "null WHERE k=1; --comment")); - assertEquals(update.toString(), query); - - query = "UPDATE foo USING TIMESTAMP 42 SET \"b WHERE k=1; --comment\"=[3,2,1]+\"b WHERE k=1; --comment\" WHERE k=2;"; - update = update("foo").where().and(eq("k", 2)).with(prependAll("b WHERE k=1; --comment", Arrays.asList(3, 2, 1))).using(timestamp(42)); - assertEquals(update.toString(), query); - } - - @Test(groups = "unit") - public void deleteInjectionTests() throws Exception { - - String query; - Statement delete; - - query = "DELETE FROM \"foo WHERE k=4\";"; - delete = delete().from("foo WHERE k=4"); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE k='4 AND c=5';"; - delete = delete().from("foo").where(eq("k", "4 AND c=5")); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE k='4'' AND c=''5';"; - delete = delete().from("foo").where(eq("k", "4' AND c='5")); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE k='4'' OR ''1''=''1';"; - delete = delete().from("foo").where(eq("k", "4' OR '1'='1")); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE k='4; --test comment;';"; - delete = delete().from("foo").where(eq("k", "4; --test comment;")); - assertEquals(delete.toString(), query); - - query = 
"DELETE \"*\" FROM foo;"; - delete = delete("*").from("foo"); - assertEquals(delete.toString(), query); - - query = "DELETE a,b FROM foo WHERE a IN ('b','c''); --comment');"; - delete = delete("a", "b").from("foo") - .where(in("a", "b", "c'); --comment")); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE \"k=1 OR k\">42;"; - delete = delete().from("foo").where(gt("k=1 OR k", 42)); - assertEquals(delete.toString(), query); - - query = "DELETE FROM foo WHERE token(\"k)>0 OR token(k\")>token(42);"; - delete = delete().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42))); - assertEquals(delete.toString(), query); + try { + select().column("a").all().from("foo"); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); } - @Test(groups = "unit") - public void statementForwardingTest() throws Exception { - - Update upd = update("foo"); - upd.setConsistencyLevel(ConsistencyLevel.QUORUM); - upd.enableTracing(); - - Statement query = upd.using(timestamp(42)).with(set("a", 12)).and(incr("c", 3)).where(eq("k", 2)); - - assertEquals(query.getConsistencyLevel(), ConsistencyLevel.QUORUM); - assertTrue(query.isTracing()); + try { + select().column("a").countAll().from("foo"); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); } - @Test(groups = "unit", expectedExceptions = CodecNotFoundException.class) - public void rejectUnknownValueTest() throws Exception { - RegularStatement s = update("foo").with(set("a", new byte[13])).where(eq("k", 2)) - .setForceNoValues(true); - s.getQueryString(); + try { + select().all().from("foo").limit(-42); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "Invalid LIMIT value, must be strictly positive"); } - @Test(groups = "unit") - public void truncateTest() throws Exception { - assertEquals(truncate("foo").toString(), "TRUNCATE foo;"); - assertEquals(truncate("foo", quote("Bar")).toString(), "TRUNCATE foo.\"Bar\";"); + try { + select().all().from("foo").limit(42).limit(42); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertEquals(e.getMessage(), "A LIMIT value has already been provided"); } - - @Test(groups = "unit") - public void quotingTest() { - assertEquals(select().from("Metrics", "epochs").toString(), - "SELECT * FROM Metrics.epochs;"); - assertEquals(select().from("Metrics", quote("epochs")).toString(), - "SELECT * FROM Metrics.\"epochs\";"); - assertEquals(select().from(quote("Metrics"), "epochs").toString(), - "SELECT * FROM \"Metrics\".epochs;"); - assertEquals(select().from(quote("Metrics"), quote("epochs")).toString(), - "SELECT * FROM \"Metrics\".\"epochs\";"); - - assertEquals(insertInto("Metrics", "epochs").toString(), - "INSERT INTO Metrics.epochs () VALUES ();"); - assertEquals(insertInto("Metrics", quote("epochs")).toString(), - "INSERT INTO Metrics.\"epochs\" () VALUES ();"); - assertEquals(insertInto(quote("Metrics"), "epochs").toString(), - "INSERT INTO \"Metrics\".epochs () VALUES ();"); - assertEquals(insertInto(quote("Metrics"), quote("epochs")).toString(), - "INSERT INTO \"Metrics\".\"epochs\" () VALUES ();"); + } + + @Test(groups = "unit") + @SuppressWarnings({"serial", "deprecation"}) + public void insertTest() throws Exception { + + String query; + Statement insert; + + 
query = + "INSERT INTO foo (a,b,\"C\",d) VALUES (123,'127.0.0.1','foo''bar',{'x':3,'y':2}) USING TIMESTAMP 42 AND TTL 24;"; + insert = + insertInto("foo") + .value("a", 123) + .value("b", InetAddress.getByName("127.0.0.1")) + .value(quote("C"), "foo'bar") + .value( + "d", + new TreeMap() { + { + put("x", 3); + put("y", 2); + } + }) + .using(timestamp(42)) + .and(ttl(24)); + assertEquals(insert.toString(), query); + + query = "INSERT INTO foo (a,b) VALUES (2,null);"; + insert = insertInto("foo").value("a", 2).value("b", null); + assertEquals(insert.toString(), query); + + query = "INSERT INTO foo (a,b) VALUES ({2,3,4},3.4) USING TTL 24 AND TIMESTAMP 42;"; + insert = + insertInto("foo") + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add(2); + add(3); + add(4); + } + }, + 3.4 + }) + .using(ttl(24)) + .and(timestamp(42)); + assertEquals(insert.toString(), query); + + query = "INSERT INTO foo.bar (a,b) VALUES ({2,3,4},3.4) USING TTL ? AND TIMESTAMP ?;"; + insert = + insertInto("foo", "bar") + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add(2); + add(3); + add(4); + } + }, + 3.4 + }) + .using(ttl(bindMarker())) + .and(timestamp(bindMarker())); + assertEquals(insert.toString(), query); + + // commutative result of TIMESTAMP + query = "INSERT INTO foo.bar (a,b,c) VALUES ({2,3,4},3.4,123) USING TIMESTAMP 42;"; + insert = + insertInto("foo", "bar") + .using(timestamp(42)) + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add(2); + add(3); + add(4); + } + }, + 3.4 + }) + .value("c", 123); + assertEquals(insert.toString(), query); + + // commutative result of value() and values() + query = "INSERT INTO foo (c,a,b) VALUES (123,{2,3,4},3.4) USING TIMESTAMP 42;"; + insert = + insertInto("foo") + .using(timestamp(42)) + .value("c", 123) + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add(2); + add(3); + add(4); + } + }, + 3.4 + }); + assertEquals(insert.toString(), query); + + try { + insertInto("foo").values(new String[] {"a", "b"}, new Object[] {1, 2, 3}); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "Got 2 names but 3 values"); } - @Test(groups = "unit") - public void compoundWhereClauseTest() throws Exception { - String query; - Statement select; - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)=('a',2);"; - select = select().all().from("foo").where(eq("k", 4)).and(eq(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)>('a',2);"; - select = select().all().from("foo").where(eq("k", 4)).and(gt(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)>=('a',2) AND (c1,c2)<('b',0);"; - select = select().all().from("foo").where(eq("k", 4)).and(gte(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))) - .and(lt(Arrays.asList("c1", "c2"), Arrays.asList("b", 0))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)<=('a',2);"; - select = select().all().from("foo").where(eq("k", 4)).and(lte(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2) IN ((1,'foo'),(2,'bar'),(3,'qix'));"; - List names = ImmutableList.of("c1", "c2"); - List values = ImmutableList.>of( - ImmutableList.of(1, "foo"), - 
ImmutableList.of(2, "bar"), - ImmutableList.of(3, "qix")); - select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); - assertEquals(select.toString(), query); - - query = "SELECT * FROM foo WHERE k=4 AND (c1,c2) IN ((1,'foo'),(2,?),?);"; - names = ImmutableList.of("c1", "c2"); - values = ImmutableList.of( - ImmutableList.of(1, "foo"), - ImmutableList.of(2, bindMarker()), - bindMarker()); - select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); - assertEquals(select.toString(), query); - - // special case, single element list with bind marker should be (?) instead of ((?)) - query = "SELECT * FROM foo WHERE k=4 AND (c1) IN (?);"; - names = ImmutableList.of("c1"); - values = ImmutableList.of(ImmutableList.of(bindMarker())); - select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); - assertEquals(select.toString(), query); + // CAS test + query = "INSERT INTO foo (k,x) VALUES (0,1) IF NOT EXISTS;"; + insert = insertInto("foo").value("k", 0).value("x", 1).ifNotExists(); + assertEquals(insert.toString(), query); + + // Tuples: see QueryBuilderTupleExecutionTest + // UDT: see QueryBuilderExecutionTest + } + + @Test(groups = "unit") + @SuppressWarnings("serial") + public void updateTest() throws Exception { + + String query; + Statement update; + + query = "UPDATE foo.bar USING TIMESTAMP 42 SET a=12,b=[3,2,1],c=c+3 WHERE k=2;"; + update = + update("foo", "bar") + .using(timestamp(42)) + .with(set("a", 12)) + .and(set("b", Arrays.asList(3, 2, 1))) + .and(incr("c", 3)) + .where(eq("k", 2)); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET b=null WHERE k=2;"; + update = update("foo").where().and(eq("k", 2)).with(set("b", null)); + assertEquals(update.toString(), query); + + query = + "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2 AND l='foo' AND m<4 AND n>=1;"; + update = + update("foo") + .with(setIdx("a", 2, "foo")) + .and(prependAll("b", Arrays.asList(3, 2, 1))) + .and(remove("c", "a")) + .where(eq("k", 2)) + .and(eq("l", "foo")) + .and(lt("m", 4)) + .and(gte("n", 1)); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET b=[3]+b,c=c+['a'],d=d+[1,2,3],e=e-[1];"; + update = + update("foo") + .with() + .and(prepend("b", 3)) + .and(append("c", "a")) + .and(appendAll("d", Arrays.asList(1, 2, 3))) + .and(discard("e", 1)); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET b=b-[1,2,3],c=c+{1},d=d+{2,3,4};"; + update = + update("foo") + .with(discardAll("b", Arrays.asList(1, 2, 3))) + .and(add("c", 1)) + .and( + addAll( + "d", + new TreeSet() { + { + add(2); + add(3); + add(4); + } + })); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET b=b-{2,3,4},c['k']='v',d=d+{'x':3,'y':2};"; + update = + update("foo") + .with( + removeAll( + "b", + new TreeSet() { + { + add(2); + add(3); + add(4); + } + })) + .and(put("c", "k", "v")) + .and( + putAll( + "d", + new TreeMap() { + { + put("x", 3); + put("y", 2); + } + })); + assertEquals(update.toString(), query); + + query = "UPDATE foo USING TTL 400;"; + update = update("foo").using(ttl(400)); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET a=" + new BigDecimal(3.2) + ",b=42 WHERE k=2;"; + update = + update("foo") + .with(set("a", new BigDecimal(3.2))) + .and(set("b", new BigInteger("42"))) + .where(eq("k", 2)); + assertEquals(update.toString(), query); + + query = "UPDATE foo USING TIMESTAMP 42 SET b=[3,2,1]+b WHERE k=2 AND l='foo';"; + update = + update("foo") + .where() + .and(eq("k", 2)) 
+ .and(eq("l", "foo")) + .with(prependAll("b", Arrays.asList(3, 2, 1))) + .using(timestamp(42)); + assertEquals(update.toString(), query); + + // Test commutative USING + update = + update("foo") + .where() + .and(eq("k", 2)) + .and(eq("l", "foo")) + .using(timestamp(42)) + .with(prependAll("b", Arrays.asList(3, 2, 1))); + assertEquals(update.toString(), query); + + // Test commutative USING + update = + update("foo") + .using(timestamp(42)) + .where(eq("k", 2)) + .and(eq("l", "foo")) + .with(prependAll("b", Arrays.asList(3, 2, 1))); + assertEquals(update.toString(), query); + + try { + update("foo").using(ttl(-400)); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "Invalid ttl, must be positive"); } - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Too many values for IN clause, the maximum allowed is 65535") - public void should_fail_if_compound_in_clause_has_too_many_values() { - List values = Collections.nCopies(65536, "a"); - select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("name"), values)); + // CAS test + query = "UPDATE foo SET x=4 WHERE k=0 IF x=1;"; + update = update("foo").with(set("x", 4)).where(eq("k", 0)).onlyIf(eq("x", 1)); + assertEquals(update.toString(), query); + + // IF EXISTS CAS test + update = update("foo").with(set("x", 3)).where(eq("k", 2)).ifExists(); + assertThat(update.toString()).isEqualTo("UPDATE foo SET x=3 WHERE k=2 IF EXISTS;"); + } + + @Test(groups = "unit") + public void deleteTest() throws Exception { + + String query; + Statement delete; + + query = "DELETE a,b,c FROM foo USING TIMESTAMP 0 WHERE k=1;"; + delete = delete("a", "b", "c").from("foo").using(timestamp(0)).where(eq("k", 1)); + assertEquals(delete.toString(), query); + + query = "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; + delete = delete().listElt("a", 3).mapElt("b", "foo").column("c").from("foo").where(eq("k", 1)); + assertEquals(delete.toString(), query); + + query = "DELETE a[?],b[?],c FROM foo WHERE k=1;"; + delete = + delete() + .listElt("a", bindMarker()) + .mapElt("b", bindMarker()) + .column("c") + .from("foo") + .where(eq("k", 1)); + assertEquals(delete.toString(), query); + + // Invalid CQL, testing edge case + query = "DELETE a,b,c FROM foo;"; + delete = delete("a", "b", "c").from("foo"); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo USING TIMESTAMP 1240003134 WHERE k='value';"; + delete = delete().all().from("foo").using(timestamp(1240003134L)).where(eq("k", "value")); + assertEquals(delete.toString(), query); + delete = delete().from("foo").using(timestamp(1240003134L)).where(eq("k", "value")); + assertEquals(delete.toString(), query); + + query = "DELETE a,b,c FROM foo.bar USING TIMESTAMP 1240003134 WHERE k=1;"; + delete = + delete("a", "b", "c") + .from("foo", "bar") + .where() + .and(eq("k", 1)) + .using(timestamp(1240003134L)); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo.bar WHERE k1='foo' AND k2=1;"; + delete = delete().from("foo", "bar").where(eq("k1", "foo")).and(eq("k2", 1)); + assertEquals(delete.toString(), query); + + try { + delete().column("a").all().from("foo"); + fail("Expected an IllegalStateException"); + } catch (IllegalStateException e) { + assertEquals(e.getMessage(), "Some columns ([a]) have already been selected."); } - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Missing values for IN 
clause") - public void should_fail_if_compound_in_clause_given_null_values() { - select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("name"), null)); + try { + delete().from("foo").using(timestamp(-1240003134L)); + fail("Expected an IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "Invalid timestamp, must be positive"); } - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "The number of names \\(4\\) and values \\(3\\) don't match") - public void should_fail_if_compound_in_clause_has_mismatch_of_names_and_values() { - select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("a", "b", "c", "d"), + query = "DELETE FROM foo.bar WHERE k1='foo' IF EXISTS;"; + delete = delete().from("foo", "bar").where(eq("k1", "foo")).ifExists(); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo.bar WHERE k1='foo' IF a=1 AND b=2;"; + delete = delete().from("foo", "bar").where(eq("k1", "foo")).onlyIf(eq("a", 1)).and(eq("b", 2)); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE k=:key;"; + delete = delete().from("foo").where(eq("k", bindMarker("key"))); + assertEquals(delete.toString(), query); + } + + @Test(groups = "unit") + @SuppressWarnings("serial") + public void batchTest() throws Exception { + String query; + Statement batch; + + query = "BEGIN BATCH USING TIMESTAMP 42 "; + query += "INSERT INTO foo (a,b) VALUES ({2,3,4},3.4);"; + query += "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2;"; + query += "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; + query += "APPLY BATCH;"; + batch = + batch() + .add( + insertInto("foo") + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add(2); + add(3); + add(4); + } + }, + 3.4 + })) + .add( + update("foo") + .with(setIdx("a", 2, "foo")) + .and(prependAll("b", Arrays.asList(3, 2, 1))) + .and(remove("c", "a")) + .where(eq("k", 2))) + .add( + delete() + .listElt("a", 3) + .mapElt("b", "foo") + .column("c") + .from("foo") + .where(eq("k", 1))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + + // Test passing batch(statement) + query = "BEGIN BATCH "; + query += "DELETE a[3] FROM foo WHERE k=1;"; + query += "APPLY BATCH;"; + batch = batch(delete().listElt("a", 3).from("foo").where(eq("k", 1))); + assertEquals(batch.toString(), query); + + assertEquals(batch().toString(), "BEGIN BATCH APPLY BATCH;"); + } + + @Test(groups = "unit") + public void batchCounterTest() throws Exception { + String query; + Statement batch; + + // Test value increments + query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; + query += "UPDATE foo SET a=a+1;"; + query += "UPDATE foo SET b=b+2;"; + query += "UPDATE foo SET c=c+3;"; + query += "APPLY BATCH;"; + batch = + batch() + .add(update("foo").with(incr("a", 1))) + .add(update("foo").with(incr("b", 2))) + .add(update("foo").with(incr("c", 3))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + + // Test single increments + query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; + query += "UPDATE foo SET a=a+1;"; + query += "UPDATE foo SET b=b+1;"; + query += "UPDATE foo SET c=c+1;"; + query += "APPLY BATCH;"; + batch = + batch() + .add(update("foo").with(incr("a"))) + .add(update("foo").with(incr("b"))) + .add(update("foo").with(incr("c"))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + + // Test value decrements + query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; + query 
+= "UPDATE foo SET a=a-1;"; + query += "UPDATE foo SET b=b-2;"; + query += "UPDATE foo SET c=c-3;"; + query += "APPLY BATCH;"; + batch = + batch() + .add(update("foo").with(decr("a", 1))) + .add(update("foo").with(decr("b", 2))) + .add(update("foo").with(decr("c", 3))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + + // Test single decrements + query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; + query += "UPDATE foo SET a=a-1;"; + query += "UPDATE foo SET b=b-1;"; + query += "UPDATE foo SET c=c-1;"; + query += "APPLY BATCH;"; + batch = + batch() + .add(update("foo").with(decr("a"))) + .add(update("foo").with(decr("b"))) + .add(update("foo").with(decr("c"))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + + // Test negative decrements and negative increments + query = "BEGIN COUNTER BATCH USING TIMESTAMP 42 "; + query += "UPDATE foo SET a=a+1;"; + query += "UPDATE foo SET b=b+-2;"; + query += "UPDATE foo SET c=c-3;"; + query += "APPLY BATCH;"; + batch = + batch() + .add(update("foo").with(decr("a", -1))) + .add(update("foo").with(incr("b", -2))) + .add(update("foo").with(decr("c", 3))) + .using(timestamp(42)); + assertEquals(batch.toString(), query); + } + + @Test( + groups = "unit", + expectedExceptions = {IllegalArgumentException.class}) + public void batchMixedCounterTest() throws Exception { + batch() + .add(update("foo").with(incr("a", 1))) + .add(update("foo").with(set("b", 2))) + .add(update("foo").with(incr("c", 3))) + .using(timestamp(42)); + } + + @Test(groups = "unit") + public void markerTest() throws Exception { + String query; + Statement insert; + + query = "INSERT INTO test (k,c) VALUES (0,?);"; + insert = insertInto("test").value("k", 0).value("c", bindMarker()); + assertEquals(insert.toString(), query); + } + + @Test(groups = "unit") + public void rawEscapingTest() throws Exception { + + String query; + Statement select; + + query = "SELECT * FROM t WHERE c='C''est la vie!';"; + select = select().from("t").where(eq("c", "C'est la vie!")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM t WHERE c=C'est la vie!;"; + select = select().from("t").where(eq("c", raw("C'est la vie!"))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM t WHERE c=now();"; + select = select().from("t").where(eq("c", fcall("now"))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM t WHERE c='now()';"; + select = select().from("t").where(eq("c", raw("'now()'"))); + assertEquals(select.toString(), query); + } + + @Test(groups = "unit") + public void selectInjectionTests() throws Exception { + + String query; + Statement select; + + query = "SELECT * FROM \"foo WHERE k=4\";"; + select = select().all().from("foo WHERE k=4"); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k='4 AND c=5';"; + select = select().all().from("foo").where(eq("k", "4 AND c=5")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k='4'' AND c=''5';"; + select = select().all().from("foo").where(eq("k", "4' AND c='5")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k='4'' OR ''1''=''1';"; + select = select().all().from("foo").where(eq("k", "4' OR '1'='1")); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k='4; --test comment;';"; + select = select().all().from("foo").where(eq("k", "4; --test comment;")); + assertEquals(select.toString(), query); + + query = "SELECT \"*\" FROM foo;"; + select = 
select("*").from("foo"); + assertEquals(select.toString(), query); + + query = "SELECT a,b FROM foo WHERE a IN ('b','c''); --comment');"; + select = select("a", "b").from("foo").where(in("a", "b", "c'); --comment")); + assertEquals(select.toString(), query); + + query = "SELECT a,b FROM foo WHERE a IN ('a','b','c');"; + select = + select("a", "b") + .from("foo") + .where(in("a", Sets.newLinkedHashSet(Arrays.asList("a", "b", "c")))); + assertEquals(select.toString(), query); + + // User Injection? + query = "SELECT * FROM bar; --(b) FROM foo;"; + select = select().fcall("* FROM bar; --", column("b")).from("foo"); + assertEquals(select.toString(), query); + + query = "SELECT writetime(\"a) FROM bar; --\"),ttl(a) FROM foo ALLOW FILTERING;"; + select = select().writeTime("a) FROM bar; --").ttl("a").from("foo").allowFiltering(); + assertEquals(select.toString(), query); + + query = "SELECT writetime(a),ttl(\"a) FROM bar; --\") FROM foo ALLOW FILTERING;"; + select = select().writeTime("a").ttl("a) FROM bar; --").from("foo").allowFiltering(); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE \"k=1 OR k\">42 LIMIT 42;"; + select = select().all().from("foo").where(gt("k=1 OR k", 42)).limit(42); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE token(\"k)>0 OR token(k\")>token(42);"; + select = select().all().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42))); + assertEquals(select.toString(), query); + } + + @Test(groups = "unit") + @SuppressWarnings("serial") + public void insertInjectionTest() throws Exception { + + String query; + Statement insert; + + query = "INSERT INTO foo (a) VALUES ('123); --comment');"; + insert = insertInto("foo").value("a", "123); --comment"); + assertEquals(insert.toString(), query); + + query = "INSERT INTO foo (\"a,b\") VALUES (123);"; + insert = insertInto("foo").value("a,b", 123); + assertEquals(insert.toString(), query); + + query = + "INSERT INTO foo (a,b) VALUES ({'2''} space','3','4'},3.4) USING TTL 24 AND TIMESTAMP 42;"; + insert = + insertInto("foo") + .values( + new String[] {"a", "b"}, + new Object[] { + new TreeSet() { + { + add("2'} space"); + add("3"); + add("4"); + } + }, + 3.4 + }) + .using(ttl(24)) + .and(timestamp(42)); + assertEquals(insert.toString(), query); + } + + @Test(groups = "unit") + public void updateInjectionTest() throws Exception { + + String query; + Statement update; + + query = "UPDATE foo.bar USING TIMESTAMP 42 SET a=12 WHERE k='2 OR 1=1';"; + update = + update("foo", "bar").using(timestamp(42)).with(set("a", 12)).where(eq("k", "2 OR 1=1")); + assertEquals(update.toString(), query); + + query = "UPDATE foo SET b='null WHERE k=1; --comment' WHERE k=2;"; + update = update("foo").where().and(eq("k", 2)).with(set("b", "null WHERE k=1; --comment")); + assertEquals(update.toString(), query); + + query = + "UPDATE foo USING TIMESTAMP 42 SET \"b WHERE k=1; --comment\"=[3,2,1]+\"b WHERE k=1; --comment\" WHERE k=2;"; + update = + update("foo") + .where() + .and(eq("k", 2)) + .with(prependAll("b WHERE k=1; --comment", Arrays.asList(3, 2, 1))) + .using(timestamp(42)); + assertEquals(update.toString(), query); + } + + @Test(groups = "unit") + public void deleteInjectionTests() throws Exception { + + String query; + Statement delete; + + query = "DELETE FROM \"foo WHERE k=4\";"; + delete = delete().from("foo WHERE k=4"); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE k='4 AND c=5';"; + delete = delete().from("foo").where(eq("k", "4 AND c=5")); 
+ assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE k='4'' AND c=''5';"; + delete = delete().from("foo").where(eq("k", "4' AND c='5")); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE k='4'' OR ''1''=''1';"; + delete = delete().from("foo").where(eq("k", "4' OR '1'='1")); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE k='4; --test comment;';"; + delete = delete().from("foo").where(eq("k", "4; --test comment;")); + assertEquals(delete.toString(), query); + + query = "DELETE \"*\" FROM foo;"; + delete = delete("*").from("foo"); + assertEquals(delete.toString(), query); + + query = "DELETE a,b FROM foo WHERE a IN ('b','c''); --comment');"; + delete = delete("a", "b").from("foo").where(in("a", "b", "c'); --comment")); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE \"k=1 OR k\">42;"; + delete = delete().from("foo").where(gt("k=1 OR k", 42)); + assertEquals(delete.toString(), query); + + query = "DELETE FROM foo WHERE token(\"k)>0 OR token(k\")>token(42);"; + delete = delete().from("foo").where(gt(token("k)>0 OR token(k"), fcall("token", 42))); + assertEquals(delete.toString(), query); + } + + @Test(groups = "unit") + public void statementForwardingTest() throws Exception { + + Update upd = update("foo"); + upd.setConsistencyLevel(ConsistencyLevel.QUORUM); + upd.enableTracing(); + + Statement query = + upd.using(timestamp(42)).with(set("a", 12)).and(incr("c", 3)).where(eq("k", 2)); + + assertEquals(query.getConsistencyLevel(), ConsistencyLevel.QUORUM); + assertTrue(query.isTracing()); + } + + @Test(groups = "unit", expectedExceptions = CodecNotFoundException.class) + public void rejectUnknownValueTest() throws Exception { + RegularStatement s = + update("foo").with(set("a", new byte[13])).where(eq("k", 2)).setForceNoValues(true); + s.getQueryString(); + } + + @Test(groups = "unit") + public void truncateTest() throws Exception { + assertEquals(truncate("foo").toString(), "TRUNCATE foo;"); + assertEquals(truncate("foo", quote("Bar")).toString(), "TRUNCATE foo.\"Bar\";"); + } + + @Test(groups = "unit") + public void quotingTest() { + assertEquals(select().from("Metrics", "epochs").toString(), "SELECT * FROM Metrics.epochs;"); + assertEquals( + select().from("Metrics", quote("epochs")).toString(), "SELECT * FROM Metrics.\"epochs\";"); + assertEquals( + select().from(quote("Metrics"), "epochs").toString(), "SELECT * FROM \"Metrics\".epochs;"); + assertEquals( + select().from(quote("Metrics"), quote("epochs")).toString(), + "SELECT * FROM \"Metrics\".\"epochs\";"); + + assertEquals( + insertInto("Metrics", "epochs").toString(), "INSERT INTO Metrics.epochs () VALUES ();"); + assertEquals( + insertInto("Metrics", quote("epochs")).toString(), + "INSERT INTO Metrics.\"epochs\" () VALUES ();"); + assertEquals( + insertInto(quote("Metrics"), "epochs").toString(), + "INSERT INTO \"Metrics\".epochs () VALUES ();"); + assertEquals( + insertInto(quote("Metrics"), quote("epochs")).toString(), + "INSERT INTO \"Metrics\".\"epochs\" () VALUES ();"); + } + + @Test(groups = "unit") + public void compoundWhereClauseTest() throws Exception { + String query; + Statement select; + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)=('a',2);"; + select = + select() + .all() + .from("foo") + .where(eq("k", 4)) + .and(eq(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)>('a',2);"; + select = + select() + .all() + 
.from("foo") + .where(eq("k", 4)) + .and(gt(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)>=('a',2) AND (c1,c2)<('b',0);"; + select = + select() + .all() + .from("foo") + .where(eq("k", 4)) + .and(gte(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))) + .and(lt(Arrays.asList("c1", "c2"), Arrays.asList("b", 0))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2)<=('a',2);"; + select = + select() + .all() + .from("foo") + .where(eq("k", 4)) + .and(lte(Arrays.asList("c1", "c2"), Arrays.asList("a", 2))); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2) IN ((1,'foo'),(2,'bar'),(3,'qix'));"; + List names = ImmutableList.of("c1", "c2"); + List values = + ImmutableList.>of( + ImmutableList.of(1, "foo"), ImmutableList.of(2, "bar"), ImmutableList.of(3, "qix")); + select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); + assertEquals(select.toString(), query); + + query = "SELECT * FROM foo WHERE k=4 AND (c1,c2) IN ((1,'foo'),(2,?),?);"; + names = ImmutableList.of("c1", "c2"); + values = + ImmutableList.of( + ImmutableList.of(1, "foo"), ImmutableList.of(2, bindMarker()), bindMarker()); + select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); + assertEquals(select.toString(), query); + + // special case, single element list with bind marker should be (?) instead of ((?)) + query = "SELECT * FROM foo WHERE k=4 AND (c1) IN (?);"; + names = ImmutableList.of("c1"); + values = ImmutableList.of(ImmutableList.of(bindMarker())); + select = select().all().from("foo").where(eq("k", 4)).and(in(names, values)); + assertEquals(select.toString(), query); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "Too many values for IN clause, the maximum allowed is 65535") + public void should_fail_if_compound_in_clause_has_too_many_values() { + List values = Collections.nCopies(65536, bindMarker()); + select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("name"), values)); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "Missing values for IN clause") + public void should_fail_if_compound_in_clause_given_null_values() { + select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("name"), null)); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The number of names \\(4\\) and values \\(3\\) don't match") + public void should_fail_if_compound_in_clause_has_mismatch_of_names_and_values() { + select() + .all() + .from("foo") + .where(eq("k", 4)) + .and( + in( + ImmutableList.of("a", "b", "c", "d"), ImmutableList.of( - ImmutableList.of(1, 2, 3, 4), // Adequately sized (4) - ImmutableList.of(1, 2, 3) // Inadequately sized (3) - ))); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Wrong element type for values list, expected List or BindMarker, got java.lang.Integer") - public void shoud_fail_if_compound_in_clause_has_value_pair_that_is_not_list_or_bind_marker() { - select().all().from("foo").where(eq("k", 4)).and(in(ImmutableList.of("a", "b", "c", "d"), + ImmutableList.of(1, 2, 3, 4), // Adequately sized (4) + ImmutableList.of(1, 2, 3) // Inadequately sized (3) + ))); + } 
+ + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "Wrong element type for values list, expected List or BindMarker, got java.lang.Integer") + public void shoud_fail_if_compound_in_clause_has_value_pair_that_is_not_list_or_bind_marker() { + select() + .all() + .from("foo") + .where(eq("k", 4)) + .and( + in( + ImmutableList.of("a", "b", "c", "d"), ImmutableList.of(1))); // Invalid value 1, must be list or bind marker. + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "Missing values for IN clause") + public void should_fail_if_in_clause_has_null_values() { + select().all().from("foo").where(in("bar", (List) null)); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_if_in_clause_has_too_many_values() { + List values = Collections.nCopies(65536, "a"); + select().all().from("foo").where(in("bar", values.toArray())); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_if_built_statement_has_too_many_values() { + List values = Collections.nCopies(65535, "a"); + + // If the excessive count results from successive DSL calls, we don't check it on the fly so + // this statement works: + BuiltStatement statement = + select().all().from("foo").where(eq("bar", "a")).and(in("baz", values.toArray())); + + // But we still want to check it client-side, to fail fast instead of sending a bad query to + // Cassandra. + // getValues() is called on any RegularStatement before we send it (see + // SessionManager.makeRequestMessage). + statement.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE); + } + + @Test(groups = "unit") + public void should_handle_nested_collections() { + String query; + Statement statement; + + query = "UPDATE foo SET l=[[1],[2]] WHERE k=1;"; + ImmutableList> list = + ImmutableList.of(ImmutableList.of(1), ImmutableList.of(2)); + statement = update("foo").with(set("l", list)).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + + query = "UPDATE foo SET m={1:[[1],[2]],2:[[1],[2]]} WHERE k=1;"; + statement = update("foo").with(set("m", ImmutableMap.of(1, list, 2, list))).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + + query = "UPDATE foo SET m=m+{1:[[1],[2]],2:[[1],[2]]} WHERE k=1;"; + statement = + update("foo").with(putAll("m", ImmutableMap.of(1, list, 2, list))).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + + query = "UPDATE foo SET l=[[1]]+l WHERE k=1;"; + statement = update("foo").with(prepend("l", ImmutableList.of(1))).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + + query = "UPDATE foo SET l=[[1],[2]]+l WHERE k=1;"; + statement = update("foo").with(prependAll("l", list)).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + } + + @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) + public void should_not_allow_bind_marker_for_add() { + // This generates the query "UPDATE foo SET s = s + {?} WHERE k = 1", which is invalid in + // Cassandra + update("foo").with(add("s", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) + public void should_now_allow_bind_marker_for_prepend() { + update("foo").with(prepend("l", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit", expectedExceptions = 
InvalidQueryException.class) + public void should_not_allow_bind_marker_for_append() { + update("foo").with(append("l", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) + public void should_not_allow_bind_marker_for_remove() { + update("foo").with(remove("s", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) + public void should_not_allow_bind_marker_for_discard() { + update("foo").with(discard("l", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit") + public void should_quote_complex_column_names() { + // A column name can be anything as long as it's quoted, so "foo.bar" is a valid name + String query = "SELECT * FROM foo WHERE \"foo.bar\"=1;"; + Statement statement = select().from("foo").where(eq(quote("foo.bar"), 1)); + + assertThat(statement.toString()).isEqualTo(query); + } + + @Test(groups = "unit") + public void should_quote_column_names_with_escaped_quotes() { + // A column name can include quotes as long as it is escaped with another set of quotes, so + // "foo""bar" is a valid name. + String query = "SELECT * FROM foo WHERE \"foo \"\" bar\"=1;"; + Statement statement = select().from("foo").where(eq(quote("foo \" bar"), 1)); + + assertThat(statement.toString()).isEqualTo(query); + } + + @Test(groups = "unit") + public void should_not_serialize_raw_query_values() { + RegularStatement select = select().from("test").where(gt("i", raw("1"))); + assertThat(select.getQueryString()).doesNotContain("?"); + assertThat(select.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNull(); + } + + @Test( + groups = "unit", + expectedExceptions = {IllegalStateException.class}) + public void should_throw_ISE_if_getObject_called_on_statement_without_values() { + select() + .from("test") + .where(eq("foo", 42)) + .getObject(0); // integers are appended to the CQL string + } + + @Test( + groups = "unit", + expectedExceptions = {IndexOutOfBoundsException.class}) + public void should_throw_IOOBE_if_getObject_called_with_wrong_index() { + select().from("test").where(eq("foo", new Object())).getObject(1); + } + + @Test(groups = "unit") + public void should_return_object_at_ith_index() { + Object expected = new Object(); + Object actual = select().from("test").where(eq("foo", expected)).getObject(0); + assertThat(actual).isSameAs(expected); + } + + @Test(groups = "unit") + public void should_serialize_collections_of_serializable_elements() { + Set set = Sets.newHashSet(UUID.randomUUID()); + List list = Lists.newArrayList(new Date()); + Map map = ImmutableMap.of(new BigInteger("1"), "foo"); + BuiltStatement query = insertInto("foo").value("v", set); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); + assertThat(query.getObject(0)).isEqualTo(set); + query = insertInto("foo").value("v", list); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); + assertThat(query.getObject(0)).isEqualTo(list); + query = insertInto("foo").value("v", map); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); + assertThat(query.getObject(0)).isEqualTo(map); + } + + @Test(groups = "unit") + public void should_not_attempt_to_serialize_function_calls_in_collections() { + BuiltStatement query = insertInto("foo").value("v", Sets.newHashSet(fcall("func", 1))); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({func(1)});"); + 
assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNullOrEmpty(); + } + + @Test(groups = "unit") + public void should_not_attempt_to_serialize_bind_markers_in_collections() { + BuiltStatement query = insertInto("foo").value("v", Lists.newArrayList(1, 2, bindMarker())); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ([1,2,?]);"); + assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNullOrEmpty(); + } + + @Test(groups = "unit") + public void should_not_attempt_to_serialize_raw_values_in_collections() { + BuiltStatement query = insertInto("foo").value("v", ImmutableMap.of(1, raw("x"))); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1:x});"); + assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) + .isNullOrEmpty(); + } + + @Test(groups = "unit") + public void should_not_attempt_to_serialize_collections_containing_numbers() { + BuiltStatement query; + // lists + List list = Lists.newArrayList(1, 2, 3); + query = insertInto("foo").value("v", list); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ([1,2,3]);"); + assertThat(query.hasValues()).isFalse(); + // sets + Set set = Sets.newHashSet(1, 2, 3); + query = insertInto("foo").value("v", set); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1,2,3});"); + assertThat(query.hasValues()).isFalse(); + // maps + Map map = ImmutableMap.of(1, 12.34f); + query = insertInto("foo").value("v", map); + assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1:12.34});"); + assertThat(query.hasValues()).isFalse(); + } + + @Test(groups = "unit") + public void should_include_original_cause_when_arguments_invalid() { + // Collection elements in protocol v2 must be at most 65535 bytes + ByteBuffer bb = ByteBuffer.allocate(65536); // too big + List value = Lists.newArrayList(bb); + + BuiltStatement s = insertInto("foo").value("l", value); + try { + s.getValues(ProtocolVersion.V2, CodecRegistry.DEFAULT_INSTANCE); + fail("Expected an IllegalArgumentException"); + } catch (InvalidTypeException e) { + assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); + StringWriter writer = new StringWriter(); + e.getCause().printStackTrace(new PrintWriter(writer)); + String stackTrace = writer.toString(); + assertThat(stackTrace) + .contains( + "Native protocol version 2 supports only elements with size up to 65535 bytes - " + + "but element size is 65536 bytes"); } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Missing values for IN clause") - public void should_fail_if_in_clause_has_null_values() { - select().all().from("foo").where(in("bar", (List) null)); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_if_in_clause_has_too_many_values() { - List values = Collections.nCopies(65536, "a"); - select().all().from("foo").where(in("bar", values.toArray())); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_if_built_statement_has_too_many_values() { - List values = Collections.nCopies(65535, "a"); - - // If the excessive count results from successive DSL calls, we don't check it on the fly so this statement works: - BuiltStatement statement = select().all().from("foo") - .where(eq("bar", "a")) - .and(in("baz", 
values.toArray())); - - // But we still want to check it client-side, to fail fast instead of sending a bad query to Cassandra. - // getValues() is called on any RegularStatement before we send it (see SessionManager.makeRequestMessage). - statement.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE); - } - - @Test(groups = "unit") - public void should_handle_nested_collections() { - String query; - Statement statement; - - query = "UPDATE foo SET l=[[1],[2]] WHERE k=1;"; - ImmutableList> list = ImmutableList.of(ImmutableList.of(1), ImmutableList.of(2)); - statement = update("foo").with(set("l", list)).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); - - query = "UPDATE foo SET m={1:[[1],[2]],2:[[1],[2]]} WHERE k=1;"; - statement = update("foo").with(set("m", ImmutableMap.of(1, list, 2, list))).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); - - query = "UPDATE foo SET m=m+{1:[[1],[2]],2:[[1],[2]]} WHERE k=1;"; - statement = update("foo").with(putAll("m", ImmutableMap.of(1, list, 2, list))).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); - - query = "UPDATE foo SET l=[[1]]+l WHERE k=1;"; - statement = update("foo").with(prepend("l", ImmutableList.of(1))).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); - - query = "UPDATE foo SET l=[[1],[2]]+l WHERE k=1;"; - statement = update("foo").with(prependAll("l", list)).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); + } + + @Test(groups = "unit") + public void should_handle_per_partition_limit_clause() { + assertThat(select().all().from("foo").perPartitionLimit(2).toString()) + .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2;"); + assertThat(select().all().from("foo").perPartitionLimit(bindMarker()).toString()) + .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT ?;"); + assertThat(select().all().from("foo").perPartitionLimit(bindMarker("limit")).toString()) + .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT :limit;"); + assertThat(select().all().from("foo").perPartitionLimit(2).limit(bindMarker()).toString()) + .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2 LIMIT ?;"); + assertThat( + select() + .all() + .from("foo") + .where(in("a", 2, 4)) + .perPartitionLimit(2) + .limit(3) + .toString()) + .isEqualTo("SELECT * FROM foo WHERE a IN (2,4) PER PARTITION LIMIT 2 LIMIT 3;"); + assertThat( + select() + .all() + .from("foo") + .where(eq("a", bindMarker())) + .perPartitionLimit(bindMarker()) + .limit(3) + .toString()) + .isEqualTo("SELECT * FROM foo WHERE a=? PER PARTITION LIMIT ? LIMIT 3;"); + assertThat( + select() + .all() + .from("foo") + .where(eq("a", bindMarker())) + .orderBy(desc("b")) + .perPartitionLimit(2) + .limit(3) + .toString()) + .isEqualTo("SELECT * FROM foo WHERE a=? ORDER BY b DESC PER PARTITION LIMIT 2 LIMIT 3;"); + assertThat( + select() + .all() + .from("foo") + .where(eq("a", bindMarker())) + .and(gt("b", bindMarker())) + .orderBy(desc("b")) + .perPartitionLimit(bindMarker()) + .limit(3) + .allowFiltering() + .toString()) + .isEqualTo( + "SELECT * FROM foo WHERE a=? AND b>? ORDER BY b DESC PER PARTITION LIMIT ? 
LIMIT 3 ALLOW FILTERING;"); + try { + select().distinct().all().from("foo").perPartitionLimit(3); + fail("Should not allow DISTINCT + PER PARTITION LIMIT"); + } catch (Exception e) { + assertThat(e).hasMessage("PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries"); } - - @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) - public void should_not_allow_bind_marker_for_add() { - // This generates the query "UPDATE foo SET s = s + {?} WHERE k = 1", which is invalid in Cassandra - update("foo").with(add("s", bindMarker())).where(eq("k", 1)); - } - - @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) - public void should_now_allow_bind_marker_for_prepend() { - update("foo").with(prepend("l", bindMarker())).where(eq("k", 1)); - } - - @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) - public void should_not_allow_bind_marker_for_append() { - update("foo").with(append("l", bindMarker())).where(eq("k", 1)); + try { + select().all().from("foo").perPartitionLimit(-1); + fail("Should not allow negative limit"); + } catch (IllegalArgumentException e) { + assertThat(e).hasMessage("Invalid PER PARTITION LIMIT value, must be strictly positive"); } - - @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) - public void should_not_allow_bind_marker_for_remove() { - update("foo").with(remove("s", bindMarker())).where(eq("k", 1)); + try { + select().all().from("foo").perPartitionLimit(1).perPartitionLimit(bindMarker()); + fail("Should not allow to set limit twice"); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("A PER PARTITION LIMIT value has already been provided"); } - - @Test(groups = "unit", expectedExceptions = InvalidQueryException.class) - public void should_not_allow_bind_marker_for_discard() { - update("foo").with(discard("l", bindMarker())).where(eq("k", 1)); + } + + @Test(groups = "unit") + public void should_handle_select_json() throws Exception { + assertThat(select().json().from("users").toString()).isEqualTo("SELECT JSON * FROM users;"); + assertThat(select("id", "age").json().from("users").toString()) + .isEqualTo("SELECT JSON id,age FROM users;"); + assertThat( + select() + .json() + .column("id") + .writeTime("age") + .ttl("state") + .as("ttl") + .from("users") + .toString()) + .isEqualTo("SELECT JSON id,writetime(age),ttl(state) AS ttl FROM users;"); + assertThat(select().distinct().json().column("id").from("users").toString()) + .isEqualTo( + "SELECT JSON DISTINCT id FROM users;"); // note that the correct syntax is JSON DISTINCT + } + + @Test(groups = "unit") + public void should_handle_insert_json() throws Exception { + assertThat( + insertInto("example") + .json( + "{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}") + .toString()) + .isEqualTo( + "INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}';"); + assertThat( + insertInto("users") + .json("{\"id\": \"user123\", \"\\\"Age\\\"\": 42, \"\\\"State\\\"\": \"TX\"}") + .toString()) + .isEqualTo( + "INSERT INTO users JSON '{\"id\": \"user123\", \"\\\"Age\\\"\": 42, \"\\\"State\\\"\": \"TX\"}';"); + assertThat(insertInto("users").json(bindMarker()).toString()) + .isEqualTo("INSERT INTO users JSON ?;"); + assertThat(insertInto("users").json(bindMarker("json")).toString()) + .isEqualTo("INSERT INTO users JSON :json;"); + assertThat( + insertInto("example") + .json( + "{\"id\": 0, \"tupleval\": [1, 
\"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}") + .defaultNull() + .toString()) + .isEqualTo( + "INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}' DEFAULT NULL;"); + assertThat( + insertInto("example") + .json( + "{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}") + .defaultUnset() + .toString()) + .isEqualTo( + "INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}' DEFAULT UNSET;"); + } + + @Test(groups = "unit") + public void should_handle_to_json() throws Exception { + assertThat(select().toJson("id").as("id").toJson("age").as("age").from("users").toString()) + .isEqualTo("SELECT toJson(id) AS id,toJson(age) AS age FROM users;"); + assertThat(select().distinct().toJson("id").as("id").from("users").toString()) + .isEqualTo("SELECT DISTINCT toJson(id) AS id FROM users;"); + assertThat( + select(alias(toJson("id"), "id"), alias(toJson("age"), "age")).from("users").toString()) + .isEqualTo("SELECT toJson(id) AS id,toJson(age) AS age FROM users;"); + assertThat(select(alias(toJson("id"), "id")).distinct().from("users").toString()) + .isEqualTo("SELECT DISTINCT toJson(id) AS id FROM users;"); + } + + @Test(groups = "unit") + public void should_handle_from_json() throws Exception { + assertThat( + update("users") + .with(set("age", fromJson("42"))) + .where(eq("id", fromJson("\"user123\""))) + .toString()) + .isEqualTo("UPDATE users SET age=fromJson('42') WHERE id=fromJson('\"user123\"');"); + assertThat( + insertInto("users") + .value("id", fromJson("\"user123\"")) + .value("age", fromJson("42")) + .toString()) + .isEqualTo("INSERT INTO users (id,age) VALUES (fromJson('\"user123\"'),fromJson('42'));"); + assertThat(insertInto("users").value("id", fromJson(bindMarker())).toString()) + .isEqualTo("INSERT INTO users (id) VALUES (fromJson(?));"); + assertThat(insertInto("users").value("id", fromJson(bindMarker("id"))).toString()) + .isEqualTo("INSERT INTO users (id) VALUES (fromJson(:id));"); + } + + static class Foo { + int bar; + + public Foo(int bar) { + this.bar = bar; } + } - @Test(groups = "unit") - public void should_quote_complex_column_names() { - // A column name can be anything as long as it's quoted, so "foo.bar" is a valid name - String query = "SELECT * FROM foo WHERE \"foo.bar\"=1;"; - Statement statement = select().from("foo").where(eq(quote("foo.bar"), 1)); + static class FooCodec extends TypeCodec { - assertThat(statement.toString()).isEqualTo(query); + public FooCodec() { + super(DataType.cint(), Foo.class); } - @Test(groups = "unit") - public void should_quote_column_names_with_escaped_quotes() { - // A column name can include quotes as long as it is escaped with another set of quotes, so "foo""bar" is a valid name. 
- String query = "SELECT * FROM foo WHERE \"foo \"\" bar\"=1;"; - Statement statement = select().from("foo").where(eq(quote("foo \" bar"), 1)); - - assertThat(statement.toString()).isEqualTo(query); + @Override + public ByteBuffer serialize(Foo value, ProtocolVersion protocolVersion) + throws InvalidTypeException { + // not relevant for this test + return null; } - @Test(groups = "unit") - public void should_not_serialize_raw_query_values() { - RegularStatement select = select().from("test").where(gt("i", raw("1"))); - assertThat(select.getQueryString()).doesNotContain("?"); - assertThat(select.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNull(); + @Override + public Foo deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) + throws InvalidTypeException { + // not relevant for this test + return null; } - @Test(groups = "unit", expectedExceptions = {IllegalStateException.class}) - public void should_throw_ISE_if_getObject_called_on_statement_without_values() { - select().from("test").where(eq("foo", 42)).getObject(0); // integers are appended to the CQL string + @Override + public Foo parse(String value) throws InvalidTypeException { + // not relevant for this test + return null; } - @Test(groups = "unit", expectedExceptions = {IndexOutOfBoundsException.class}) - public void should_throw_IOOBE_if_getObject_called_with_wrong_index() { - select().from("test").where(eq("foo", new Object())).getObject(1); + @Override + public String format(Foo foo) throws InvalidTypeException { + return Integer.toString(foo.bar); } - - @Test(groups = "unit") - public void should_return_object_at_ith_index() { - Object expected = new Object(); - Object actual = select().from("test").where(eq("foo", expected)).getObject(0); - assertThat(actual).isSameAs(expected); + } + + /** + * Ensures that a statement can be printed with and without a required custom codec. The + * expectation is that if the codec is not registered, then the query string should contain bind + * markers for all variables; if however all codecs are properly registered, then the query string + * should contain all variables inlined and formatted properly. + * + * @jira_ticket JAVA-1272 + */ + @Test(groups = "unit") + public void should_inline_custom_codec() throws Exception { + assertThat(insertInto("users").value("id", new Foo(42)).toString()) + .isEqualTo("INSERT INTO users (id) VALUES (?);"); + CodecRegistry.DEFAULT_INSTANCE.register(new FooCodec()); + assertThat(insertInto("users").value("id", new Foo(42)).toString()) + .isEqualTo("INSERT INTO users (id) VALUES (42);"); + } + + /** @jira_ticket JAVA-1312 */ + @Test(groups = "unit") + public void should_not_append_last_column_twice() throws Exception { + Select.SelectionOrAlias select = select().column("a").column("b"); + Select fromUsers1 = select.from("users"); + Select fromUsers2 = select.from("users"); + assertThat(fromUsers1.getQueryString()) + .isEqualTo(fromUsers2.getQueryString()) + .isEqualTo("SELECT a,b FROM users;"); + } + + /** + * @test_category queries:builder + * @jira_ticket JAVA-1286 + * @jira_ticket CASSANDRA-7423 + */ + @Test(groups = "unit") + public void should_handle_setting_udt_fields() throws Exception { + assertThat( + update("tbl") + .with(set(path("a", quote("B")), "foo")) + .and(set(raw("c.\"D\""), "bar")) + .where(eq("k", 0)) + .getQueryString()) + .isEqualTo("UPDATE tbl SET a.\"B\"=?,c.\"D\"=? 
WHERE k=0;"); + } + + /** + * @test_category queries:builder + * @jira_ticket JAVA-1286 + * @jira_ticket CASSANDRA-7423 + */ + @Test(groups = "unit") + public void should_handle_retrieving_udt_fields() throws Exception { + assertThat(select().path("a", Metadata.quote("B")).raw("c.\"D\"").from("tbl").getQueryString()) + .isEqualTo("SELECT a.\"B\",c.\"D\" FROM tbl;"); + } + + /** + * @test_category queries:builder + * @jira_ticket JAVA-1443 + * @jira_ticket CASSANDRA-10707 + */ + @Test(groups = "unit") + public void should_handle_group_by_clause() { + assertThat(select().all().from("foo").groupBy("c1", column("c2"), raw("c3")).toString()) + .isEqualTo("SELECT * FROM foo GROUP BY c1,c2,c3;"); + assertThat( + select() + .all() + .from("foo") + .groupBy("c1", column("c2"), raw("c3")) + .orderBy(asc("c1")) + .toString()) + .isEqualTo("SELECT * FROM foo GROUP BY c1,c2,c3 ORDER BY c1 ASC;"); + assertThat( + select() + .all() + .from("foo") + .where(eq("x", 42)) + .groupBy("c1", column("c2"), raw("c3")) + .toString()) + .isEqualTo("SELECT * FROM foo WHERE x=42 GROUP BY c1,c2,c3;"); + assertThat( + select() + .all() + .from("foo") + .where(eq("x", 42)) + .groupBy("c1", column("c2"), raw("c3")) + .orderBy(asc("c1")) + .toString()) + .isEqualTo("SELECT * FROM foo WHERE x=42 GROUP BY c1,c2,c3 ORDER BY c1 ASC;"); + try { + select().all().from("foo").groupBy("foo").groupBy("bar"); + fail("Should not allow GROUP BY twice"); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("A GROUP BY clause has already been provided"); } - - @Test(groups = "unit") - public void should_serialize_collections_of_serializable_elements() { - Set set = Sets.newHashSet(UUID.randomUUID()); - List list = Lists.newArrayList(new Date()); - Map map = ImmutableMap.of(new BigInteger("1"), "foo"); - BuiltStatement query = insertInto("foo").value("v", set); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); - assertThat(query.getObject(0)).isEqualTo(set); - query = insertInto("foo").value("v", list); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); - assertThat(query.getObject(0)).isEqualTo(list); - query = insertInto("foo").value("v", map); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES (?);"); - assertThat(query.getObject(0)).isEqualTo(map); - } - - @Test(groups = "unit") - public void should_not_attempt_to_serialize_function_calls_in_collections() { - BuiltStatement query = insertInto("foo").value("v", Sets.newHashSet(fcall("func", 1))); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({func(1)});"); - assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNullOrEmpty(); - } - - @Test(groups = "unit") - public void should_not_attempt_to_serialize_bind_markers_in_collections() { - BuiltStatement query = insertInto("foo").value("v", Lists.newArrayList(1, 2, bindMarker())); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ([1,2,?]);"); - assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNullOrEmpty(); - } - - @Test(groups = "unit") - public void should_not_attempt_to_serialize_raw_values_in_collections() { - BuiltStatement query = insertInto("foo").value("v", ImmutableMap.of(1, raw("x"))); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1:x});"); - assertThat(query.getValues(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)).isNullOrEmpty(); - } - 
- @Test(groups = "unit") - public void should_not_attempt_to_serialize_collections_containing_numbers() { - BuiltStatement query; - // lists - List list = Lists.newArrayList(1, 2, 3); - query = insertInto("foo").value("v", list); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ([1,2,3]);"); - assertThat(query.hasValues()).isFalse(); - // sets - Set set = Sets.newHashSet(1, 2, 3); - query = insertInto("foo").value("v", set); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1,2,3});"); - assertThat(query.hasValues()).isFalse(); - // maps - Map map = ImmutableMap.of(1, 12.34f); - query = insertInto("foo").value("v", map); - assertThat(query.getQueryString()).isEqualTo("INSERT INTO foo (v) VALUES ({1:12.34});"); - assertThat(query.hasValues()).isFalse(); - } - - @Test(groups = "unit") - public void should_include_original_cause_when_arguments_invalid() { - // Collection elements in protocol v2 must be at most 65535 bytes - ByteBuffer bb = ByteBuffer.allocate(65536); // too big - List value = Lists.newArrayList(bb); - - BuiltStatement s = insertInto("foo").value("l", value); - try { - s.getValues(ProtocolVersion.V2, CodecRegistry.DEFAULT_INSTANCE); - fail("Expected an IllegalArgumentException"); - } catch (InvalidTypeException e) { - assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); - StringWriter writer = new StringWriter(); - e.getCause().printStackTrace(new PrintWriter(writer)); - String stackTrace = writer.toString(); - assertThat(stackTrace).contains( - "Native protocol version 2 supports only elements with size up to 65535 bytes - " + - "but element size is 65536 bytes"); - } - } - - @Test(groups = "unit") - public void should_handle_per_partition_limit_clause() { - assertThat( - select().all().from("foo").perPartitionLimit(2).toString()) - .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2;"); - assertThat( - select().all().from("foo").perPartitionLimit(bindMarker()).toString()) - .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT ?;"); - assertThat( - select().all().from("foo").perPartitionLimit(bindMarker("limit")).toString()) - .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT :limit;"); - assertThat( - select().all().from("foo").perPartitionLimit(2).limit(bindMarker()).toString()) - .isEqualTo("SELECT * FROM foo PER PARTITION LIMIT 2 LIMIT ?;"); - assertThat( - select().all().from("foo").where(in("a", 2, 4)).perPartitionLimit(2).limit(3).toString()) - .isEqualTo("SELECT * FROM foo WHERE a IN (2,4) PER PARTITION LIMIT 2 LIMIT 3;"); - assertThat( - select().all().from("foo").where(eq("a", bindMarker())).perPartitionLimit(bindMarker()).limit(3).toString()) - .isEqualTo("SELECT * FROM foo WHERE a=? PER PARTITION LIMIT ? LIMIT 3;"); - assertThat( - select().all().from("foo").where(eq("a", bindMarker())).orderBy(desc("b")).perPartitionLimit(2).limit(3).toString()) - .isEqualTo("SELECT * FROM foo WHERE a=? ORDER BY b DESC PER PARTITION LIMIT 2 LIMIT 3;"); - assertThat( - select().all().from("foo").where(eq("a", bindMarker())).and(gt("b", bindMarker())) - .orderBy(desc("b")).perPartitionLimit(bindMarker()).limit(3).allowFiltering().toString()) - .isEqualTo("SELECT * FROM foo WHERE a=? AND b>? ORDER BY b DESC PER PARTITION LIMIT ? 
LIMIT 3 ALLOW FILTERING;"); - try { - select().distinct().all().from("foo").perPartitionLimit(3); - fail("Should not allow DISTINCT + PER PARTITION LIMIT"); - } catch (Exception e) { - assertThat(e).hasMessage("PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries"); - } - try { - select().all().from("foo").perPartitionLimit(-1); - fail("Should not allow negative limit"); - } catch (IllegalArgumentException e) { - assertThat(e).hasMessage("Invalid PER PARTITION LIMIT value, must be strictly positive"); - } - try { - select().all().from("foo").perPartitionLimit(1).perPartitionLimit(bindMarker()); - fail("Should not allow to set limit twice"); - } catch (IllegalStateException e) { - assertThat(e).hasMessage("A PER PARTITION LIMIT value has already been provided"); - } - } - - @Test(groups = "unit") - public void should_handle_select_json() throws Exception { - assertThat( - select().json().from("users").toString()) - .isEqualTo("SELECT JSON * FROM users;"); - assertThat( - select("id", "age").json().from("users").toString()) - .isEqualTo("SELECT JSON id,age FROM users;"); - assertThat( - select().json().column("id").writeTime("age").ttl("state").as("ttl").from("users").toString()) - .isEqualTo("SELECT JSON id,writetime(age),ttl(state) AS ttl FROM users;"); - assertThat( - select().distinct().json().column("id").from("users").toString()) - .isEqualTo("SELECT JSON DISTINCT id FROM users;"); // note that the correct syntax is JSON DISTINCT - } - - @Test(groups = "unit") - public void should_handle_insert_json() throws Exception { - assertThat( - insertInto("example").json("{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}").toString()) - .isEqualTo("INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}';"); - assertThat( - insertInto("users").json("{\"id\": \"user123\", \"\\\"Age\\\"\": 42, \"\\\"State\\\"\": \"TX\"}").toString()) - .isEqualTo("INSERT INTO users JSON '{\"id\": \"user123\", \"\\\"Age\\\"\": 42, \"\\\"State\\\"\": \"TX\"}';"); - assertThat( - insertInto("users").json(bindMarker()).toString()) - .isEqualTo("INSERT INTO users JSON ?;"); - assertThat( - insertInto("users").json(bindMarker("json")).toString()) - .isEqualTo("INSERT INTO users JSON :json;"); - assertThat( - insertInto("example").json("{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}").defaultNull().toString()) - .isEqualTo("INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}' DEFAULT NULL;"); - assertThat( - insertInto("example").json("{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}").defaultUnset().toString()) - .isEqualTo("INSERT INTO example JSON '{\"id\": 0, \"tupleval\": [1, \"abc\"], \"numbers\": [1, 2, 3], \"letters\": [\"a\", \"b\", \"c\"]}' DEFAULT UNSET;"); - } - - @Test(groups = "unit") - public void should_handle_to_json() throws Exception { - assertThat( - select().toJson("id").as("id").toJson("age").as("age").from("users").toString()) - .isEqualTo("SELECT toJson(id) AS id,toJson(age) AS age FROM users;"); - assertThat( - select().distinct().toJson("id").as("id").from("users").toString()) - .isEqualTo("SELECT DISTINCT toJson(id) AS id FROM users;"); - assertThat( - select(alias(toJson("id"), "id"), alias(toJson("age"), "age")).from("users").toString()) - .isEqualTo("SELECT toJson(id) AS id,toJson(age) 
AS age FROM users;"); - assertThat( - select(alias(toJson("id"), "id")).distinct().from("users").toString()) - .isEqualTo("SELECT DISTINCT toJson(id) AS id FROM users;"); - } - - @Test(groups = "unit") - public void should_handle_from_json() throws Exception { - assertThat( - update("users").with(set("age", fromJson("42"))).where(eq("id", fromJson("\"user123\""))).toString()) - .isEqualTo("UPDATE users SET age=fromJson('42') WHERE id=fromJson('\"user123\"');"); - assertThat( - insertInto("users").value("id", fromJson("\"user123\"")).value("age", fromJson("42")).toString()) - .isEqualTo("INSERT INTO users (id,age) VALUES (fromJson('\"user123\"'),fromJson('42'));"); - assertThat( - insertInto("users").value("id", fromJson(bindMarker())).toString()) - .isEqualTo("INSERT INTO users (id) VALUES (fromJson(?));"); - assertThat( - insertInto("users").value("id", fromJson(bindMarker("id"))).toString()) - .isEqualTo("INSERT INTO users (id) VALUES (fromJson(:id));"); - } - - static class Foo { - int bar; - - public Foo(int bar) { - this.bar = bar; - } - } - - static class FooCodec extends TypeCodec { - - public FooCodec() { - super(DataType.cint(), Foo.class); - } - - @Override - public ByteBuffer serialize(Foo value, ProtocolVersion protocolVersion) throws InvalidTypeException { - // not relevant for this test - return null; - } - - @Override - public Foo deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) throws InvalidTypeException { - // not relevant for this test - return null; - } - - @Override - public Foo parse(String value) throws InvalidTypeException { - // not relevant for this test - return null; - } - - @Override - public String format(Foo foo) throws InvalidTypeException { - return Integer.toString(foo.bar); - } - } - - /** - * Ensures that a statement can be printed with and without - * a required custom codec. - * The expectation is that if the codec is not registered, - * then the query string should contain bind markers for all variables; - * if however all codecs are properly registered, then - * the query string should contain all variables inlined and formatted properly. - * - * @jira_ticket JAVA-1272 - */ - @Test(groups = "unit") - public void should_inline_custom_codec() throws Exception { - assertThat( - insertInto("users").value("id", new Foo(42)).toString()) - .isEqualTo("INSERT INTO users (id) VALUES (?);"); - CodecRegistry.DEFAULT_INSTANCE.register(new FooCodec()); - assertThat( - insertInto("users").value("id", new Foo(42)).toString()) - .isEqualTo("INSERT INTO users (id) VALUES (42);"); - } - - /** - * @jira_ticket JAVA-1312 - */ - @Test(groups = "unit") - public void should_not_append_last_column_twice() throws Exception { - Select.SelectionOrAlias select = select().column("a").column("b"); - Select fromUsers1 = select.from("users"); - Select fromUsers2 = select.from("users"); - assertThat(fromUsers1.getQueryString()) - .isEqualTo(fromUsers2.getQueryString()) - .isEqualTo("SELECT a,b FROM users;"); - } - - /** - * @test_category queries:builder - * @jira_ticket JAVA-1286 - * @jira_ticket CASSANDRA-7423 - */ - @Test(groups = "unit") - public void should_handle_setting_udt_fields() throws Exception { - assertThat( - update("tbl") - .with(set(path("a", quote("B")), "foo")) - .and(set(raw("c.\"D\""), "bar")) - .where(eq("k", 0)).getQueryString()) - .isEqualTo("UPDATE tbl SET a.\"B\"=?,c.\"D\"=? 
WHERE k=0;"); - } - - /** - * @test_category queries:builder - * @jira_ticket JAVA-1286 - * @jira_ticket CASSANDRA-7423 - */ - @Test(groups = "unit") - public void should_handle_retrieving_udt_fields() throws Exception { - assertThat( - select() - .path("a", Metadata.quote("B")) - .raw("c.\"D\"") - .from("tbl").getQueryString()) - .isEqualTo("SELECT a.\"B\",c.\"D\" FROM tbl;"); - } - - /** - * @test_category queries:builder - * @jira_ticket JAVA-1443 - * @jira_ticket CASSANDRA-10707 - */ - @Test(groups = "unit") - public void should_handle_group_by_clause() { - assertThat( - select().all().from("foo").groupBy("c1", column("c2"), raw("c3")).toString()) - .isEqualTo("SELECT * FROM foo GROUP BY c1,c2,c3;"); - assertThat( - select().all().from("foo").groupBy("c1", column("c2"), raw("c3")).orderBy(asc("c1")).toString()) - .isEqualTo("SELECT * FROM foo GROUP BY c1,c2,c3 ORDER BY c1 ASC;"); - assertThat( - select().all().from("foo").where(eq("x", 42)).groupBy("c1", column("c2"), raw("c3")).toString()) - .isEqualTo("SELECT * FROM foo WHERE x=42 GROUP BY c1,c2,c3;"); - assertThat( - select().all().from("foo").where(eq("x", 42)).groupBy("c1", column("c2"), raw("c3")).orderBy(asc("c1")).toString()) - .isEqualTo("SELECT * FROM foo WHERE x=42 GROUP BY c1,c2,c3 ORDER BY c1 ASC;"); - try { - select().all().from("foo").groupBy("foo").groupBy("bar"); - fail("Should not allow GROUP BY twice"); - } catch (IllegalStateException e) { - assertThat(e).hasMessage("A GROUP BY clause has already been provided"); - } - } - - /** - * @test_category queries:builder - */ - @Test(groups = "unit") - public void should_handle_allow_filtering() { - assertThat( - select().all().from("foo").allowFiltering().toString()) - .isEqualTo("SELECT * FROM foo ALLOW FILTERING;"); - assertThat( - select().all().from("foo").where(eq("x", 42)).allowFiltering().toString()) - .isEqualTo("SELECT * FROM foo WHERE x=42 ALLOW FILTERING;"); - } - + } + + /** @test_category queries:builder */ + @Test(groups = "unit") + public void should_handle_allow_filtering() { + assertThat(select().all().from("foo").allowFiltering().toString()) + .isEqualTo("SELECT * FROM foo ALLOW FILTERING;"); + assertThat(select().all().from("foo").where(eq("x", 42)).allowFiltering().toString()) + .isEqualTo("SELECT * FROM foo WHERE x=42 ALLOW FILTERING;"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTupleExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTupleExecutionTest.java index 6db041c18a0..40cf4156477 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTupleExecutionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderTupleExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,41 +17,42 @@ */ package com.datastax.driver.core.querybuilder; +import static com.datastax.driver.core.DataType.cint; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertEquals; + import com.datastax.driver.core.CCMTestsSupport; import com.datastax.driver.core.TupleType; import com.datastax.driver.core.TupleValue; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.ImmutableList; -import org.testng.annotations.Test; - import java.util.List; - -import static com.datastax.driver.core.DataType.cint; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; @CassandraVersion("2.1.3") public class QueryBuilderTupleExecutionTest extends CCMTestsSupport { - @Test(groups = "short") - public void should_handle_tuple() throws Exception { - String query = "INSERT INTO foo (k,x) VALUES (0,(1));"; - TupleType tupleType = cluster().getMetadata().newTupleType(cint()); - BuiltStatement insert = insertInto("foo").value("k", 0).value("x", tupleType.newValue(1)); - assertEquals(insert.toString(), query); - } - - @SuppressWarnings("deprecation") - @Test(groups = "short") - public void should_handle_collections_of_tuples() { - String query; - BuiltStatement statement; - query = "UPDATE foo SET l=[(1,2)] WHERE k=1;"; - TupleType tupleType = cluster().getMetadata().newTupleType(cint(), cint()); - List list = ImmutableList.of(tupleType.newValue(1, 2)); - statement = update("foo").with(set("l", list)).where(eq("k", 1)); - assertThat(statement.toString()).isEqualTo(query); - } + @Test(groups = "short") + public void should_handle_tuple() throws Exception { + String query = "INSERT INTO foo (k,x) VALUES (0,(1));"; + TupleType tupleType = cluster().getMetadata().newTupleType(cint()); + BuiltStatement insert = insertInto("foo").value("k", 0).value("x", tupleType.newValue(1)); + assertEquals(insert.toString(), query); + } + @SuppressWarnings("deprecation") + @Test(groups = "short") + public void should_handle_collections_of_tuples() { + String query; + BuiltStatement statement; + query = "UPDATE foo SET l=[(1,2)] WHERE k=1;"; + TupleType tupleType = cluster().getMetadata().newTupleType(cint(), cint()); + List list = ImmutableList.of(tupleType.newValue(1, 2)); + statement = update("foo").with(set("l", list)).where(eq("k", 1)); + assertThat(statement.toString()).isEqualTo(query); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderUDTExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderUDTExecutionTest.java index 85a91f4df10..66d64b66f7e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderUDTExecutionTest.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderUDTExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,122 +17,148 @@ */ package com.datastax.driver.core.querybuilder; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.path; +import static com.datastax.driver.core.querybuilder.QueryBuilder.putAll; +import static com.datastax.driver.core.querybuilder.QueryBuilder.raw; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.core.querybuilder.QueryBuilder.set; +import static com.datastax.driver.core.querybuilder.QueryBuilder.update; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createTable; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createType; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.udtLiteral; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertEquals; + +import com.datastax.driver.core.CCMTestsSupport; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.UDTValue; +import com.datastax.driver.core.UserType; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; -import org.testng.annotations.Test; - import java.net.InetAddress; import java.util.List; import java.util.Map; - -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; @CassandraVersion("2.1.3") public class QueryBuilderUDTExecutionTest extends CCMTestsSupport { - @Override - public void onTestContextInitialized() { - execute("CREATE TYPE udt (i int, a inet)", - "CREATE TABLE udtTest(k int PRIMARY KEY, t frozen, l list>, m map>)"); - } - - @Test(groups = "short") - public void insertUdtTest() throws Exception { - UserType udtType = cluster().getMetadata().getKeyspace(keyspace).getUserType("udt"); - UDTValue udtValue = udtType.newValue().setInt("i", 2).setInet("a", InetAddress.getByName("localhost")); - - Statement insert = insertInto("udtTest").value("k", 1).value("t", udtValue); - assertEquals(insert.toString(), "INSERT INTO udtTest (k,t) VALUES 
(1,{i:2,a:'127.0.0.1'});"); - - session().execute(insert); - - List rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); - - assertEquals(rows.size(), 1); - - Row r1 = rows.get(0); - assertEquals("127.0.0.1", r1.getUDTValue("t").getInet("a").getHostAddress()); - } - - @Test(groups = "short") - public void should_handle_collections_of_UDT() throws Exception { - UserType udtType = cluster().getMetadata().getKeyspace(keyspace).getUserType("udt"); - UDTValue udtValue = udtType.newValue().setInt("i", 2).setInet("a", InetAddress.getByName("localhost")); - UDTValue udtValue2 = udtType.newValue().setInt("i", 3).setInet("a", InetAddress.getByName("localhost")); - - Statement insert = insertInto("udtTest").value("k", 1).value("l", ImmutableList.of(udtValue)); - assertThat(insert.toString()).isEqualTo("INSERT INTO udtTest (k,l) VALUES (1,[{i:2,a:'127.0.0.1'}]);"); - - session().execute(insert); - - List rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); - - assertThat(rows.size()).isEqualTo(1); - - Row r1 = rows.get(0); - assertThat(r1.getList("l", UDTValue.class).get(0).getInet("a").getHostAddress()).isEqualTo("127.0.0.1"); - - Map map = Maps.newHashMap(); - map.put(0, udtValue); - map.put(2, udtValue2); - Statement updateMap = update("udtTest").with(putAll("m", map)).where(eq("k", 1)); - assertThat(updateMap.toString()) - .isEqualTo("UPDATE udtTest SET m=m+{0:{i:2,a:'127.0.0.1'},2:{i:3,a:'127.0.0.1'}} WHERE k=1;"); - - session().execute(updateMap); - - rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); - r1 = rows.get(0); - assertThat(r1.getMap("m", Integer.class, UDTValue.class)).isEqualTo(map); - } - - /** - * Ensures that UDT fields can be set and retrieved on their own using {@link QueryBuilder#set} and - * {@link QueryBuilder#select} respectively. 
- * - * @test_category queries:builder - * @jira_ticket JAVA-1286 - * @jira_ticket CASSANDRA-7423 - */ - @CassandraVersion(value = "3.6", description = "Requires CASSANDRA-7423 introduced in Cassandra 3.6") - @Test(groups = "short") - public void should_support_setting_and_retrieving_udt_fields() { - //given - String table = "unfrozen_udt_table"; - String udt = "person"; - session().execute(createType(udt).addColumn("first", DataType.text()).addColumn("last", DataType.text())); - UserType userType = cluster().getMetadata().getKeyspace(keyspace).getUserType(udt); - assertThat(userType).isNotNull(); - - session().execute(createTable(table).addPartitionKey("k", DataType.text()) - .addUDTColumn("u", udtLiteral(udt)) - ); - - UDTValue value = userType.newValue(); - value.setString("first", "Bob"); - value.setString("last", "Smith"); - session().execute(insertInto(table).value("k", "key").value("u", value)); - - //when - updating udt field - session().execute(update(table).with( - set(path("u", "first"), "Rick")) + @Override + public void onTestContextInitialized() { + execute( + "CREATE TYPE udt (i int, a inet)", + "CREATE TABLE udtTest(k int PRIMARY KEY, t frozen, l list>, m map>)"); + } + + @Test(groups = "short") + public void insertUdtTest() throws Exception { + UserType udtType = cluster().getMetadata().getKeyspace(keyspace).getUserType("udt"); + UDTValue udtValue = + udtType.newValue().setInt("i", 2).setInet("a", InetAddress.getByName("localhost")); + + Statement insert = insertInto("udtTest").value("k", 1).value("t", udtValue); + assertEquals(insert.toString(), "INSERT INTO udtTest (k,t) VALUES (1,{i:2,a:'127.0.0.1'});"); + + session().execute(insert); + + List rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); + + assertEquals(rows.size(), 1); + + Row r1 = rows.get(0); + assertEquals("127.0.0.1", r1.getUDTValue("t").getInet("a").getHostAddress()); + } + + @Test(groups = "short") + public void should_handle_collections_of_UDT() throws Exception { + UserType udtType = cluster().getMetadata().getKeyspace(keyspace).getUserType("udt"); + UDTValue udtValue = + udtType.newValue().setInt("i", 2).setInet("a", InetAddress.getByName("localhost")); + UDTValue udtValue2 = + udtType.newValue().setInt("i", 3).setInet("a", InetAddress.getByName("localhost")); + + Statement insert = insertInto("udtTest").value("k", 1).value("l", ImmutableList.of(udtValue)); + assertThat(insert.toString()) + .isEqualTo("INSERT INTO udtTest (k,l) VALUES (1,[{i:2,a:'127.0.0.1'}]);"); + + session().execute(insert); + + List rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); + + assertThat(rows.size()).isEqualTo(1); + + Row r1 = rows.get(0); + assertThat(r1.getList("l", UDTValue.class).get(0).getInet("a").getHostAddress()) + .isEqualTo("127.0.0.1"); + + Map map = Maps.newHashMap(); + map.put(0, udtValue); + map.put(2, udtValue2); + Statement updateMap = update("udtTest").with(putAll("m", map)).where(eq("k", 1)); + assertThat(updateMap.toString()) + .isEqualTo( + "UPDATE udtTest SET m=m+{0:{i:2,a:'127.0.0.1'},2:{i:3,a:'127.0.0.1'}} WHERE k=1;"); + + session().execute(updateMap); + + rows = session().execute(select().from("udtTest").where(eq("k", 1))).all(); + r1 = rows.get(0); + assertThat(r1.getMap("m", Integer.class, UDTValue.class)).isEqualTo(map); + } + + /** + * Ensures that UDT fields can be set and retrieved on their own using {@link QueryBuilder#set} + * and {@link QueryBuilder#select} respectively. 
+ * + * @test_category queries:builder + * @jira_ticket JAVA-1286 + * @jira_ticket CASSANDRA-7423 + */ + @CassandraVersion( + value = "3.6", + description = "Requires CASSANDRA-7423 introduced in Cassandra 3.6") + @Test(groups = "short") + public void should_support_setting_and_retrieving_udt_fields() { + // given + String table = "unfrozen_udt_table"; + String udt = "person"; + session() + .execute( + createType(udt).addColumn("first", DataType.text()).addColumn("last", DataType.text())); + UserType userType = cluster().getMetadata().getKeyspace(keyspace).getUserType(udt); + assertThat(userType).isNotNull(); + + session() + .execute( + createTable(table) + .addPartitionKey("k", DataType.text()) + .addUDTColumn("u", udtLiteral(udt))); + + UDTValue value = userType.newValue(); + value.setString("first", "Bob"); + value.setString("last", "Smith"); + session().execute(insertInto(table).value("k", "key").value("u", value)); + + // when - updating udt field + session() + .execute( + update(table) + .with(set(path("u", "first"), "Rick")) .and(set(raw("u.last"), "Jones")) .where(eq("k", "key"))); - //then - field should be updated and retrievable by field name. - Row r = session().execute(select() - .path("u", "first") - .raw("u.last") - .from(table) - .where(eq("k", "key"))).one(); - - assertThat(r.getString("u.first")).isEqualTo("Rick"); - assertThat(r.getString("u.last")).isEqualTo("Jones"); - } + // then - field should be updated and retrievable by field name. + Row r = + session() + .execute(select().path("u", "first").raw("u.last").from(table).where(eq("k", "key"))) + .one(); + assertThat(r.getString("u.first")).isEqualTo("Rick"); + assertThat(r.getString("u.last")).isEqualTo("Jones"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterKeyspaceTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterKeyspaceTest.java index 534bd281610..a1c5c50c0bf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterKeyspaceTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,42 +17,40 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.alterKeyspace; +import static org.assertj.core.api.Assertions.assertThat; import java.util.HashMap; import java.util.Map; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.alterKeyspace; -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class AlterKeyspaceTest { - @Test(groups = "unit") - public void should_alter_keyspace_with_options() throws Exception { - Map replicationOptions = new HashMap(); - replicationOptions.put("class", "SimpleStrategy"); - replicationOptions.put("replication_factor", 1); - - //When - SchemaStatement statement = alterKeyspace("test").with() - .durableWrites(true) - .replication(replicationOptions); - - //Then - assertThat(statement.getQueryString()) - .isEqualTo("\n\tALTER KEYSPACE test" + - "\n\tWITH\n\t\t" + - "REPLICATION = {'replication_factor': 1, 'class': 'SimpleStrategy'}\n\t\t" + - "AND DURABLE_WRITES = true"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void incorrect_replication_options() throws Exception { - Map replicationOptions = new HashMap(); - replicationOptions.put("class", 5); - - //When - alterKeyspace("test").with() - .replication(replicationOptions); - } + @Test(groups = "unit") + public void should_alter_keyspace_with_options() throws Exception { + Map replicationOptions = new HashMap(); + replicationOptions.put("class", "SimpleStrategy"); + replicationOptions.put("replication_factor", 1); + + // When + SchemaStatement statement = + alterKeyspace("test").with().durableWrites(true).replication(replicationOptions); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tALTER KEYSPACE test" + + "\n\tWITH\n\t\t" + + "REPLICATION = {'replication_factor': 1, 'class': 'SimpleStrategy'}\n\t\t" + + "AND DURABLE_WRITES = true"); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void incorrect_replication_options() throws Exception { + Map replicationOptions = new HashMap(); + replicationOptions.put("class", 5); + + // When + alterKeyspace("test").with().replication(replicationOptions); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterTest.java index 3deb2ef57f7..be41789af1a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/AlterTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,183 +17,216 @@ */ package com.datastax.driver.core.schemabuilder; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.Caching; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.KeyCaching; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.alterTable; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.always; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.frozen; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.leveledStrategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.lz4; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.rows; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.DataType; import org.testng.annotations.Test; -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; - public class AlterTest { - @Test(groups = "unit") - public void should_alter_column_type() throws Exception { - //When - SchemaStatement statement = alterTable("test").alterColumn("name").type(DataType.ascii()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ALTER name TYPE ascii"); - } - - @Test(groups = "unit") - public void should_alter_column_type_with_keyspace() throws Exception { - //When - SchemaStatement statement = alterTable("ks", "test").alterColumn("name").type(DataType.ascii()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE ks.test ALTER name TYPE ascii"); - } - - @Test(groups = "unit") - public void should_alter_column_type_to_UDT() throws Exception { - //When - SchemaStatement statement = alterTable("ks", "test").alterColumn("address").udtType(frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE ks.test ALTER address TYPE frozen
<address>
    "); - } - - @Test(groups = "unit") - public void should_add_column() throws Exception { - //When - SchemaStatement statement = alterTable("test").addColumn("location").type(DataType.ascii()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ADD location ascii"); - } - - @Test(groups = "unit") - public void should_add_column_with_UDT_type() throws Exception { - //When - SchemaStatement statement = alterTable("test").addColumn("location").udtType(frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ADD location frozen
    "); - } - - @Test(groups = "unit") - public void should_rename_column() throws Exception { - //When - SchemaStatement statement = alterTable("test").renameColumn("name").to("description"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test RENAME name TO description"); - } - - @Test(groups = "unit") - public void should_drop_column() throws Exception { - //When - SchemaStatement statement = alterTable("test").dropColumn("name"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test DROP name"); - } - - @Test(groups = "unit") - public void should_alter_table_options() throws Exception { - //When - // Note that this does not necessarily represent a valid configuration, the goal is just to test all options - // (some of which might be specific to C* 2.0 or 2.1) - SchemaStatement statement = alterTable("test").withOptions() - .bloomFilterFPChance(0.01) - .caching(Caching.ROWS_ONLY) - .comment("This is a comment") - .compactionOptions(leveledStrategy().ssTableSizeInMB(160)) - .compressionOptions(lz4()) - .dcLocalReadRepairChance(0.21) - .defaultTimeToLive(100) - .gcGraceSeconds(9999) - .indexInterval(256) - .minIndexInterval(64) - .maxIndexInterval(512) - .memtableFlushPeriodInMillis(12) - .populateIOCacheOnFlush(true) - .replicateOnWrite(true) - .readRepairChance(0.42) - .speculativeRetry(always()) - .cdc(true); - - SchemaStatement statementWith21Caching = alterTable("test").withOptions() - .caching(KeyCaching.NONE, rows(100)); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test\n\t" + - "WITH caching = 'rows_only' " + - "AND bloom_filter_fp_chance = 0.01 " + - "AND comment = 'This is a comment' " + - "AND compression = {'sstable_compression' : 'LZ4Compressor'} " + - "AND compaction = {'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160} " + - "AND dclocal_read_repair_chance = 0.21 " + - "AND default_time_to_live = 100 " + - "AND gc_grace_seconds = 9999 " + - "AND index_interval = 256 " + - "AND min_index_interval = 64 " + - "AND max_index_interval = 512 " + - "AND memtable_flush_period_in_ms = 12 " + - "AND populate_io_cache_on_flush = true " + - "AND read_repair_chance = 0.42 " + - "AND replicate_on_write = true " + - "AND speculative_retry = 'ALWAYS' " + - "AND cdc = true"); - - assertThat(statementWith21Caching.getQueryString()).isEqualTo("\n\tALTER TABLE test\n\t" + - "WITH caching = {'keys' : 'none', 'rows_per_partition' : 100}"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The keyspace name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { - alterTable("add", "test") - .addColumn("test").type(DataType.ascii()); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The table name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { - alterTable("add") - .addColumn("test").type(DataType.ascii()); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The new column name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_added_column_is_a_reserved_keyword() throws Exception { - alterTable("test") - .addColumn("add").type(DataType.ascii()).getQueryString(); - } - - 
@Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The altered column name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_altered_column_is_a_reserved_keyword() throws Exception { + @Test(groups = "unit") + public void should_alter_column_type() throws Exception { + // When + SchemaStatement statement = alterTable("test").alterColumn("name").type(DataType.ascii()); + + // Then + assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ALTER name TYPE ascii"); + } + + @Test(groups = "unit") + public void should_alter_column_type_with_keyspace() throws Exception { + // When + SchemaStatement statement = alterTable("ks", "test").alterColumn("name").type(DataType.ascii()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tALTER TABLE ks.test ALTER name TYPE ascii"); + } + + @Test(groups = "unit") + public void should_alter_column_type_to_UDT() throws Exception { + // When + SchemaStatement statement = + alterTable("ks", "test").alterColumn("address").udtType(frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tALTER TABLE ks.test ALTER address TYPE frozen
    "); + } + + @Test(groups = "unit") + public void should_add_column() throws Exception { + // When + SchemaStatement statement = alterTable("test").addColumn("location").type(DataType.ascii()); + + // Then + assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ADD location ascii"); + } + + @Test(groups = "unit") + public void should_add_column_with_UDT_type() throws Exception { + // When + SchemaStatement statement = alterTable("test").addColumn("location").udtType(frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tALTER TABLE test ADD location frozen
    "); + } + + @Test(groups = "unit") + public void should_rename_column() throws Exception { + // When + SchemaStatement statement = alterTable("test").renameColumn("name").to("description"); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tALTER TABLE test RENAME name TO description"); + } + + @Test(groups = "unit") + public void should_drop_column() throws Exception { + // When + SchemaStatement statement = alterTable("test").dropColumn("name"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test DROP name"); + } + + @Test(groups = "unit") + public void should_alter_table_options() throws Exception { + // When + // Note that this does not necessarily represent a valid configuration, the goal is just to test + // all options + // (some of which might be specific to C* 2.0 or 2.1) + SchemaStatement statement = alterTable("test") - .alterColumn("add").type(DataType.ascii()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The renamed column name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_renamed_column_is_a_reserved_keyword() throws Exception { - alterTable("test") - .renameColumn("add"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The new column name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_new_renamed_column_is_a_reserved_keyword() throws Exception { - alterTable("test") - .renameColumn("col").to("add"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The dropped column name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_drop_column_is_a_reserved_keyword() throws Exception { - alterTable("test") - .dropColumn("add").getQueryString(); - } - - @Test(groups = "unit") - public void should_add_static_column() throws Exception { - //When - SchemaStatement statement = alterTable("test").addStaticColumn("stat").type(DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ADD stat text static"); - } + .withOptions() + .bloomFilterFPChance(0.01) + .caching(Caching.ROWS_ONLY) + .comment("This is a comment") + .compactionOptions(leveledStrategy().ssTableSizeInMB(160)) + .compressionOptions(lz4()) + .dcLocalReadRepairChance(0.21) + .defaultTimeToLive(100) + .gcGraceSeconds(9999) + .indexInterval(256) + .minIndexInterval(64) + .maxIndexInterval(512) + .memtableFlushPeriodInMillis(12) + .populateIOCacheOnFlush(true) + .replicateOnWrite(true) + .readRepairChance(0.42) + .speculativeRetry(always()) + .cdc(true); + + SchemaStatement statementWith21Caching = + alterTable("test").withOptions().caching(KeyCaching.NONE, rows(100)); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tALTER TABLE test\n\t" + + "WITH caching = 'rows_only' " + + "AND bloom_filter_fp_chance = 0.01 " + + "AND comment = 'This is a comment' " + + "AND compression = {'sstable_compression' : 'LZ4Compressor'} " + + "AND compaction = {'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160} " + + "AND dclocal_read_repair_chance = 0.21 " + + "AND default_time_to_live = 100 " + + "AND gc_grace_seconds = 9999 " + + "AND index_interval = 256 " + + "AND min_index_interval = 64 " + + "AND max_index_interval = 512 " + + "AND memtable_flush_period_in_ms = 
12 " + + "AND populate_io_cache_on_flush = true " + + "AND read_repair_chance = 0.42 " + + "AND replicate_on_write = true " + + "AND speculative_retry = 'ALWAYS' " + + "AND cdc = true"); + + assertThat(statementWith21Caching.getQueryString()) + .isEqualTo( + "\n\tALTER TABLE test\n\t" + + "WITH caching = {'keys' : 'none', 'rows_per_partition' : 100}"); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The keyspace name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { + alterTable("add", "test").addColumn("test").type(DataType.ascii()); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The table name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { + alterTable("add").addColumn("test").type(DataType.ascii()); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The new column name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_added_column_is_a_reserved_keyword() throws Exception { + alterTable("test").addColumn("add").type(DataType.ascii()).getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The altered column name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_altered_column_is_a_reserved_keyword() throws Exception { + alterTable("test").alterColumn("add").type(DataType.ascii()).getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The renamed column name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_renamed_column_is_a_reserved_keyword() throws Exception { + alterTable("test").renameColumn("add"); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The new column name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_new_renamed_column_is_a_reserved_keyword() throws Exception { + alterTable("test").renameColumn("col").to("add"); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The dropped column name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_drop_column_is_a_reserved_keyword() throws Exception { + alterTable("test").dropColumn("add").getQueryString(); + } + + @Test(groups = "unit") + public void should_add_static_column() throws Exception { + // When + SchemaStatement statement = alterTable("test").addStaticColumn("stat").type(DataType.text()); + + // Then + assertThat(statement.getQueryString()).isEqualTo("\n\tALTER TABLE test ADD stat text static"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompactionOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompactionOptionsTest.java index b5f01a92d05..001331bb15f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompactionOptionsTest.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompactionOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,140 +17,176 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions.TimeStampResolution; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dateTieredStrategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.leveledStrategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.sizedTieredStategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.timeWindowCompactionStrategy; +import static com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions; +import static com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions; +import static com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.TimeWindowCompactionStrategyOptions.CompactionWindowUnit; import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; + public class CompactionOptionsTest { - @Test(groups = "unit") - public void should_create_sized_tiered_compaction_options() throws Exception { - //When - final String built = sizedTieredStategy() - .bucketLow(0.5) - .bucketHigh(1.2) - .coldReadsRatioToOmit(0.89) - .enabled(true) - .minThreshold(2) - .maxThreshold(4) - .minSSTableSizeInBytes(5000000L) - .tombstoneCompactionIntervalInDay(3) - .tombstoneThreshold(0.7) - .uncheckedTombstoneCompaction(true) - .build(); - - //Then - assertThat(built).isEqualTo("{'class' : 'SizeTieredCompactionStrategy', " + - "'enabled' : true, " + - "'tombstone_compaction_interval' : 3, " + - "'tombstone_threshold' : 0.7, " + - "'unchecked_tombstone_compaction' : true, " + - "'bucket_high' : 1.2, " + - "'bucket_low' : 0.5, " + - "'cold_reads_to_omit' : 0.89, " + - "'min_threshold' : 2, " + - "'max_threshold' : 4, " + - "'min_sstable_size' : 5000000}"); - } - - @Test(groups = "unit") - public void should_create_leveled_compaction_option() throws Exception { - //When - final String built = leveledStrategy() - .enabled(true) - .ssTableSizeInMB(160) - .tombstoneCompactionIntervalInDay(3) - .tombstoneThreshold(0.7) - .uncheckedTombstoneCompaction(true) - .build(); - - //Then - assertThat(built).isEqualTo("{'class' : 'LeveledCompactionStrategy', " + - "'enabled' : true, " + - "'tombstone_compaction_interval' : 3, " + - 
"'tombstone_threshold' : 0.7, " + - "'unchecked_tombstone_compaction' : true, " + - "'sstable_size_in_mb' : 160}"); - } - - @Test(groups = "unit") - public void should_create_date_tiered_compaction_option() throws Exception { - //When - String built = dateTieredStrategy() - .baseTimeSeconds(7200) - .enabled(true) - .maxSSTableAgeDays(400) - .minThreshold(2) - .maxThreshold(4) - .timestampResolution(TimeStampResolution.MICROSECONDS) - .tombstoneCompactionIntervalInDay(3) - .tombstoneThreshold(0.7) - .uncheckedTombstoneCompaction(true) - .build(); - - //Then - assertThat(built).isEqualTo("{'class' : 'DateTieredCompactionStrategy', " + - "'enabled' : true, " + - "'tombstone_compaction_interval' : 3, " + - "'tombstone_threshold' : 0.7, " + - "'unchecked_tombstone_compaction' : true, " + - "'base_time_seconds' : 7200, " + - "'max_sstable_age_days' : 400, " + - "'min_threshold' : 2, " + - "'max_threshold' : 4, " + - "'timestamp_resolution' : 'MICROSECONDS'}"); - } - - @Test(groups = "unit") - public void should_handle_freeform_options() { - //When - String built = dateTieredStrategy() - .freeformOption("foo", "bar") - .freeformOption("baz", 1) - .build(); - - //Then - assertThat(built).isEqualTo("{'class' : 'DateTieredCompactionStrategy', " + - "'foo' : 'bar', " + - "'baz' : 1}"); - - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_throw_exception_if_cold_read_ratio_out_of_range() throws Exception { - sizedTieredStategy() - .bucketLow(0.5) - .bucketHigh(1.2) - .coldReadsRatioToOmit(1.89) - .build(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_throw_exception_if_cold_read_ratio_negative() throws Exception { - sizedTieredStategy() - .bucketLow(0.5) - .bucketHigh(1.2) - .coldReadsRatioToOmit(-1.0) - .build(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_throw_exception_if_tombstone_threshold_out_of_range() throws Exception { - sizedTieredStategy() - .bucketLow(0.5) - .bucketHigh(1.2) - .tombstoneThreshold(1.89) - .build(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_throw_exception_if_tombstone_threshold_negative() throws Exception { + @Test(groups = "unit") + public void should_create_sized_tiered_compaction_options() throws Exception { + // When + final String built = sizedTieredStategy() - .bucketLow(0.5) - .bucketHigh(1.2) - .coldReadsRatioToOmit(-1.0) - .build(); - } + .bucketLow(0.5) + .bucketHigh(1.2) + .coldReadsRatioToOmit(0.89) + .enabled(true) + .minThreshold(2) + .maxThreshold(4) + .minSSTableSizeInBytes(5000000L) + .tombstoneCompactionIntervalInDay(3) + .tombstoneThreshold(0.7) + .uncheckedTombstoneCompaction(true) + .build(); + + // Then + assertThat(built) + .isEqualTo( + "{'class' : 'SizeTieredCompactionStrategy', " + + "'enabled' : true, " + + "'tombstone_compaction_interval' : 3, " + + "'tombstone_threshold' : 0.7, " + + "'unchecked_tombstone_compaction' : true, " + + "'bucket_high' : 1.2, " + + "'bucket_low' : 0.5, " + + "'cold_reads_to_omit' : 0.89, " + + "'min_threshold' : 2, " + + "'max_threshold' : 4, " + + "'min_sstable_size' : 5000000}"); + } + + @Test(groups = "unit") + public void should_create_leveled_compaction_option() throws Exception { + // When + final String built = + leveledStrategy() + .enabled(true) + .ssTableSizeInMB(160) + .tombstoneCompactionIntervalInDay(3) + .tombstoneThreshold(0.7) + .uncheckedTombstoneCompaction(true) + 
.build(); + + // Then + assertThat(built) + .isEqualTo( + "{'class' : 'LeveledCompactionStrategy', " + + "'enabled' : true, " + + "'tombstone_compaction_interval' : 3, " + + "'tombstone_threshold' : 0.7, " + + "'unchecked_tombstone_compaction' : true, " + + "'sstable_size_in_mb' : 160}"); + } + + @Test(groups = "unit") + public void should_create_date_tiered_compaction_option() throws Exception { + // When + String built = + dateTieredStrategy() + .baseTimeSeconds(7200) + .enabled(true) + .maxSSTableAgeDays(400) + .minThreshold(2) + .maxThreshold(4) + .timestampResolution( + DateTieredCompactionStrategyOptions.TimeStampResolution.MICROSECONDS) + .tombstoneCompactionIntervalInDay(3) + .tombstoneThreshold(0.7) + .uncheckedTombstoneCompaction(true) + .build(); + + // Then + assertThat(built) + .isEqualTo( + "{'class' : 'DateTieredCompactionStrategy', " + + "'enabled' : true, " + + "'tombstone_compaction_interval' : 3, " + + "'tombstone_threshold' : 0.7, " + + "'unchecked_tombstone_compaction' : true, " + + "'base_time_seconds' : 7200, " + + "'max_sstable_age_days' : 400, " + + "'min_threshold' : 2, " + + "'max_threshold' : 4, " + + "'timestamp_resolution' : 'MICROSECONDS'}"); + } + + @Test(groups = "unit") + public void should_create_time_window_compaction_option() throws Exception { + // When + String built = + timeWindowCompactionStrategy() + .bucketLow(0.5) + .bucketHigh(1.2) + .compactionWindowUnit(CompactionWindowUnit.HOURS) + .compactionWindowSize(5) + .enabled(true) + .minThreshold(2) + .maxThreshold(4) + .minSSTableSizeInBytes(5000000L) + .timestampResolution( + TimeWindowCompactionStrategyOptions.TimeStampResolution.MICROSECONDS) + .tombstoneCompactionIntervalInDay(3) + .tombstoneThreshold(0.7) + .uncheckedTombstoneCompaction(true) + .unsafeAggressiveSSTableExpiration(true) + .build(); + + // Then + assertThat(built) + .isEqualTo( + "{'class' : 'TimeWindowCompactionStrategy', " + + "'enabled' : true, " + + "'tombstone_compaction_interval' : 3, " + + "'tombstone_threshold' : 0.7, " + + "'unchecked_tombstone_compaction' : true, " + + "'bucket_high' : 1.2, " + + "'bucket_low' : 0.5, " + + "'compaction_window_unit' : 'HOURS', " + + "'compaction_window_size' : 5, " + + "'min_threshold' : 2, " + + "'max_threshold' : 4, " + + "'min_sstable_size' : 5000000, " + + "'timestamp_resolution' : 'MICROSECONDS', " + + "'unsafe_aggressive_sstable_expiration' : 'true'}"); + } + + @Test(groups = "unit") + public void should_handle_freeform_options() { + // When + String built = + dateTieredStrategy().freeformOption("foo", "bar").freeformOption("baz", 1).build(); + + // Then + assertThat(built) + .isEqualTo( + "{'class' : 'DateTieredCompactionStrategy', " + "'foo' : 'bar', " + "'baz' : 1}"); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_throw_exception_if_cold_read_ratio_out_of_range() throws Exception { + sizedTieredStategy().bucketLow(0.5).bucketHigh(1.2).coldReadsRatioToOmit(1.89).build(); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_throw_exception_if_cold_read_ratio_negative() throws Exception { + sizedTieredStategy().bucketLow(0.5).bucketHigh(1.2).coldReadsRatioToOmit(-1.0).build(); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_throw_exception_if_tombstone_threshold_out_of_range() throws Exception { + sizedTieredStategy().bucketLow(0.5).bucketHigh(1.2).tombstoneThreshold(1.89).build(); + } + + @Test(groups = "unit", 
expectedExceptions = IllegalArgumentException.class) + public void should_throw_exception_if_tombstone_threshold_negative() throws Exception { + sizedTieredStategy().bucketLow(0.5).bucketHigh(1.2).coldReadsRatioToOmit(-1.0).build(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompressionOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompressionOptionsTest.java index 9302adf9765..49e46a2669e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompressionOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CompressionOptionsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,46 +17,55 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.deflate; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.lz4; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.noCompression; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.snappy; import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; + public class CompressionOptionsTest { - @Test(groups = "unit") - public void should_build_compressions_options_for_lz4() throws Exception { - //When - final String built = lz4().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); - - //Then - assertThat(built).isEqualTo("{'sstable_compression' : 'LZ4Compressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); - } - - @Test(groups = "unit") - public void should_create_snappy_compressions_options() throws Exception { - //When - final String built = snappy().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); - - //Then - assertThat(built).isEqualTo("{'sstable_compression' : 'SnappyCompressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); - } - - @Test(groups = "unit") - public void should_create_deflate_compressions_options() throws Exception { - //When - final String built = deflate().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); - - //Then - assertThat(built).isEqualTo("{'sstable_compression' : 'DeflateCompressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); - } - - @Test(groups = "unit") - public void should_create_no_compressions_options() throws Exception { - //When - final String built = noCompression().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); - - //Then - assertThat(built).isEqualTo("{'sstable_compression' : ''}"); - } + @Test(groups = 
"unit") + public void should_build_compressions_options_for_lz4() throws Exception { + // When + final String built = lz4().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); + + // Then + assertThat(built) + .isEqualTo( + "{'sstable_compression' : 'LZ4Compressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); + } + + @Test(groups = "unit") + public void should_create_snappy_compressions_options() throws Exception { + // When + final String built = snappy().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); + + // Then + assertThat(built) + .isEqualTo( + "{'sstable_compression' : 'SnappyCompressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); + } + + @Test(groups = "unit") + public void should_create_deflate_compressions_options() throws Exception { + // When + final String built = deflate().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); + + // Then + assertThat(built) + .isEqualTo( + "{'sstable_compression' : 'DeflateCompressor', 'chunk_length_kb' : 128, 'crc_check_chance' : 0.6}"); + } + + @Test(groups = "unit") + public void should_create_no_compressions_options() throws Exception { + // When + final String built = noCompression().withChunkLengthInKb(128).withCRCCheckChance(0.6D).build(); + + // Then + assertThat(built).isEqualTo("{'sstable_compression' : ''}"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateIndexTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateIndexTest.java index 1088b82d6ad..1dc8d408c10 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateIndexTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateIndexTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,29 +17,32 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; - import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createIndex; import static org.assertj.core.api.Assertions.assertThat; -public class CreateIndexTest { - - @Test(groups = "unit") - public void should_create_index() throws Exception { - //Given //When - SchemaStatement statement = createIndex("myIndex").ifNotExists().onTable("ks", "test").andColumn("col"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE INDEX IF NOT EXISTS myIndex ON ks.test(col)"); - } - - @Test(groups = "unit") - public void should_create_index_on_keys_of_map_column() throws Exception { - //Given //When - SchemaStatement statement = createIndex("myIndex").ifNotExists().onTable("ks", "test").andKeysOfColumn("col"); +import org.testng.annotations.Test; - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE INDEX IF NOT EXISTS myIndex ON ks.test(KEYS(col))"); - } +public class CreateIndexTest { + @Test(groups = "unit") + public void should_create_index() throws Exception { + // Given //When + SchemaStatement statement = + createIndex("myIndex").ifNotExists().onTable("ks", "test").andColumn("col"); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tCREATE INDEX IF NOT EXISTS myIndex ON ks.test(col)"); + } + + @Test(groups = "unit") + public void should_create_index_on_keys_of_map_column() throws Exception { + // Given //When + SchemaStatement statement = + createIndex("myIndex").ifNotExists().onTable("ks", "test").andKeysOfColumn("col"); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tCREATE INDEX IF NOT EXISTS myIndex ON ks.test(KEYS(col))"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateKeyspaceTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateKeyspaceTest.java index 71983d504c8..2a4a5f9a458 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateKeyspaceTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,42 +17,40 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createKeyspace; +import static org.assertj.core.api.Assertions.assertThat; import java.util.HashMap; import java.util.Map; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createKeyspace; -import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; public class CreateKeyspaceTest { - @Test(groups = "unit") - public void should_create_keyspace_with_options() throws Exception { - Map replicationOptions = new HashMap(); - replicationOptions.put("class", "SimpleStrategy"); - replicationOptions.put("replication_factor", 1); - - //When - SchemaStatement statement = createKeyspace("test").with() - .durableWrites(true) - .replication(replicationOptions); - - //Then - assertThat(statement.getQueryString()) - .isEqualTo("\n\tCREATE KEYSPACE test" + - "\n\tWITH\n\t\t" + - "REPLICATION = {'replication_factor': 1, 'class': 'SimpleStrategy'}\n\t\t" + - "AND DURABLE_WRITES = true"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void incorrect_replication_options() throws Exception { - Map replicationOptions = new HashMap(); - replicationOptions.put("class", 5); - - //When - createKeyspace("test").with() - .replication(replicationOptions); - } + @Test(groups = "unit") + public void should_create_keyspace_with_options() throws Exception { + Map replicationOptions = new HashMap(); + replicationOptions.put("class", "SimpleStrategy"); + replicationOptions.put("replication_factor", 1); + + // When + SchemaStatement statement = + createKeyspace("test").with().durableWrites(true).replication(replicationOptions); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE KEYSPACE test" + + "\n\tWITH\n\t\t" + + "REPLICATION = {'replication_factor': 1, 'class': 'SimpleStrategy'}\n\t\t" + + "AND DURABLE_WRITES = true"); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void incorrect_replication_options() throws Exception { + Map replicationOptions = new HashMap(); + replicationOptions.put("class", 5); + + // When + createKeyspace("test").with().replication(replicationOptions); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTest.java index d2f5f29f31b..ee994ea5416 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,630 +17,769 @@ */ package com.datastax.driver.core.schemabuilder; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.Caching; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.Direction; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.KeyCaching; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.allRows; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.always; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createTable; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.frozen; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.leveledStrategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.lz4; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.millisecs; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.noSpeculativeRetry; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.percentile; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.rows; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.DataType; import org.testng.annotations.Test; -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; - -/** - * Note: some addColumn variants are covered in {@link CreateTypeTest}. - */ +/** Note: some addColumn variants are covered in {@link CreateTypeTest}. 
*/ public class CreateTest { - @Test(groups = "unit") - public void should_create_simple_table() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_table_with_udt_partition_key() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addUDTPartitionKey("u", frozen("user")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "u frozen,\n\t\t" + - "PRIMARY KEY(u))" - ); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class) - public void should_fail_when_creating_table_without_partition_key() throws Exception { - createTable("test").addColumn("name", DataType.text()).getQueryString(); - } - - @Test(groups = "unit") - public void should_create_simple_table_if_not_exists() throws Exception { - //When - SchemaStatement statement = createTable("test") - .ifNotExists() - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()); - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE IF NOT EXISTS test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_simple_table_with_keyspace() throws Exception { - //When - SchemaStatement statement = createTable("ks", "test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()); - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE ks.test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_simple_table_with_list() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("friends", DataType.list(DataType.text())); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "friends list,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_simple_table_with_set() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("friends", DataType.set(DataType.text())); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "friends set,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_simple_table_with_map() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("friends", DataType.map(DataType.cint(), DataType.text())); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "friends map,\n\t\t" + - "PRIMARY KEY(id))"); - } - - @Test(groups = "unit") - public void should_create_table_with_clustering_keys() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()); - - //Then - 
assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 uuid,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col1, col2))"); - } - - @Test(groups = "unit") - public void should_create_table_with_udt_clustering_keys() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addUDTClusteringColumn("col2", frozen("address")) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 frozen
<address>
    ,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col1, col2))"); - } - - @Test(groups = "unit") - public void should_create_table_with_composite_partition_key() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id1", DataType.bigint()) - .addPartitionKey("id2", DataType.text()) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id1 bigint,\n\t\t" + - "id2 text,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY((id1, id2)))"); - } - - @Test(groups = "unit") - public void should_create_table_with_composite_partition_key_and_clustering_keys() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id1", DataType.bigint()) - .addPartitionKey("id2", DataType.text()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id1 bigint,\n\t\t" + - "id2 text,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 uuid,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY((id1, id2), col1, col2))"); - } - - @Test(groups = "unit") - public void should_create_table_with_static_column() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col", DataType.uuid()) - .addStaticColumn("bucket", DataType.cint()) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col uuid,\n\t\t" + - "bucket int static,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col))"); - } - - @Test(groups = "unit") - public void should_create_table_with_udt_static_column() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col", DataType.uuid()) - .addUDTStaticColumn("bucket", frozen("address")) - .addColumn("name", DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col uuid,\n\t\t" + - "bucket frozen
    static,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col))"); - } - - @Test(groups = "unit") - public void should_create_table_with_clustering_order() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()) - .withOptions() - .clusteringOrder("col1", Direction.ASC) - .clusteringOrder("col2", Direction.DESC); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 uuid,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col1, col2))\n\t" + - "WITH CLUSTERING ORDER BY(col1 ASC, col2 DESC)"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_when_blank_clustering_order_column_provided() throws Exception { + @Test(groups = "unit") + public void should_create_simple_table() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()) - .withOptions().clusteringOrder("", Direction.DESC); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_when_clustering_order_column_does_not_match_declared_clustering_keys() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_table_with_udt_partition_key() throws Exception { + // When + SchemaStatement statement = createTable("test").addUDTPartitionKey("u", frozen("user")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tCREATE TABLE test(\n\t\t" + "u frozen,\n\t\t" + "PRIMARY KEY(u))"); + } + + @Test(groups = "unit", expectedExceptions = IllegalStateException.class) + public void should_fail_when_creating_table_without_partition_key() throws Exception { + createTable("test").addColumn("name", DataType.text()).getQueryString(); + } + + @Test(groups = "unit") + public void should_create_simple_table_if_not_exists() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()) - .withOptions().clusteringOrder("col3", Direction.ASC); - } - - @Test(groups = "unit") - public void should_create_table_with_compact_storage() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()) - .withOptions() - .compactStorage(); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 uuid,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id, col1, col2))\n\t" + - "WITH COMPACT STORAGE"); - } - - @Test(groups = "unit") - public void should_create_table_with_all_options() throws Exception { - //When - 
SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addClusteringColumn("col1", DataType.uuid()) - .addClusteringColumn("col2", DataType.uuid()) - .addColumn("name", DataType.text()) - .withOptions() - .clusteringOrder("col1", Direction.ASC) - .clusteringOrder("col2", Direction.DESC) - .compactStorage() - .bloomFilterFPChance(0.01) - .caching(Caching.ROWS_ONLY) - .comment("This is a comment") - .compactionOptions(leveledStrategy().ssTableSizeInMB(160)) - .compressionOptions(lz4()) - .dcLocalReadRepairChance(0.21) - .defaultTimeToLive(100) - .gcGraceSeconds(9999) - .minIndexInterval(64) - .maxIndexInterval(512) - .memtableFlushPeriodInMillis(12) - .populateIOCacheOnFlush(true) - .readRepairChance(0.05) - .replicateOnWrite(true) - .speculativeRetry(always()) - .cdc(true); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "col1 uuid,\n\t\t" + - "col2 uuid,\n\t\t" + - "name text,\n\t\tPRIMARY KEY(id, col1, col2))\n\t" + - "WITH caching = 'rows_only' " + - "AND bloom_filter_fp_chance = 0.01 " + - "AND comment = 'This is a comment' " + - "AND compression = {'sstable_compression' : 'LZ4Compressor'} " + - "AND compaction = {'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160} " + - "AND dclocal_read_repair_chance = 0.21 " + - "AND default_time_to_live = 100 " + - "AND gc_grace_seconds = 9999 " + - "AND min_index_interval = 64 " + - "AND max_index_interval = 512 " + - "AND memtable_flush_period_in_ms = 12 " + - "AND populate_io_cache_on_flush = true " + - "AND read_repair_chance = 0.05 " + - "AND replicate_on_write = true " + - "AND speculative_retry = 'ALWAYS' " + - "AND cdc = true AND CLUSTERING ORDER BY(col1 ASC, col2 DESC) AND COMPACT STORAGE"); - } - - @Test(groups = "unit") - public void should_build_table_with_new_caching_options() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .caching(KeyCaching.ALL, rows(100)); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\tPRIMARY KEY(id))\n\t" + - "WITH caching = {'keys' : 'all', 'rows_per_partition' : 100}"); - - //When - statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .caching(KeyCaching.ALL, allRows()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\tPRIMARY KEY(id))\n\t" + - "WITH caching = {'keys' : 'all', 'rows_per_partition' : 'all'}"); - } - - @Test(groups = "unit") - public void should_build_table_with_custom_option() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .freeformOption("key1", "value1") - .freeformOption("key2", 1.0); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\tPRIMARY KEY(id))\n\t" + - "WITH key1 = 'value1' " + - "AND key2 = 1.0"); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class) - public void should_fail_when_both_caching_versions() throws Exception { + .ifNotExists() + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()); + // Then + 
assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE IF NOT EXISTS test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_simple_table_with_keyspace() throws Exception { + // When + SchemaStatement statement = + createTable("ks", "test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()); + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE ks.test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_simple_table_with_list() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .caching(Caching.KEYS_ONLY) - .caching(KeyCaching.ALL, allRows()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_when_negative_rows_per_partition() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("friends", DataType.list(DataType.text())); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "friends list,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_simple_table_with_set() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .caching(KeyCaching.ALL, rows(-3)); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_when_read_repair_chance_out_of_bound() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("friends", DataType.set(DataType.text())); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "friends set,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_simple_table_with_map() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .readRepairChance(1.3); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) - public void should_fail_when_read_repair_chance_negative() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("friends", DataType.map(DataType.cint(), DataType.text())); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "friends map,\n\t\t" + + "PRIMARY KEY(id))"); + } + + @Test(groups = "unit") + public void should_create_table_with_clustering_keys() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .readRepairChance(-1.3); - } - - @Test(groups = "unit") - public void should_create_table_with_speculative_retry_none() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .speculativeRetry(noSpeculativeRetry()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE 
TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))\n\t" + - "WITH speculative_retry = 'NONE'"); - } - - @Test(groups = "unit") - public void should_create_table_with_speculative_retry_in_percentile() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .speculativeRetry(percentile(95)); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))\n\t" + - "WITH speculative_retry = '95percentile'"); - } - - @Test(groups = "unit") - public void should_create_table_with_speculative_retry_in_milli_secs() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .speculativeRetry(millisecs(12)); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))\n\t" + - "WITH speculative_retry = '12ms'"); - } - - @Test(groups = "unit") - public void should_create_table_with_cdc_true() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .cdc(true); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))\n\t" + - "WITH cdc = true"); - } - - @Test(groups = "unit") - public void should_create_table_with_cdc_false() throws Exception { - //When - SchemaStatement statement = createTable("test") - .addPartitionKey("id", DataType.bigint()) - .addColumn("name", DataType.text()) - .withOptions() - .cdc(false); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TABLE test(\n\t\t" + - "id bigint,\n\t\t" + - "name text,\n\t\t" + - "PRIMARY KEY(id))\n\t" + - "WITH cdc = false"); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The '\\[pk\\]' columns can not be declared as partition keys and clustering keys at the same time") - public void should_fail_if_same_partition_and_clustering_column() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 uuid,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col1, col2))"); + } + + @Test(groups = "unit") + public void should_create_table_with_udt_clustering_keys() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("pk", DataType.bigint()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The '\\[pk\\]' columns can not be declared as partition keys and simple columns at the same time") - public void should_fail_if_same_partition_and_simple_column() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addUDTClusteringColumn("col2", frozen("address")) + 
.addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 frozen
    ,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col1, col2))"); + } + + @Test(groups = "unit") + public void should_create_table_with_composite_partition_key() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addColumn("pk", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The '\\[cluster\\]' columns can not be declared as clustering keys and simple columns at the same time") - public void should_fail_if_same_clustering_and_simple_column() throws Exception { + .addPartitionKey("id1", DataType.bigint()) + .addPartitionKey("id2", DataType.text()) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id1 bigint,\n\t\t" + + "id2 text,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY((id1, id2)))"); + } + + @Test(groups = "unit") + public void should_create_table_with_composite_partition_key_and_clustering_keys() + throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.bigint()) - .addColumn("cluster", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The '\\[pk\\]' columns can not be declared as partition keys and static columns at the same time") - public void should_fail_if_same_partition_and_static_column() throws Exception { + .addPartitionKey("id1", DataType.bigint()) + .addPartitionKey("id2", DataType.text()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id1 bigint,\n\t\t" + + "id2 text,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 uuid,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY((id1, id2), col1, col2))"); + } + + @Test(groups = "unit") + public void should_create_table_with_static_column() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addStaticColumn("pk", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The '\\[cluster\\]' columns can not be declared as clustering keys and static columns at the same time") - public void should_fail_if_same_clustering_and_static_column() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col", DataType.uuid()) + .addStaticColumn("bucket", DataType.cint()) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col uuid,\n\t\t" + + "bucket int static,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col))"); + } + + @Test(groups = "unit") + public void should_create_table_with_udt_static_column() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.bigint()) - .addStaticColumn("cluster", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The 
'\\[col\\]' columns can not be declared as simple columns and static columns at the same time") - public void should_fail_if_same_simple_and_static_column() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col", DataType.uuid()) + .addUDTStaticColumn("bucket", frozen("address")) + .addColumn("name", DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col uuid,\n\t\t" + + "bucket frozen
    static,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col))"); + } + + @Test(groups = "unit") + public void should_create_table_with_clustering_order() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.uuid()) - .addColumn("col", DataType.bigint()) - .addStaticColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "The table 'test' cannot declare static columns '\\[stat\\]' without clustering columns") - public void should_fail_if_static_column_in_non_clustered_table() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()) + .withOptions() + .clusteringOrder("col1", Direction.ASC) + .clusteringOrder("col2", Direction.DESC); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 uuid,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col1, col2))\n\t" + + "WITH CLUSTERING ORDER BY(col1 ASC, col2 DESC)"); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_when_blank_clustering_order_column_provided() throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()) + .withOptions() + .clusteringOrder("", Direction.DESC); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_when_clustering_order_column_does_not_match_declared_clustering_keys() + throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()) + .withOptions() + .clusteringOrder("col3", Direction.ASC); + } + + @Test(groups = "unit") + public void should_create_table_with_compact_storage() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addStaticColumn("stat", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The keyspace name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { - createTable("ADD", "test") - .addPartitionKey("pk", DataType.bigint()) - .addColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The table name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { - createTable("ADD") - .addPartitionKey("pk", DataType.bigint()) - .addColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The partition key name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_partition_key_is_a_reserved_keyword() throws Exception { + 
.addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()) + .withOptions() + .compactStorage(); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 uuid,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id, col1, col2))\n\t" + + "WITH COMPACT STORAGE"); + } + + @Test(groups = "unit") + public void should_create_table_with_all_options() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("ADD", DataType.bigint()) - .addColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The clustering column name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_clustering_key_is_a_reserved_keyword() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addClusteringColumn("col1", DataType.uuid()) + .addClusteringColumn("col2", DataType.uuid()) + .addColumn("name", DataType.text()) + .withOptions() + .clusteringOrder("col1", Direction.ASC) + .clusteringOrder("col2", Direction.DESC) + .compactStorage() + .bloomFilterFPChance(0.01) + .caching(Caching.ROWS_ONLY) + .comment("This is a comment") + .compactionOptions(leveledStrategy().ssTableSizeInMB(160)) + .compressionOptions(lz4()) + .dcLocalReadRepairChance(0.21) + .defaultTimeToLive(100) + .gcGraceSeconds(9999) + .minIndexInterval(64) + .maxIndexInterval(512) + .memtableFlushPeriodInMillis(12) + .populateIOCacheOnFlush(true) + .readRepairChance(0.05) + .replicateOnWrite(true) + .speculativeRetry(always()) + .cdc(true); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "col1 uuid,\n\t\t" + + "col2 uuid,\n\t\t" + + "name text,\n\t\tPRIMARY KEY(id, col1, col2))\n\t" + + "WITH caching = 'rows_only' " + + "AND bloom_filter_fp_chance = 0.01 " + + "AND comment = 'This is a comment' " + + "AND compression = {'sstable_compression' : 'LZ4Compressor'} " + + "AND compaction = {'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160} " + + "AND dclocal_read_repair_chance = 0.21 " + + "AND default_time_to_live = 100 " + + "AND gc_grace_seconds = 9999 " + + "AND min_index_interval = 64 " + + "AND max_index_interval = 512 " + + "AND memtable_flush_period_in_ms = 12 " + + "AND populate_io_cache_on_flush = true " + + "AND read_repair_chance = 0.05 " + + "AND replicate_on_write = true " + + "AND speculative_retry = 'ALWAYS' " + + "AND cdc = true AND CLUSTERING ORDER BY(col1 ASC, col2 DESC) AND COMPACT STORAGE"); + } + + @Test(groups = "unit") + public void should_build_table_with_new_caching_options() throws Exception { + // When + SchemaStatement statement = + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .caching(KeyCaching.ALL, rows(100)); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\tPRIMARY KEY(id))\n\t" + + "WITH caching = {'keys' : 'all', 'rows_per_partition' : 100}"); + + // When + statement = + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .caching(KeyCaching.ALL, allRows()); + + // Then + 
assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\tPRIMARY KEY(id))\n\t" + + "WITH caching = {'keys' : 'all', 'rows_per_partition' : 'all'}"); + } + + @Test(groups = "unit") + public void should_build_table_with_custom_option() throws Exception { + // When + SchemaStatement statement = + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .freeformOption("key1", "value1") + .freeformOption("key2", 1.0); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\tPRIMARY KEY(id))\n\t" + + "WITH key1 = 'value1' " + + "AND key2 = 1.0"); + } + + @Test(groups = "unit", expectedExceptions = IllegalStateException.class) + public void should_fail_when_both_caching_versions() throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .caching(Caching.KEYS_ONLY) + .caching(KeyCaching.ALL, allRows()) + .getQueryString(); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_when_negative_rows_per_partition() throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .caching(KeyCaching.ALL, rows(-3)); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_when_read_repair_chance_out_of_bound() throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .readRepairChance(1.3); + } + + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void should_fail_when_read_repair_chance_negative() throws Exception { + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .readRepairChance(-1.3); + } + + @Test(groups = "unit") + public void should_create_table_with_speculative_retry_none() throws Exception { + // When + SchemaStatement statement = + createTable("test") + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .speculativeRetry(noSpeculativeRetry()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))\n\t" + + "WITH speculative_retry = 'NONE'"); + } + + @Test(groups = "unit") + public void should_create_table_with_speculative_retry_in_percentile() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("ADD", DataType.uuid()) - .addColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The column name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_simple_column_is_a_reserved_keyword() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .speculativeRetry(percentile(95)); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))\n\t" + + "WITH 
speculative_retry = '95percentile'"); + } + + @Test(groups = "unit") + public void should_create_table_with_speculative_retry_in_milli_secs() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.uuid()) - .addColumn("ADD", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The static column name 'ADD' is not allowed because it is a reserved keyword") - public void should_fail_if_static_column_is_a_reserved_keyword() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .speculativeRetry(millisecs(12)); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))\n\t" + + "WITH speculative_retry = '12ms'"); + } + + @Test(groups = "unit") + public void should_create_table_with_cdc_true() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.uuid()) - .addStaticColumn("ADD", DataType.text()) - .addColumn("col", DataType.text()).getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalStateException.class, - expectedExceptionsMessageRegExp = "Cannot create table 'test' with compact storage and static columns '\\[stat\\]'") - public void should_fail_creating_table_with_static_columns_and_compact_storage() throws Exception { + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .cdc(true); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))\n\t" + + "WITH cdc = true"); + } + + @Test(groups = "unit") + public void should_create_table_with_cdc_false() throws Exception { + // When + SchemaStatement statement = createTable("test") - .addPartitionKey("pk", DataType.bigint()) - .addClusteringColumn("cluster", DataType.uuid()) - .addStaticColumn("stat", DataType.text()) - .withOptions().compactStorage().getQueryString(); - } + .addPartitionKey("id", DataType.bigint()) + .addColumn("name", DataType.text()) + .withOptions() + .cdc(false); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TABLE test(\n\t\t" + + "id bigint,\n\t\t" + + "name text,\n\t\t" + + "PRIMARY KEY(id))\n\t" + + "WITH cdc = false"); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[pk\\]' columns can not be declared as partition keys and clustering keys at the same time") + public void should_fail_if_same_partition_and_clustering_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("pk", DataType.bigint()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[pk\\]' columns can not be declared as partition keys and simple columns at the same time") + public void should_fail_if_same_partition_and_simple_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addColumn("pk", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + 
expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[cluster\\]' columns can not be declared as clustering keys and simple columns at the same time") + public void should_fail_if_same_clustering_and_simple_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.bigint()) + .addColumn("cluster", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[pk\\]' columns can not be declared as partition keys and static columns at the same time") + public void should_fail_if_same_partition_and_static_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addStaticColumn("pk", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[cluster\\]' columns can not be declared as clustering keys and static columns at the same time") + public void should_fail_if_same_clustering_and_static_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.bigint()) + .addStaticColumn("cluster", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The '\\[col\\]' columns can not be declared as simple columns and static columns at the same time") + public void should_fail_if_same_simple_and_static_column() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.uuid()) + .addColumn("col", DataType.bigint()) + .addStaticColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "The table 'test' cannot declare static columns '\\[stat\\]' without clustering columns") + public void should_fail_if_static_column_in_non_clustered_table() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addStaticColumn("stat", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The keyspace name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { + createTable("ADD", "test") + .addPartitionKey("pk", DataType.bigint()) + .addColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The table name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { + createTable("ADD") + .addPartitionKey("pk", DataType.bigint()) + .addColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The partition key name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_partition_key_is_a_reserved_keyword() throws Exception { + createTable("test") + .addPartitionKey("ADD", DataType.bigint()) + 
.addColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The clustering column name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_clustering_key_is_a_reserved_keyword() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("ADD", DataType.uuid()) + .addColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The column name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_simple_column_is_a_reserved_keyword() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.uuid()) + .addColumn("ADD", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The static column name 'ADD' is not allowed because it is a reserved keyword") + public void should_fail_if_static_column_is_a_reserved_keyword() throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.uuid()) + .addStaticColumn("ADD", DataType.text()) + .addColumn("col", DataType.text()) + .getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalStateException.class, + expectedExceptionsMessageRegExp = + "Cannot create table 'test' with compact storage and static columns '\\[stat\\]'") + public void should_fail_creating_table_with_static_columns_and_compact_storage() + throws Exception { + createTable("test") + .addPartitionKey("pk", DataType.bigint()) + .addClusteringColumn("cluster", DataType.uuid()) + .addStaticColumn("stat", DataType.text()) + .withOptions() + .compactStorage() + .getQueryString(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTypeTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTypeTest.java index fe4cd402a96..e7936ac177f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTypeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/CreateTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,129 +17,149 @@ */ package com.datastax.driver.core.schemabuilder; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.createType; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.frozen; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.udtLiteral; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.DataType; import org.testng.annotations.Test; -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; - public class CreateTypeTest { - @Test(groups = "unit") - public void should_create_UDT() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addColumn("col2", DataType.bigint()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "col2 bigint)"); - } - - @Test(groups = "unit") - public void should_create_UDT_if_not_exists() throws Exception { - //When - SchemaStatement statement = createType("myType") - .ifNotExists() - .addColumn("col1", DataType.text()) - .addColumn("col2", DataType.bigint()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE IF NOT EXISTS myType(\n\t\t" + - "col1 text,\n\t\t" + - "col2 bigint)"); - } - - @Test(groups = "unit") - public void should_create_simple_UDT_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTColumn("my_udt", frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt frozen
    )"); - } - - @Test(groups = "unit") - public void should_create_list_UDT_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTListColumn("my_udt", frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt list<frozen<address>>)"); - } - - @Test(groups = "unit") - public void should_create_set_UDT_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTSetColumn("my_udt", frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt set<frozen<address>>)"); - } - - @Test(groups = "unit") - public void should_create_key_UDT_map_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTMapColumn("my_udt", frozen("address"), DataType.text()); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt map<frozen<address>, text>)"); - } - - @Test(groups = "unit") - public void should_create_value_UDT_map_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTMapColumn("my_udt", DataType.cint(), frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt map<int, frozen<address>>)"); - } - - @Test(groups = "unit") - public void should_create_key_value_UDT_map_column() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTMapColumn("my_udt", frozen("coords"), frozen("address")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt map<frozen<coords>, frozen<address>
    >)"); - } - - @Test(groups = "unit") - public void should_create_column_with_manual_type() throws Exception { - //When - SchemaStatement statement = createType("ks", "myType") - .addColumn("col1", DataType.text()) - .addUDTColumn("my_udt", udtLiteral("frozen<address>
    ")); - - //Then - assertThat(statement.getQueryString()).isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + - "col1 text,\n\t\t" + - "my_udt frozen<address>
    )"); - } -} \ No newline at end of file + @Test(groups = "unit") + public void should_create_UDT() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addColumn("col2", DataType.bigint()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo("\n\tCREATE TYPE ks.myType(\n\t\t" + "col1 text,\n\t\t" + "col2 bigint)"); + } + + @Test(groups = "unit") + public void should_create_UDT_if_not_exists() throws Exception { + // When + SchemaStatement statement = + createType("myType") + .ifNotExists() + .addColumn("col1", DataType.text()) + .addColumn("col2", DataType.bigint()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE IF NOT EXISTS myType(\n\t\t" + "col1 text,\n\t\t" + "col2 bigint)"); + } + + @Test(groups = "unit") + public void should_create_simple_UDT_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTColumn("my_udt", frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + "col1 text,\n\t\t" + "my_udt frozen
    )"); + } + + @Test(groups = "unit") + public void should_create_list_UDT_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTListColumn("my_udt", frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + + "col1 text,\n\t\t" + + "my_udt list<frozen<address>>)"); + } + + @Test(groups = "unit") + public void should_create_set_UDT_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTSetColumn("my_udt", frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + + "col1 text,\n\t\t" + + "my_udt set<frozen<address>>)"); + } + + @Test(groups = "unit") + public void should_create_key_UDT_map_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTMapColumn("my_udt", frozen("address"), DataType.text()); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + + "col1 text,\n\t\t" + + "my_udt map<frozen<address>, text>)"); + } + + @Test(groups = "unit") + public void should_create_value_UDT_map_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTMapColumn("my_udt", DataType.cint(), frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + + "col1 text,\n\t\t" + + "my_udt map<int, frozen<address>>)"); + } + + @Test(groups = "unit") + public void should_create_key_value_UDT_map_column() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTMapColumn("my_udt", frozen("coords"), frozen("address")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + + "col1 text,\n\t\t" + + "my_udt map<frozen<coords>, frozen<address>
    >)"); + } + + @Test(groups = "unit") + public void should_create_column_with_manual_type() throws Exception { + // When + SchemaStatement statement = + createType("ks", "myType") + .addColumn("col1", DataType.text()) + .addUDTColumn("my_udt", udtLiteral("frozen<address>
    ")); + + // Then + assertThat(statement.getQueryString()) + .isEqualTo( + "\n\tCREATE TYPE ks.myType(\n\t\t" + "col1 text,\n\t\t" + "my_udt frozen<address>
    )"); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropKeyspaceTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropKeyspaceTest.java index b8329c06799..86a4ae6ceb3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropKeyspaceTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,28 +17,28 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; - import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropKeyspace; import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; + public class DropKeyspaceTest { - @Test(groups = "unit") - public void should_drop_keyspace() throws Exception { - //When - SchemaStatement statement = dropKeyspace("test"); + @Test(groups = "unit") + public void should_drop_keyspace() throws Exception { + // When + SchemaStatement statement = dropKeyspace("test"); - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP KEYSPACE test"); - } + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP KEYSPACE test"); + } - @Test(groups = "unit") - public void should_drop_keyspace_if_exists() throws Exception { - //When - SchemaStatement statement = dropKeyspace("test").ifExists(); + @Test(groups = "unit") + public void should_drop_keyspace_if_exists() throws Exception { + // When + SchemaStatement statement = dropKeyspace("test").ifExists(); - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP KEYSPACE IF EXISTS test"); - } + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP KEYSPACE IF EXISTS test"); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropTest.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropTest.java index 708a50b5dba..46d33c4b00e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/DropTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,103 +17,111 @@ */ package com.datastax.driver.core.schemabuilder; -import org.testng.annotations.Test; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropIndex; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropTable; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dropType; import static org.assertj.core.api.Assertions.assertThat; +import org.testng.annotations.Test; + public class DropTest { - @Test(groups = "unit") - public void should_drop_table() throws Exception { - //When - SchemaStatement statement = dropTable("test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TABLE test"); - } - - @Test(groups = "unit") - public void should_drop_table_with_keyspace() throws Exception { - //When - SchemaStatement statement = dropTable("ks", "test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TABLE ks.test"); - } - - @Test(groups = "unit") - public void should_drop_table_with_keyspace_if_exists() throws Exception { - //When - SchemaStatement statement = dropTable("ks", "test").ifExists(); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TABLE IF EXISTS ks.test"); - } - - @Test(groups = "unit") - public void should_drop_type() throws Exception { - //When - SchemaStatement statement = dropType("test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TYPE test"); - } - - @Test(groups = "unit") - public void should_drop_type_with_keyspace() throws Exception { - //When - SchemaStatement statement = dropType("ks", "test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TYPE ks.test"); - } - - @Test(groups = "unit") - public void should_drop_type_with_keyspace_if_exists() throws Exception { - //When - SchemaStatement statement = dropType("ks", "test").ifExists(); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP TYPE IF EXISTS ks.test"); - } - - @Test(groups = "unit") - public void should_drop_index() throws Exception { - //When - SchemaStatement statement = dropIndex("test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP INDEX test"); - } - - @Test(groups = "unit") - public void should_drop_index_with_keyspace() throws Exception { - //When - SchemaStatement statement = dropIndex("ks", "test"); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP INDEX ks.test"); - } - - @Test(groups = "unit") - public void should_drop_index_with_keyspace_if_exists() throws Exception { - //When - SchemaStatement statement = dropIndex("ks", "test").ifExists(); - - //Then - assertThat(statement.getQueryString()).isEqualTo("DROP INDEX IF EXISTS ks.test"); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The keyspace name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { - dropTable("add", 
"test").getQueryString(); - } - - @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "The table name 'add' is not allowed because it is a reserved keyword") - public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { - dropTable("add").getQueryString(); - } + @Test(groups = "unit") + public void should_drop_table() throws Exception { + // When + SchemaStatement statement = dropTable("test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TABLE test"); + } + + @Test(groups = "unit") + public void should_drop_table_with_keyspace() throws Exception { + // When + SchemaStatement statement = dropTable("ks", "test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TABLE ks.test"); + } + + @Test(groups = "unit") + public void should_drop_table_with_keyspace_if_exists() throws Exception { + // When + SchemaStatement statement = dropTable("ks", "test").ifExists(); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TABLE IF EXISTS ks.test"); + } + + @Test(groups = "unit") + public void should_drop_type() throws Exception { + // When + SchemaStatement statement = dropType("test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TYPE test"); + } + + @Test(groups = "unit") + public void should_drop_type_with_keyspace() throws Exception { + // When + SchemaStatement statement = dropType("ks", "test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TYPE ks.test"); + } + + @Test(groups = "unit") + public void should_drop_type_with_keyspace_if_exists() throws Exception { + // When + SchemaStatement statement = dropType("ks", "test").ifExists(); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP TYPE IF EXISTS ks.test"); + } + + @Test(groups = "unit") + public void should_drop_index() throws Exception { + // When + SchemaStatement statement = dropIndex("test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP INDEX test"); + } + + @Test(groups = "unit") + public void should_drop_index_with_keyspace() throws Exception { + // When + SchemaStatement statement = dropIndex("ks", "test"); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP INDEX ks.test"); + } + + @Test(groups = "unit") + public void should_drop_index_with_keyspace_if_exists() throws Exception { + // When + SchemaStatement statement = dropIndex("ks", "test").ifExists(); + + // Then + assertThat(statement.getQueryString()).isEqualTo("DROP INDEX IF EXISTS ks.test"); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The keyspace name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_keyspace_name_is_a_reserved_keyword() throws Exception { + dropTable("add", "test").getQueryString(); + } + + @Test( + groups = "unit", + expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = + "The table name 'add' is not allowed because it is a reserved keyword") + public void should_fail_if_table_name_is_a_reserved_keyword() throws Exception { + dropTable("add").getQueryString(); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/SchemaBuilderIT.java b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/SchemaBuilderIT.java index 14ce5073d73..42b8cc21290 100644 --- 
a/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/SchemaBuilderIT.java +++ b/driver-core/src/test/java/com/datastax/driver/core/schemabuilder/SchemaBuilderIT.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,37 +17,42 @@ */ package com.datastax.driver.core.schemabuilder; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.dateTieredStrategy; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.percentile; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.rows; +import static com.datastax.driver.core.schemabuilder.SchemaBuilder.snappy; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + import com.datastax.driver.core.CCMTestsSupport; import com.datastax.driver.core.DataType; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Row; import com.datastax.driver.core.schemabuilder.TableOptions.CompactionOptions.DateTieredCompactionStrategyOptions.TimeStampResolution; import com.datastax.driver.core.utils.CassandraVersion; -import org.testng.annotations.Test; - import java.util.Iterator; - -import static com.datastax.driver.core.schemabuilder.SchemaBuilder.*; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import org.testng.annotations.Test; public class SchemaBuilderIT extends CCMTestsSupport { - @Test(groups = "short") - @CassandraVersion("2.1.2") - public void should_modify_table_metadata() { - // Create a table - session().execute(SchemaBuilder.createTable("ks", "TableMetadata") - .addPartitionKey("a", DataType.cint()) - .addPartitionKey("b", DataType.cint()) - .addClusteringColumn("c", DataType.cint()) - .addClusteringColumn("d", DataType.cint()) - .withOptions() - .compactStorage() - ); + @Test(groups = "short") + @CassandraVersion("2.1.2") + public void should_modify_table_metadata() { + // Create a table + session() + .execute( + SchemaBuilder.createTable("ks", "TableMetadata") + .addPartitionKey("a", DataType.cint()) + .addPartitionKey("b", DataType.cint()) + .addClusteringColumn("c", DataType.cint()) + .addClusteringColumn("d", DataType.cint()) + .withOptions() + .compactStorage()); - // Modify the table metadata - session().execute(SchemaBuilder.alterTable("TableMetadata") + // Modify the table metadata + session() + .execute( + SchemaBuilder.alterTable("TableMetadata") .withOptions() .defaultTimeToLive(1337) .bloomFilterFPChance(0.42) @@ -59,7 +66,8 @@ public void should_modify_table_metadata() { .speculativeRetry(percentile(50)) .dcLocalReadRepairChance(0.84) .memtableFlushPeriodInMillis(1234567890) - 
.compactionOptions(dateTieredStrategy() + .compactionOptions( + dateTieredStrategy() .baseTimeSeconds(1) .minThreshold(2) .maxThreshold(3) @@ -67,249 +75,284 @@ public void should_modify_table_metadata() { .timestampResolution(TimeStampResolution.MILLISECONDS)) .compressionOptions(snappy())); - // Retrieve the metadata from Cassandra - ResultSet rows = session().execute("SELECT " - + "bloom_filter_fp_chance, " - + "caching, " - + "cf_id, " - + "column_aliases, " - + "comment, " - + "compaction_strategy_class, " - + "compaction_strategy_options, " - + "comparator, " - + "compression_parameters, " - + "default_time_to_live, " - + "default_validator, " - + "dropped_columns, " - + "gc_grace_seconds, " - + "index_interval, " - + "is_dense, " - + "key_aliases, " - + "key_validator, " - + "local_read_repair_chance, " - + "max_compaction_threshold, " - + "max_index_interval, " - + "memtable_flush_period_in_ms, " - + "min_compaction_threshold, " - + "min_index_interval, " - + "read_repair_chance, " - + "speculative_retry, " - + "subcomparator, " - + "type, " - + "value_alias " - + "FROM system.schema_columnfamilies " - + "WHERE keyspace_name='ks' AND columnfamily_name='tablemetadata'"); - for (Row row : rows) { - // There should be only one row - // Verify that every property we modified is correctly set - assertThat(row.getDouble("bloom_filter_fp_chance")).isEqualTo(0.42); - assertThat(row.getString("caching")).isEqualTo("{\"keys\":\"ALL\", \"rows_per_partition\":\"1\"}"); - assertThat(row.getUUID("cf_id")).isNotNull(); - assertThat(row.getString("column_aliases")).isEqualTo("[\"c\",\"d\"]"); - assertThat(row.getString("comment")).isEqualTo("Useful comment"); - assertThat(row.getString("compaction_strategy_class")) - .isEqualTo("org.apache.cassandra.db.compaction.DateTieredCompactionStrategy"); - assertThat(row.getString("compaction_strategy_options")).isEqualTo( - "{\"base_time_seconds\":\"1\",\"timestamp_resolution\":\"MILLISECONDS\",\"max_sstable_age_days\":\"4\",\"min_threshold\":\"2\",\"max_threshold\":\"3\"}"); - assertThat(row.getString("compression_parameters")) - .isEqualTo("{\"sstable_compression\":\"org.apache.cassandra.io.compress.SnappyCompressor\"}"); - assertThat(row.getInt("default_time_to_live")).isEqualTo(1337); - assertThat(row.getInt("gc_grace_seconds")).isEqualTo(1234567890); - assertThat(row.getInt("min_index_interval")).isEqualTo(6); - assertThat(row.getInt("max_index_interval")).isEqualTo(8); - assertThat(row.getString("key_aliases")).isEqualTo("[\"a\",\"b\"]"); - assertThat(row.getDouble("local_read_repair_chance")).isEqualTo(0.84); - assertThat(row.getInt("max_compaction_threshold")).isEqualTo(3); - assertThat(row.getInt("memtable_flush_period_in_ms")).isEqualTo(1234567890); - assertThat(row.getInt("min_compaction_threshold")).isEqualTo(2); - assertThat(row.getDouble("read_repair_chance")).isEqualTo(0.123456); - assertThat(row.getString("speculative_retry")).isEqualTo("50.0PERCENTILE"); - } + // Retrieve the metadata from Cassandra + ResultSet rows = + session() + .execute( + "SELECT " + + "bloom_filter_fp_chance, " + + "caching, " + + "cf_id, " + + "column_aliases, " + + "comment, " + + "compaction_strategy_class, " + + "compaction_strategy_options, " + + "comparator, " + + "compression_parameters, " + + "default_time_to_live, " + + "default_validator, " + + "dropped_columns, " + + "gc_grace_seconds, " + + "index_interval, " + + "is_dense, " + + "key_aliases, " + + "key_validator, " + + "local_read_repair_chance, " + + "max_compaction_threshold, " + + 
"max_index_interval, " + + "memtable_flush_period_in_ms, " + + "min_compaction_threshold, " + + "min_index_interval, " + + "read_repair_chance, " + + "speculative_retry, " + + "subcomparator, " + + "type, " + + "value_alias " + + "FROM system.schema_columnfamilies " + + "WHERE keyspace_name='ks' AND columnfamily_name='tablemetadata'"); + for (Row row : rows) { + // There should be only one row + // Verify that every property we modified is correctly set + assertThat(row.getDouble("bloom_filter_fp_chance")).isEqualTo(0.42); + assertThat(row.getString("caching")) + .isEqualTo("{\"keys\":\"ALL\", \"rows_per_partition\":\"1\"}"); + assertThat(row.getUUID("cf_id")).isNotNull(); + assertThat(row.getString("column_aliases")).isEqualTo("[\"c\",\"d\"]"); + assertThat(row.getString("comment")).isEqualTo("Useful comment"); + assertThat(row.getString("compaction_strategy_class")) + .isEqualTo("org.apache.cassandra.db.compaction.DateTieredCompactionStrategy"); + assertThat(row.getString("compaction_strategy_options")) + .isEqualTo( + "{\"base_time_seconds\":\"1\",\"timestamp_resolution\":\"MILLISECONDS\",\"max_sstable_age_days\":\"4\",\"min_threshold\":\"2\",\"max_threshold\":\"3\"}"); + assertThat(row.getString("compression_parameters")) + .isEqualTo( + "{\"sstable_compression\":\"org.apache.cassandra.io.compress.SnappyCompressor\"}"); + assertThat(row.getInt("default_time_to_live")).isEqualTo(1337); + assertThat(row.getInt("gc_grace_seconds")).isEqualTo(1234567890); + assertThat(row.getInt("min_index_interval")).isEqualTo(6); + assertThat(row.getInt("max_index_interval")).isEqualTo(8); + assertThat(row.getString("key_aliases")).isEqualTo("[\"a\",\"b\"]"); + assertThat(row.getDouble("local_read_repair_chance")).isEqualTo(0.84); + assertThat(row.getInt("max_compaction_threshold")).isEqualTo(3); + assertThat(row.getInt("memtable_flush_period_in_ms")).isEqualTo(1234567890); + assertThat(row.getInt("min_compaction_threshold")).isEqualTo(2); + assertThat(row.getDouble("read_repair_chance")).isEqualTo(0.123456); + assertThat(row.getString("speculative_retry")).isEqualTo("50.0PERCENTILE"); } + } - @Test(groups = "short") - @CassandraVersion("2.1.0") - public void should_create_a_table_and_a_udt() { - // Create a UDT and a table - session().execute(SchemaBuilder.createType("MyUDT") - .ifNotExists() - .addColumn("x", DataType.cint()) - ); - UDTType myUDT = UDTType.frozen("MyUDT"); - session().execute(SchemaBuilder.createTable("ks", "CreateTable") - .ifNotExists() - .addPartitionKey("a", DataType.cint()) - .addUDTPartitionKey("b", myUDT) - .addClusteringColumn("c", DataType.ascii()) - .addUDTClusteringColumn("d", myUDT) - .addUDTColumn("e", myUDT) - .addStaticColumn("f", DataType.bigint()) - .addUDTStaticColumn("g", myUDT) - .addUDTListColumn("h", myUDT) - .addUDTMapColumn("i", DataType.cboolean(), myUDT) - .addUDTMapColumn("j", myUDT, DataType.cboolean()) - .addUDTSetColumn("k", myUDT) - ); + @Test(groups = "short") + @CassandraVersion("2.1.0") + public void should_create_a_table_and_a_udt() { + // Create a UDT and a table + session() + .execute(SchemaBuilder.createType("MyUDT").ifNotExists().addColumn("x", DataType.cint())); + UDTType myUDT = UDTType.frozen("MyUDT"); + session() + .execute( + SchemaBuilder.createTable("ks", "CreateTable") + .ifNotExists() + .addPartitionKey("a", DataType.cint()) + .addUDTPartitionKey("b", myUDT) + .addClusteringColumn("c", DataType.ascii()) + .addUDTClusteringColumn("d", myUDT) + .addUDTColumn("e", myUDT) + .addStaticColumn("f", DataType.bigint()) + .addUDTStaticColumn("g", 
myUDT) + .addUDTListColumn("h", myUDT) + .addUDTMapColumn("i", DataType.cboolean(), myUDT) + .addUDTMapColumn("j", myUDT, DataType.cboolean()) + .addUDTSetColumn("k", myUDT)); - // Check columns a to k - ResultSet rows = session().execute( + // Check columns a to k + ResultSet rows = + session() + .execute( "SELECT column_name, type, validator " - + "FROM system.schema_columns " - + "WHERE keyspace_name='ks' AND columnfamily_name='createtable'"); - Iterator iterator = rows.iterator(); - verifyNextColumnDefinition(iterator, "a", "partition_key", "org.apache.cassandra.db.marshal.Int32Type"); - verifyNextColumnDefinition(iterator, "b", "partition_key", "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "c", "clustering_key", "org.apache.cassandra.db.marshal.AsciiType"); - verifyNextColumnDefinition(iterator, "d", "clustering_key", "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "e", "regular", "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "f", "static", "org.apache.cassandra.db.marshal.LongType"); - verifyNextColumnDefinition(iterator, "g", "static", "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "h", "regular", "org.apache.cassandra.db.marshal.ListType" - , "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "i", "regular", "org.apache.cassandra.db.marshal.MapType" - , "org.apache.cassandra.db.marshal.BooleanType", "org.apache.cassandra.db.marshal.UserType"); - verifyNextColumnDefinition(iterator, "j", "regular", "org.apache.cassandra.db.marshal.MapType" - , "org.apache.cassandra.db.marshal.UserType", "org.apache.cassandra.db.marshal.BooleanType"); - verifyNextColumnDefinition(iterator, "k", "regular", "org.apache.cassandra.db.marshal.SetType" - , "org.apache.cassandra.db.marshal.UserType"); - } + + "FROM system.schema_columns " + + "WHERE keyspace_name='ks' AND columnfamily_name='createtable'"); + Iterator iterator = rows.iterator(); + verifyNextColumnDefinition( + iterator, "a", "partition_key", "org.apache.cassandra.db.marshal.Int32Type"); + verifyNextColumnDefinition( + iterator, "b", "partition_key", "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition( + iterator, "c", "clustering_key", "org.apache.cassandra.db.marshal.AsciiType"); + verifyNextColumnDefinition( + iterator, "d", "clustering_key", "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition( + iterator, "e", "regular", "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition(iterator, "f", "static", "org.apache.cassandra.db.marshal.LongType"); + verifyNextColumnDefinition(iterator, "g", "static", "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition( + iterator, + "h", + "regular", + "org.apache.cassandra.db.marshal.ListType", + "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition( + iterator, + "i", + "regular", + "org.apache.cassandra.db.marshal.MapType", + "org.apache.cassandra.db.marshal.BooleanType", + "org.apache.cassandra.db.marshal.UserType"); + verifyNextColumnDefinition( + iterator, + "j", + "regular", + "org.apache.cassandra.db.marshal.MapType", + "org.apache.cassandra.db.marshal.UserType", + "org.apache.cassandra.db.marshal.BooleanType"); + verifyNextColumnDefinition( + iterator, + "k", + "regular", + "org.apache.cassandra.db.marshal.SetType", + "org.apache.cassandra.db.marshal.UserType"); + } - @Test(groups = "short") - public void 
should_add_and_drop_a_column() { - // Create a table, add a column to it with an alter table statement and delete that column - session().execute(SchemaBuilder.createTable("ks", "DropColumn") - .ifNotExists() - .addPartitionKey("a", DataType.cint()) - ); + @Test(groups = "short") + public void should_add_and_drop_a_column() { + // Create a table, add a column to it with an alter table statement and delete that column + session() + .execute( + SchemaBuilder.createTable("ks", "DropColumn") + .ifNotExists() + .addPartitionKey("a", DataType.cint())); - // Add and then drop a column - session().execute(SchemaBuilder.alterTable("ks", "DropColumn") - .addColumn("b") - .type(DataType.cint()) - ); - session().execute(SchemaBuilder.alterTable("ks", "DropColumn") - .dropColumn("b") - ); + // Add and then drop a column + session() + .execute(SchemaBuilder.alterTable("ks", "DropColumn").addColumn("b").type(DataType.cint())); + session().execute(SchemaBuilder.alterTable("ks", "DropColumn").dropColumn("b")); - // Check that only column a exist - ResultSet rows = session().execute( + // Check that only column a exist + ResultSet rows = + session() + .execute( "SELECT column_name, type, validator " - + "FROM system.schema_columns " - + "WHERE keyspace_name='ks' AND columnfamily_name='dropcolumn'"); - Iterator iterator = rows.iterator(); - verifyNextColumnDefinition(iterator, "a", "partition_key", "org.apache.cassandra.db.marshal.Int32Type"); - assertThat(iterator.hasNext()).isFalse(); - } + + "FROM system.schema_columns " + + "WHERE keyspace_name='ks' AND columnfamily_name='dropcolumn'"); + Iterator iterator = rows.iterator(); + verifyNextColumnDefinition( + iterator, "a", "partition_key", "org.apache.cassandra.db.marshal.Int32Type"); + assertThat(iterator.hasNext()).isFalse(); + } - private void verifyNextColumnDefinition(Iterator rowIterator, String columnName, String type, - String... validatorFragments) { - Row rowA = rowIterator.next(); - assertThat(rowA.getString("column_name")).isEqualTo(columnName); - assertThat(rowA.getString("type")).isEqualTo(type); - for (String validatorFragment : validatorFragments) { - assertThat(rowA.getString("validator")).contains(validatorFragment); - } + private void verifyNextColumnDefinition( + Iterator rowIterator, String columnName, String type, String... 
validatorFragments) { + Row rowA = rowIterator.next(); + assertThat(rowA.getString("column_name")).isEqualTo(columnName); + assertThat(rowA.getString("type")).isEqualTo(type); + for (String validatorFragment : validatorFragments) { + assertThat(rowA.getString("validator")).contains(validatorFragment); } + } - @Test(groups = "short") - public void should_drop_a_table() { - // Create a table - session().execute(SchemaBuilder.createTable("ks", "DropTable") - .addPartitionKey("a", DataType.cint()) - ); + @Test(groups = "short") + public void should_drop_a_table() { + // Create a table + session() + .execute( + SchemaBuilder.createTable("ks", "DropTable").addPartitionKey("a", DataType.cint())); - // Drop the table - session().execute(SchemaBuilder.dropTable("ks", "DropTable")); - session().execute(SchemaBuilder.dropTable("DropTable").ifExists()); + // Drop the table + session().execute(SchemaBuilder.dropTable("ks", "DropTable")); + session().execute(SchemaBuilder.dropTable("DropTable").ifExists()); - ResultSet rows = session().execute( + ResultSet rows = + session() + .execute( "SELECT columnfamily_name " - + "FROM system.schema_columnfamilies " - + "WHERE keyspace_name='ks' AND columnfamily_name='droptable'"); - if (rows.iterator().hasNext()) { - fail("This table should have been deleted"); - } + + "FROM system.schema_columnfamilies " + + "WHERE keyspace_name='ks' AND columnfamily_name='droptable'"); + if (rows.iterator().hasNext()) { + fail("This table should have been deleted"); } + } - @Test(groups = "short") - public void should_create_an_index() { - // Create a table - session().execute(SchemaBuilder.createTable("ks", "CreateIndex") - .addPartitionKey("a", DataType.cint()) - .addClusteringColumn("b", DataType.cint()) - .addColumn("c", DataType.map(DataType.cint(), DataType.cint())) - ); + @Test(groups = "short") + public void should_create_an_index() { + // Create a table + session() + .execute( + SchemaBuilder.createTable("ks", "CreateIndex") + .addPartitionKey("a", DataType.cint()) + .addClusteringColumn("b", DataType.cint()) + .addColumn("c", DataType.map(DataType.cint(), DataType.cint()))); - // Create an index on a regular column of the table - session().execute(SchemaBuilder.createIndex("ks_Index") - .onTable("ks", "CreateIndex") - .andColumn("b") - ); - session().execute(SchemaBuilder.createIndex("ks_IndexOnMap") - .onTable("ks", "CreateIndex") - .andKeysOfColumn("c") - ); + // Create an index on a regular column of the table + session() + .execute(SchemaBuilder.createIndex("ks_Index").onTable("ks", "CreateIndex").andColumn("b")); + session() + .execute( + SchemaBuilder.createIndex("ks_IndexOnMap") + .onTable("ks", "CreateIndex") + .andKeysOfColumn("c")); - // Verify that the indexes exist on the right columns - ResultSet rows = session().execute( + // Verify that the indexes exist on the right columns + ResultSet rows = + session() + .execute( "SELECT column_name, index_name, index_options, index_type, component_index " - + "FROM system.schema_columns " - + "WHERE keyspace_name='ks' " - + "AND columnfamily_name='createindex' " - + "AND column_name IN ('b', 'c')"); - Iterator iterator = rows.iterator(); - verifyNextIndexDefinition(iterator, "ks_Index", "{}", "COMPOSITES", 0); - verifyNextIndexDefinition(iterator, "ks_IndexOnMap", "{\"index_keys\":\"\"}", "COMPOSITES", 1); - assertThat(iterator.hasNext()).isFalse(); - } + + "FROM system.schema_columns " + + "WHERE keyspace_name='ks' " + + "AND columnfamily_name='createindex' " + + "AND column_name IN ('b', 'c')"); + Iterator 
iterator = rows.iterator(); + verifyNextIndexDefinition(iterator, "ks_Index", "{}", "COMPOSITES", 0); + verifyNextIndexDefinition(iterator, "ks_IndexOnMap", "{\"index_keys\":\"\"}", "COMPOSITES", 1); + assertThat(iterator.hasNext()).isFalse(); + } - private void verifyNextIndexDefinition(Iterator iterator, String name, String options, String type, - int index) { - Row nextIndex = iterator.next(); - assertThat(nextIndex.getString("index_name")).isEqualTo(name); - assertThat(nextIndex.getString("index_options")).isEqualTo(options); - assertThat(nextIndex.getString("index_type")).isEqualTo(type); - assertThat(nextIndex.getInt("component_index")).isEqualTo(index); - } + private void verifyNextIndexDefinition( + Iterator iterator, String name, String options, String type, int index) { + Row nextIndex = iterator.next(); + assertThat(nextIndex.getString("index_name")).isEqualTo(name); + assertThat(nextIndex.getString("index_options")).isEqualTo(options); + assertThat(nextIndex.getString("index_type")).isEqualTo(type); + assertThat(nextIndex.getInt("component_index")).isEqualTo(index); + } - @Test(groups = "short") - public void should_drop_an_index() { - // Create a table - session().execute(SchemaBuilder.createTable("ks", "DropIndex") - .addPartitionKey("a", DataType.cint()) - .addClusteringColumn("b", DataType.cint()) - ); + @Test(groups = "short") + public void should_drop_an_index() { + // Create a table + session() + .execute( + SchemaBuilder.createTable("ks", "DropIndex") + .addPartitionKey("a", DataType.cint()) + .addClusteringColumn("b", DataType.cint())); - // Create an index - // Note: we have to pick a lower-case name because Cassandra uses the CamelCase index name at creation - // but a lowercase index name at deletion - // See : https://issues.apache.org/jira/browse/CASSANDRA-8365 - session().execute(SchemaBuilder.createIndex("ks_index") - .onTable("ks", "DropIndex") - .andColumn("b") - ); + // Create an index + // Note: we have to pick a lower-case name because Cassandra uses the CamelCase index name at + // creation + // but a lowercase index name at deletion + // See : https://issues.apache.org/jira/browse/CASSANDRA-8365 + session() + .execute(SchemaBuilder.createIndex("ks_index").onTable("ks", "DropIndex").andColumn("b")); - // Verify that the PK index and the secondary indexes both exist - assertThat(numberOfIndexedColumns()).isEqualTo(1); + // Verify that the PK index and the secondary indexes both exist + assertThat(numberOfIndexedColumns()).isEqualTo(1); - // Delete the index - session().execute(SchemaBuilder.dropIndex("ks", "ks_index")); + // Delete the index + session().execute(SchemaBuilder.dropIndex("ks", "ks_index")); - // Verify that only the PK index exists - assertThat(numberOfIndexedColumns()).isEqualTo(0); - } + // Verify that only the PK index exists + assertThat(numberOfIndexedColumns()).isEqualTo(0); + } - private int numberOfIndexedColumns() { - ResultSet columns = session().execute( + private int numberOfIndexedColumns() { + ResultSet columns = + session() + .execute( "SELECT * " - + "FROM system.schema_columns " - + "WHERE keyspace_name='ks' " - + "AND columnfamily_name='dropindex' "); - int count = 0; - for (Row column : columns) { - if (column.getString("index_name") != null) - count += 1; - } - return count; + + "FROM system.schema_columns " + + "WHERE keyspace_name='ks' " + + "AND columnfamily_name='dropindex' "); + int count = 0; + for (Row column : columns) { + if (column.getString("index_name") != null) count += 1; } + return count; + } } diff --git 
a/driver-core/src/test/java/com/datastax/driver/core/utils/CassandraVersion.java b/driver-core/src/test/java/com/datastax/driver/core/utils/CassandraVersion.java index b1a50b82628..93d3cfe3a2d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/CassandraVersion.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/CassandraVersion.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,21 +21,18 @@ import java.lang.annotation.RetentionPolicy; /** - *

Annotation for a Class or Method that defines a Cassandra Version requirement. If the cassandra version in use - * does not meet the version requirement, the test is skipped.
    + * Annotation for a Class or Method that defines a Cassandra Version requirement. If the cassandra + * version in use does not meet the version requirement, the test is skipped. * - * @see com.datastax.driver.core.TestListener#beforeInvocation(org.testng.IInvokedMethod, org.testng.ITestResult) + * @see com.datastax.driver.core.TestListener#beforeInvocation(org.testng.IInvokedMethod, + * org.testng.ITestResult) */ @Retention(RetentionPolicy.RUNTIME) public @interface CassandraVersion { - /** - * @return The minimum version required to execute this test, i.e. "2.0.13" - */ - String value(); + /** @return The minimum version required to execute this test, i.e. "2.0.13" */ + String value(); - /** - * @return The description returned if this version requirement is not met. - */ - String description() default "Does not meet minimum version requirement."; + /** @return The description returned if this version requirement is not met. */ + String description() default "Does not meet minimum version requirement."; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/DseVersion.java b/driver-core/src/test/java/com/datastax/driver/core/utils/DseVersion.java index bd1955e2b72..204c9a72215 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/DseVersion.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/DseVersion.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,21 +21,18 @@ import java.lang.annotation.RetentionPolicy; /** - *

Annotation for a Class or Method that defines a DataStax Enterprise Version requirement. - * If the version in use does not meet the version requirement or DSE is not used, the test is skipped.
    + * Annotation for a Class or Method that defines a DataStax Enterprise Version requirement. If the + * version in use does not meet the version requirement or DSE is not used, the test is skipped. * - * @see com.datastax.driver.core.TestListener#beforeInvocation(org.testng.IInvokedMethod, org.testng.ITestResult) + * @see com.datastax.driver.core.TestListener#beforeInvocation(org.testng.IInvokedMethod, + * org.testng.ITestResult) */ @Retention(RetentionPolicy.RUNTIME) public @interface DseVersion { - /** - * @return The minimum version required to execute this test, i.e. "2.0.13" - */ - String value(); + /** @return The minimum version required to execute this test, i.e. "2.0.13" */ + String value(); - /** - * @return The description returned if this version requirement is not met. - */ - String description() default "Does not meet minimum version requirement."; + /** @return The description returned if this version requirement is not met. */ + String description() default "Does not meet minimum version requirement."; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/SocketChannelMonitor.java b/driver-core/src/test/java/com/datastax/driver/core/utils/SocketChannelMonitor.java index bc2d902cc46..e012dfd516a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/SocketChannelMonitor.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/SocketChannelMonitor.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,168 +26,181 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.channel.EventLoopGroup; import io.netty.channel.socket.SocketChannel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Utility that observes {@link SocketChannel}s. Helpful for ensuring that Sockets are actually closed - * when they should be. Utilizes {@link NettyOptions} to monitor created {@link SocketChannel}s. + * Utility that observes {@link SocketChannel}s. Helpful for ensuring that Sockets are actually + * closed when they should be. Utilizes {@link NettyOptions} to monitor created {@link + * SocketChannel}s. 
*/ public class SocketChannelMonitor implements Runnable, Closeable { - private static final Logger logger = LoggerFactory.getLogger(SocketChannelMonitor.class); + private static final Logger logger = LoggerFactory.getLogger(SocketChannelMonitor.class); - private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SocketMonitor-%d").build()); + private final ScheduledExecutorService executor = + Executors.newScheduledThreadPool( + 1, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SocketMonitor-%d").build()); - // use a weak set so channels may be garbage collected. - private final Collection channels = Collections.newSetFromMap( - new MapMaker().weakKeys().makeMap()); + // use a weak set so channels may be garbage collected. + private final Collection channels = + Collections.newSetFromMap(new MapMaker().weakKeys().makeMap()); - private final AtomicLong channelsCreated = new AtomicLong(0); + private final AtomicLong channelsCreated = new AtomicLong(0); - private final NettyOptions nettyOptions = new NettyOptions() { + private final NettyOptions nettyOptions = + new NettyOptions() { @Override public void afterChannelInitialized(SocketChannel channel) throws Exception { - channels.add(channel); - channelsCreated.incrementAndGet(); + channels.add(channel); + channelsCreated.incrementAndGet(); } @Override public void onClusterClose(EventLoopGroup eventLoopGroup) { - eventLoopGroup.shutdownGracefully(0, 15, TimeUnit.SECONDS).syncUninterruptibly(); - } - }; - - @Override - public void run() { - try { - report(); - } catch (Exception e) { - logger.error("Error countered.", e); + eventLoopGroup.shutdownGracefully(0, 15, TimeUnit.SECONDS).syncUninterruptibly(); } + }; + + @Override + public void run() { + try { + report(); + } catch (Exception e) { + logger.error("Error countered.", e); } - - @Override - public void close() throws IOException { - stop(); - } - - public void stop() { - executor.shutdown(); - try { - executor.awaitTermination(1, TimeUnit.SECONDS); - } catch (InterruptedException e) { - // ok - } - } - - /** - * @return A custom {@link NettyOptions} instance that hooks into afterChannelInitialized added channels may be - * monitored. - */ - public NettyOptions nettyOptions() { - return nettyOptions; + } + + @Override + public void close() throws IOException { + stop(); + } + + public void stop() { + executor.shutdown(); + try { + executor.awaitTermination(1, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // ok } - - public static Predicate openChannels = new Predicate() { + } + + /** + * @return A custom {@link NettyOptions} instance that hooks into afterChannelInitialized added + * channels may be monitored. + */ + public NettyOptions nettyOptions() { + return nettyOptions; + } + + public static Predicate openChannels = + new Predicate() { @Override public boolean apply(SocketChannel input) { - return input.isOpen(); + return input.isOpen(); } - }; - - /** - * Schedules a {@link #report()} to be called every configured interval. - * - * @param interval how often to report. - * @param timeUnit at what time precision to report at. - */ - public void reportAtFixedInterval(int interval, TimeUnit timeUnit) { - executor.scheduleAtFixedRate(this, interval, interval, timeUnit); - } - - /** - * Reports for all sockets. - */ - public void report() { - report(Predicates.alwaysTrue()); + }; + + /** + * Schedules a {@link #report()} to be called every configured interval. 
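For orientation, the sketch below shows one way a test might plug this monitor into a driver `Cluster`. The `withNettyOptions` builder hook and the contact point are illustrative assumptions; `nettyOptions()`, `reportAtFixedInterval()`, `report()` and `stop()` are the monitor methods defined in this file.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.utils.SocketChannelMonitor;
import java.util.concurrent.TimeUnit;

public class ChannelMonitorSketch {
  public static void main(String[] args) {
    SocketChannelMonitor monitor = new SocketChannelMonitor();
    // Log a channel report every 5 seconds while the cluster is in use.
    monitor.reportAtFixedInterval(5, TimeUnit.SECONDS);

    // Assumed wiring: register the monitor's NettyOptions so that every
    // SocketChannel opened by the driver is tracked by the monitor.
    Cluster cluster =
        Cluster.builder()
            .addContactPoint("127.0.0.1") // hypothetical contact point
            .withNettyOptions(monitor.nettyOptions())
            .build();
    try {
      cluster.connect();
      monitor.report(); // one-off report: open/closed/live channel counts
    } finally {
      cluster.close();
      monitor.stop();
    }
  }
}
```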
+ * + * @param interval how often to report. + * @param timeUnit at what time precision to report at. + */ + public void reportAtFixedInterval(int interval, TimeUnit timeUnit) { + executor.scheduleAtFixedRate(this, interval, interval, timeUnit); + } + + /** Reports for all sockets. */ + public void report() { + report(Predicates.alwaysTrue()); + } + + /** + * Report for all sockets matching the given predicate. The report format reflects the number of + * open, closed, live and total sockets created. This is logged at DEBUG if enabled. + * + *

    + * + *

    If TRACE is enabled, each individual socket will be logged as well. + * + * @param channelFilter used to determine which sockets to report on. + */ + public void report(Predicate channelFilter) { + if (logger.isDebugEnabled()) { + Iterable channels = matchingChannels(channelFilter); + Iterable open = Iterables.filter(channels, openChannels); + Iterable closed = Iterables.filter(channels, Predicates.not(openChannels)); + + logger.debug( + "Channel states: {} open, {} closed, live {}, total sockets created " + + "(including those that don't match filter) {}.", + Iterables.size(open), + Iterables.size(closed), + Iterables.size(channels), + channelsCreated.get()); + + if (logger.isTraceEnabled()) { + logger.trace("Open channels {}.", open); + logger.trace("Closed channels {}.", closed); + } } + } - /** - *

    - * Report for all sockets matching the given predicate. The report format reflects the number of open, closed, - * live and total sockets created. This is logged at DEBUG if enabled. - *

    - *

    - * If TRACE is enabled, each individual socket will be logged as well. - * - * @param channelFilter used to determine which sockets to report on. - */ - public void report(Predicate channelFilter) { - if (logger.isDebugEnabled()) { - Iterable channels = matchingChannels(channelFilter); - Iterable open = Iterables.filter(channels, openChannels); - Iterable closed = Iterables.filter(channels, Predicates.not(openChannels)); - - logger.debug("Channel states: {} open, {} closed, live {}, total sockets created " + - "(including those that don't match filter) {}.", - Iterables.size(open), - Iterables.size(closed), - Iterables.size(channels), - channelsCreated.get()); - - if (logger.isTraceEnabled()) { - logger.trace("Open channels {}.", open); - logger.trace("Closed channels {}.", closed); - } - } - } - - private static Comparator BY_REMOTE_ADDRESS = new Comparator() { + private static Comparator BY_REMOTE_ADDRESS = + new Comparator() { @Override public int compare(SocketChannel t0, SocketChannel t1) { - // Should not be null as these are filtered previously in matchingChannels. - assert t0 != null && t0.remoteAddress() != null; - assert t1 != null && t1.remoteAddress() != null; - return t0.remoteAddress().toString().compareTo(t1.remoteAddress().toString()); + // Should not be null as these are filtered previously in matchingChannels. + assert t0 != null && t0.remoteAddress() != null; + assert t1 != null && t1.remoteAddress() != null; + return t0.remoteAddress().toString().compareTo(t1.remoteAddress().toString()); } - }; - - public Collection openChannels(InetSocketAddress... addresses) { - return openChannels(Arrays.asList(addresses)); - } - - /** - * @param addresses The addresses to include. - * @return Open channels matching the given socket addresses. - */ - public Collection openChannels(final Collection addresses) { - List channels = Lists.newArrayList(matchingChannels(new Predicate() { - @Override - public boolean apply(SocketChannel input) { - return input.isOpen() && input.remoteAddress() != null && addresses.contains(input.remoteAddress()); - } - })); - Collections.sort(channels, BY_REMOTE_ADDRESS); - return channels; - } - - /** - * @param channelFilter {@link Predicate} to use to determine whether or not a socket shall be considered. - * @return Channels matching the given {@link Predicate}. - */ - public Iterable matchingChannels(final Predicate channelFilter) { - return Iterables.filter(Lists.newArrayList(channels), Predicates.and(Predicates.notNull(), channelFilter)); - } - + }; + + public Collection openChannels(InetSocketAddress... addresses) { + return openChannels(Arrays.asList(addresses)); + } + + /** + * @param addresses The addresses to include. + * @return Open channels matching the given socket addresses. + */ + public Collection openChannels(final Collection addresses) { + List channels = + Lists.newArrayList( + matchingChannels( + new Predicate() { + @Override + public boolean apply(SocketChannel input) { + return input.isOpen() + && input.remoteAddress() != null + && addresses.contains(input.remoteAddress()); + } + })); + Collections.sort(channels, BY_REMOTE_ADDRESS); + return channels; + } + + /** + * @param channelFilter {@link Predicate} to use to determine whether or not a socket shall be + * considered. + * @return Channels matching the given {@link Predicate}. 
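A typical follow-up assertion, sketched here under the assumption that the test already knows the node's address, is to check that no channel to that node remains open once the cluster has been closed:

```java
import static org.assertj.core.api.Assertions.assertThat;

import com.datastax.driver.core.utils.SocketChannelMonitor;
import java.net.InetSocketAddress;

public class ChannelLeakCheckSketch {
  // Hypothetical helper: call after Cluster#close() to verify that the driver
  // did not leak any socket to the given node.
  static void assertNoOpenChannels(SocketChannelMonitor monitor, InetSocketAddress node) {
    assertThat(monitor.openChannels(node)).isEmpty();
  }
}
```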
+ */ + public Iterable matchingChannels(final Predicate channelFilter) { + return Iterables.filter( + Lists.newArrayList(channels), Predicates.and(Predicates.notNull(), channelFilter)); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDFromPropertyTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDFromPropertyTest.java index 22e919a627b..5c36094bbbe 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDFromPropertyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDFromPropertyTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,34 +17,35 @@ */ package com.datastax.driver.core.utils; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.MemoryAppender; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class UUIDsPIDFromPropertyTest { - private static final Logger logger = Logger.getLogger(UUIDs.class); + private static final Logger logger = Logger.getLogger(UUIDs.class); - @Test(groups = "isolated") - public void should_obtain_pid_from_system_property() { - // If the com.datastax.driver.PID property is set, it should be used and this should be logged. - MemoryAppender appender = new MemoryAppender(); - Level originalLevel = logger.getLevel(); - try { - logger.setLevel(Level.INFO); - logger.addAppender(appender); - int pid = 8675; - System.setProperty(UUIDs.PID_SYSTEM_PROPERTY, "" + pid); - UUIDs.timeBased(); - assertThat(appender.get()) - .containsOnlyOnce(String.format("PID obtained from System property %s: %d", - UUIDs.PID_SYSTEM_PROPERTY, pid)); - } finally { - logger.removeAppender(appender); - logger.setLevel(originalLevel); - } + @Test(groups = "isolated") + public void should_obtain_pid_from_system_property() { + // If the com.datastax.driver.PID property is set, it should be used and this should be logged. 
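As background for this test, a minimal sketch of the property it exercises: the literal property name comes from the comment above, and the ordering constraint (set it before the first `UUIDs` call) follows from the class-initialization note in `UUIDsTest`.

```java
import com.datastax.driver.core.utils.UUIDs;
import java.util.UUID;

public class PidPropertySketch {
  public static void main(String[] args) {
    // Set before the first UUIDs call; the PID is resolved once, presumably
    // during class initialization of UUIDs (see the note in UUIDsTest).
    System.setProperty("com.datastax.driver.PID", "8675");

    UUID id = UUIDs.timeBased(); // uses the pinned PID instead of native getpid()
    System.out.println(id);
  }
}
```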
+ MemoryAppender appender = new MemoryAppender(); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.INFO); + logger.addAppender(appender); + int pid = 8675; + System.setProperty(UUIDs.PID_SYSTEM_PROPERTY, "" + pid); + UUIDs.timeBased(); + assertThat(appender.get()) + .containsOnlyOnce( + String.format( + "PID obtained from System property %s: %d", UUIDs.PID_SYSTEM_PROPERTY, pid)); + } finally { + logger.removeAppender(appender); + logger.setLevel(originalLevel); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDNativeTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDNativeTest.java index 6ce6b4c13be..e8c6f74b244 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDNativeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDNativeTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,31 +17,32 @@ */ package com.datastax.driver.core.utils; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.MemoryAppender; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class UUIDsPIDNativeTest { - private static final Logger logger = Logger.getLogger(UUIDs.class); + private static final Logger logger = Logger.getLogger(UUIDs.class); - @Test(groups = "isolated") - public void should_obtain_pid_through_native_call() { - // In the general case the JNR call should *just* work as most systems should support POSIX getpid. - MemoryAppender appender = new MemoryAppender(); - Level originalLevel = logger.getLevel(); - try { - logger.setLevel(Level.INFO); - logger.addAppender(appender); - UUIDs.timeBased(); + @Test(groups = "isolated") + public void should_obtain_pid_through_native_call() { + // In the general case the JNR call should *just* work as most systems should support POSIX + // getpid. 
+ MemoryAppender appender = new MemoryAppender(); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.INFO); + logger.addAppender(appender); + UUIDs.timeBased(); - assertThat(appender.get()).containsOnlyOnce("PID obtained through native call to getpid()"); - } finally { - logger.removeAppender(appender); - logger.setLevel(originalLevel); - } + assertThat(appender.get()).containsOnlyOnce("PID obtained through native call to getpid()"); + } finally { + logger.removeAppender(appender); + logger.setLevel(originalLevel); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDPropertyInvalidTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDPropertyInvalidTest.java index 7e81244d6e3..64f502ad562 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDPropertyInvalidTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsPIDPropertyInvalidTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,35 +17,38 @@ */ package com.datastax.driver.core.utils; +import static org.assertj.core.api.Assertions.assertThat; + import com.datastax.driver.core.MemoryAppender; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.testng.annotations.Test; -import static org.assertj.core.api.Assertions.assertThat; - public class UUIDsPIDPropertyInvalidTest { - private static final Logger logger = Logger.getLogger(UUIDs.class); + private static final Logger logger = Logger.getLogger(UUIDs.class); - @Test(groups = "isolated") - public void should_fallback_on_native_call_if_system_property_invalid() { - // If the com.datastax.driver.PID property is set, but is invalid, it should fallback onto native getpid(). - MemoryAppender appender = new MemoryAppender(); - Level originalLevel = logger.getLevel(); - try { - logger.setLevel(Level.INFO); - logger.addAppender(appender); - String pid = "NOT_A_PID"; - System.setProperty(UUIDs.PID_SYSTEM_PROPERTY, pid); - UUIDs.timeBased(); - assertThat(appender.get()) - .containsOnlyOnce(String.format("Incorrect integer specified for PID in System property %s: %s", - UUIDs.PID_SYSTEM_PROPERTY, pid)) - .containsOnlyOnce("PID obtained through native call to getpid()"); - } finally { - logger.removeAppender(appender); - logger.setLevel(originalLevel); - } + @Test(groups = "isolated") + public void should_fallback_on_native_call_if_system_property_invalid() { + // If the com.datastax.driver.PID property is set, but is invalid, it should fallback onto + // native getpid(). 
+ MemoryAppender appender = new MemoryAppender(); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.INFO); + logger.addAppender(appender); + String pid = "NOT_A_PID"; + System.setProperty(UUIDs.PID_SYSTEM_PROPERTY, pid); + UUIDs.timeBased(); + assertThat(appender.get()) + .containsOnlyOnce( + String.format( + "Incorrect integer specified for PID in System property %s: %s", + UUIDs.PID_SYSTEM_PROPERTY, pid)) + .containsOnlyOnce("PID obtained through native call to getpid()"); + } finally { + logger.removeAppender(appender); + logger.setLevel(originalLevel); } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java index 432f06764fc..e45785cc1e0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,152 +17,150 @@ */ package com.datastax.driver.core.utils; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + import com.datastax.driver.core.ProtocolVersion; import com.datastax.driver.core.TypeCodec; -import org.testng.annotations.Test; - import java.nio.ByteBuffer; import java.util.HashSet; import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentSkipListSet; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; public class UUIDsTest { - @Test(groups = "unit") - public void conformanceTest() { + @Test(groups = "unit") + public void conformanceTest() { - // The UUIDs class does some computation at class initialization, which - // may screw up our assumption below that UUIDs.timeBased() takes less - // than 10ms, so force class loading now. - UUIDs.random(); + // The UUIDs class does some computation at class initialization, which + // may screw up our assumption below that UUIDs.timeBased() takes less + // than 10ms, so force class loading now. + UUIDs.random(); - long now = System.currentTimeMillis(); - UUID uuid = UUIDs.timeBased(); + long now = System.currentTimeMillis(); + UUID uuid = UUIDs.timeBased(); - assertEquals(uuid.version(), 1); - assertEquals(uuid.variant(), 2); + assertEquals(uuid.version(), 1); + assertEquals(uuid.variant(), 2); - long tstamp = UUIDs.unixTimestamp(uuid); + long tstamp = UUIDs.unixTimestamp(uuid); - // Check now and the uuid timestamp are within 10 millisseconds. 
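The properties asserted by this conformance test translate directly into the usual application-side pattern, sketched below: generate a version 1 UUID and read its creation time back as a Unix timestamp in milliseconds.

```java
import com.datastax.driver.core.utils.UUIDs;
import java.util.UUID;

public class TimeBasedUuidSketch {
  public static void main(String[] args) {
    UUID id = UUIDs.timeBased();                  // RFC 4122 version 1, variant 2
    long createdMillis = UUIDs.unixTimestamp(id); // ms since the Unix epoch

    System.out.printf(
        "version=%d variant=%d created=%d%n", id.version(), id.variant(), createdMillis);
  }
}
```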
- assert now <= tstamp && now >= tstamp - 10 : String.format("now = %d, tstamp = %d", now, tstamp); - } + // Check now and the uuid timestamp are within 10 millisseconds. + assert now <= tstamp && now >= tstamp - 10 + : String.format("now = %d, tstamp = %d", now, tstamp); + } - @Test(groups = "unit") - public void uniquenessTest() { - // Generate 1M uuid and check we never have twice the same one + @Test(groups = "unit") + public void uniquenessTest() { + // Generate 1M uuid and check we never have twice the same one - int nbGenerated = 1000000; - Set generated = new HashSet(nbGenerated); + int nbGenerated = 1000000; + Set generated = new HashSet(nbGenerated); - for (int i = 0; i < nbGenerated; ++i) - generated.add(UUIDs.timeBased()); + for (int i = 0; i < nbGenerated; ++i) generated.add(UUIDs.timeBased()); - assertEquals(generated.size(), nbGenerated); - } + assertEquals(generated.size(), nbGenerated); + } - @Test(groups = "unit") - public void multiThreadUniquenessTest() throws Exception { - int nbThread = 10; - int nbGenerated = 10000; - Set generated = new ConcurrentSkipListSet(); + @Test(groups = "unit") + public void multiThreadUniquenessTest() throws Exception { + int nbThread = 10; + int nbGenerated = 10000; + Set generated = new ConcurrentSkipListSet(); - UUIDGenerator[] generators = new UUIDGenerator[nbThread]; - for (int i = 0; i < nbThread; i++) - generators[i] = new UUIDGenerator(nbGenerated, generated); + UUIDGenerator[] generators = new UUIDGenerator[nbThread]; + for (int i = 0; i < nbThread; i++) generators[i] = new UUIDGenerator(nbGenerated, generated); - for (int i = 0; i < nbThread; i++) - generators[i].start(); + for (int i = 0; i < nbThread; i++) generators[i].start(); - for (int i = 0; i < nbThread; i++) - generators[i].join(); + for (int i = 0; i < nbThread; i++) generators[i].join(); - assertEquals(generated.size(), nbThread * nbGenerated); - } + assertEquals(generated.size(), nbThread * nbGenerated); + } - @Test(groups = "unit") - public void timestampIncreasingTest() { - // Generate 1M uuid and check timestamp are always increasing - int nbGenerated = 1000000; - long previous = 0; + @Test(groups = "unit") + public void timestampIncreasingTest() { + // Generate 1M uuid and check timestamp are always increasing + int nbGenerated = 1000000; + long previous = 0; - for (int i = 0; i < nbGenerated; ++i) { - long current = UUIDs.timeBased().timestamp(); - assert previous < current : String.format("previous = %d >= %d = current", previous, current); - } + for (int i = 0; i < nbGenerated; ++i) { + long current = UUIDs.timeBased().timestamp(); + assert previous < current : String.format("previous = %d >= %d = current", previous, current); } + } - @Test(groups = "unit") - public void startEndOfTest() { + @Test(groups = "unit") + public void startEndOfTest() { - Random random = new Random(System.currentTimeMillis()); + Random random = new Random(System.currentTimeMillis()); - int nbTstamp = 10; - int nbPerTstamp = 10; + int nbTstamp = 10; + int nbPerTstamp = 10; - for (int i = 0; i < nbTstamp; i++) { - long tstamp = (long) random.nextInt(); - for (int j = 0; j < nbPerTstamp; j++) { - assertWithin(new UUID(UUIDs.makeMSB(UUIDs.fromUnixTimestamp(tstamp)), random.nextLong()), UUIDs.startOf(tstamp), UUIDs.endOf(tstamp)); - } - } + for (int i = 0; i < nbTstamp; i++) { + long tstamp = (long) random.nextInt(); + for (int j = 0; j < nbPerTstamp; j++) { + assertWithin( + new UUID(UUIDs.makeMSB(UUIDs.fromUnixTimestamp(tstamp)), random.nextLong()), + UUIDs.startOf(tstamp), + 
UUIDs.endOf(tstamp)); + } } + } - private static void assertWithin(UUID uuid, UUID lowerBound, UUID upperBound) { - ByteBuffer uuidBytes = TypeCodec.uuid().serialize(uuid, ProtocolVersion.V1); - ByteBuffer lb = TypeCodec.uuid().serialize(lowerBound, ProtocolVersion.V1); - ByteBuffer ub = TypeCodec.uuid().serialize(upperBound, ProtocolVersion.V1); - assertTrue(compareTimestampBytes(lb, uuidBytes) <= 0); - assertTrue(compareTimestampBytes(ub, uuidBytes) >= 0); - } + private static void assertWithin(UUID uuid, UUID lowerBound, UUID upperBound) { + ByteBuffer uuidBytes = TypeCodec.uuid().serialize(uuid, ProtocolVersion.V1); + ByteBuffer lb = TypeCodec.uuid().serialize(lowerBound, ProtocolVersion.V1); + ByteBuffer ub = TypeCodec.uuid().serialize(upperBound, ProtocolVersion.V1); + assertTrue(compareTimestampBytes(lb, uuidBytes) <= 0); + assertTrue(compareTimestampBytes(ub, uuidBytes) >= 0); + } - private static int compareTimestampBytes(ByteBuffer o1, ByteBuffer o2) { - int o1Pos = o1.position(); - int o2Pos = o2.position(); + private static int compareTimestampBytes(ByteBuffer o1, ByteBuffer o2) { + int o1Pos = o1.position(); + int o2Pos = o2.position(); - int d = (o1.get(o1Pos + 6) & 0xF) - (o2.get(o2Pos + 6) & 0xF); - if (d != 0) return d; + int d = (o1.get(o1Pos + 6) & 0xF) - (o2.get(o2Pos + 6) & 0xF); + if (d != 0) return d; - d = (o1.get(o1Pos + 7) & 0xFF) - (o2.get(o2Pos + 7) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos + 7) & 0xFF) - (o2.get(o2Pos + 7) & 0xFF); + if (d != 0) return d; - d = (o1.get(o1Pos + 4) & 0xFF) - (o2.get(o2Pos + 4) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos + 4) & 0xFF) - (o2.get(o2Pos + 4) & 0xFF); + if (d != 0) return d; - d = (o1.get(o1Pos + 5) & 0xFF) - (o2.get(o2Pos + 5) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos + 5) & 0xFF) - (o2.get(o2Pos + 5) & 0xFF); + if (d != 0) return d; - d = (o1.get(o1Pos) & 0xFF) - (o2.get(o2Pos) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos) & 0xFF) - (o2.get(o2Pos) & 0xFF); + if (d != 0) return d; - d = (o1.get(o1Pos + 1) & 0xFF) - (o2.get(o2Pos + 1) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos + 1) & 0xFF) - (o2.get(o2Pos + 1) & 0xFF); + if (d != 0) return d; - d = (o1.get(o1Pos + 2) & 0xFF) - (o2.get(o2Pos + 2) & 0xFF); - if (d != 0) return d; + d = (o1.get(o1Pos + 2) & 0xFF) - (o2.get(o2Pos + 2) & 0xFF); + if (d != 0) return d; - return (o1.get(o1Pos + 3) & 0xFF) - (o2.get(o2Pos + 3) & 0xFF); - } + return (o1.get(o1Pos + 3) & 0xFF) - (o2.get(o2Pos + 3) & 0xFF); + } - private static class UUIDGenerator extends Thread { + private static class UUIDGenerator extends Thread { - private final int toGenerate; - private final Set generated; + private final int toGenerate; + private final Set generated; - UUIDGenerator(int toGenerate, Set generated) { - this.toGenerate = toGenerate; - this.generated = generated; - } + UUIDGenerator(int toGenerate, Set generated) { + this.toGenerate = toGenerate; + this.generated = generated; + } - @Override - public void run() { - for (int i = 0; i < toGenerate; ++i) - generated.add(UUIDs.timeBased()); - } + @Override + public void run() { + for (int i = 0; i < toGenerate; ++i) generated.add(UUIDs.timeBased()); } + } } diff --git a/driver-core/src/test/resources/cloud/creds.zip b/driver-core/src/test/resources/cloud/creds.zip new file mode 100644 index 00000000000..3b5d1cb1cbd Binary files /dev/null and b/driver-core/src/test/resources/cloud/creds.zip differ diff --git a/driver-core/src/test/resources/cloud/identity.jks 
b/driver-core/src/test/resources/cloud/identity.jks new file mode 100644 index 00000000000..8aac969133a Binary files /dev/null and b/driver-core/src/test/resources/cloud/identity.jks differ diff --git a/driver-core/src/test/resources/cloud/metadata.json b/driver-core/src/test/resources/cloud/metadata.json new file mode 100644 index 00000000000..9573731ac3e --- /dev/null +++ b/driver-core/src/test/resources/cloud/metadata.json @@ -0,0 +1 @@ +{"region":"local","contact_info":{"type":"sni_proxy","local_dc":"dc1","contact_points":["4ac06655-f861-49f9-881e-3fee22e69b94","2af7c253-3394-4a0d-bfac-f1ad81b5154d","b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"],"sni_proxy_address":"localhost:30002"}} \ No newline at end of file diff --git a/driver-core/src/test/resources/cloud/trustStore.jks b/driver-core/src/test/resources/cloud/trustStore.jks new file mode 100644 index 00000000000..8c389a5dd00 Binary files /dev/null and b/driver-core/src/test/resources/cloud/trustStore.jks differ diff --git a/driver-core/src/test/resources/export_as_string_test_2.0.cql b/driver-core/src/test/resources/export_as_string_test_2.0.cql new file mode 100644 index 00000000000..a2e180ad8af --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_2.0.cql @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND replicate_on_write = true + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = 'KEYS_ONLY' + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND index_interval = 128 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a int, + PRIMARY KEY (zkey) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND replicate_on_write = true + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.1 + AND caching = 'KEYS_ONLY' + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 95 } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND index_interval = 128 + AND memtable_flush_period_in_ms = 0; diff --git a/driver-core/src/test/resources/export_as_string_test_2.1.cql b/driver-core/src/test/resources/export_as_string_test_2.1.cql new file mode 100644 index 00000000000..71257fbdfd8 --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_2.1.cql @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TYPE complex_ks.btype ( + a text +); + +CREATE TYPE complex_ks.xtype ( + d text +); + +CREATE TYPE complex_ks.ztype ( + c text, + a int +); + +CREATE TYPE complex_ks.ctype ( + "Z" frozen, + x frozen +); + +CREATE TYPE complex_ks.atype ( + c frozen +); + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.1 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 95 } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0; diff --git a/driver-core/src/test/resources/export_as_string_test_2.2.cql b/driver-core/src/test/resources/export_as_string_test_2.2.cql new file mode 100644 index 00000000000..b86ee43b132 --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_2.2.cql @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TYPE complex_ks.btype ( + a text +); + +CREATE TYPE complex_ks.xtype ( + d text +); + +CREATE TYPE complex_ks.ztype ( + c text, + a int +); + +CREATE TYPE complex_ks.ctype ( + "Z" frozen, + x frozen +); + +CREATE TYPE complex_ks.atype ( + c frozen +); + +CREATE TABLE complex_ks.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX cyclist_by_country ON complex_ks.cyclist_mv (country); + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.1 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 95 } + AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99.0PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0; + +CREATE FUNCTION complex_ks.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION complex_ks.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE complex_ks.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE complex_ks.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal 
+ INITCOND (0,0); diff --git a/driver-core/src/test/resources/export_as_string_test_3.0.cql b/driver-core/src/test/resources/export_as_string_test_3.0.cql new file mode 100644 index 00000000000..90a91603e26 --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_3.0.cql @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TYPE complex_ks.btype ( + a text +); + +CREATE TYPE complex_ks.xtype ( + d text +); + +CREATE TYPE complex_ks.ztype ( + c text, + a int +); + +CREATE TYPE complex_ks.ctype ( + "Z" frozen, + x frozen +); + +CREATE TYPE complex_ks.atype ( + c frozen +); + +CREATE TABLE complex_ks.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX cyclist_by_country ON complex_ks.cyclist_mv (country); + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_a_age AS + SELECT * + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND 
read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = 'simple view' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_r_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid DESC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.1 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 95 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND memtable_flush_period_in_ms = 0; + +CREATE FUNCTION complex_ks.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= 
state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION complex_ks.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE complex_ks.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE complex_ks.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/driver-core/src/test/resources/export_as_string_test_3.11.cql b/driver-core/src/test/resources/export_as_string_test_3.11.cql new file mode 100644 index 00000000000..c7edb57bae0 --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_3.11.cql @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TYPE complex_ks.btype ( + a text +); + +CREATE TYPE complex_ks.xtype ( + d text +); + +CREATE TYPE complex_ks.ztype ( + c text, + a int +); + +CREATE TYPE complex_ks.ctype ( + "Z" frozen, + x frozen +); + +CREATE TYPE complex_ks.atype ( + c frozen +); + +CREATE TABLE complex_ks.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX cyclist_by_country ON complex_ks.cyclist_mv (country); + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_a_age AS + SELECT * + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 
'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = 'simple view' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_r_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid DESC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH read_repair_chance = 0.0 + AND dclocal_read_repair_chance = 0.1 + AND gc_grace_seconds = 864000 + AND bloom_filter_fp_chance = 0.1 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 
'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 95 } + AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99PERCENTILE' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE FUNCTION complex_ks.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION complex_ks.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE complex_ks.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE complex_ks.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/driver-core/src/test/resources/export_as_string_test_4.0.cql b/driver-core/src/test/resources/export_as_string_test_4.0.cql new file mode 100644 index 00000000000..3c02a337113 --- /dev/null +++ b/driver-core/src/test/resources/export_as_string_test_4.0.cql @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +CREATE KEYSPACE complex_ks WITH REPLICATION = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND DURABLE_WRITES = true; + +CREATE TYPE complex_ks.btype ( + a text +); + +CREATE TYPE complex_ks.xtype ( + d text +); + +CREATE TYPE complex_ks.ztype ( + c text, + a int +); + +CREATE TYPE complex_ks.ctype ( + "Z" frozen, + x frozen +); + +CREATE TYPE complex_ks.atype ( + c frozen +); + +CREATE TABLE complex_ks.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX cyclist_by_country ON complex_ks.cyclist_mv (country); + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_a_age AS + SELECT * + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid ASC) + AND read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = 'simple view' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE MATERIALIZED VIEW complex_ks.cyclist_by_r_age AS + SELECT age, cid, birthday, country, name + FROM complex_ks.cyclist_mv + WHERE age IS NOT NULL AND cid IS NOT NULL + PRIMARY KEY (age, cid) + WITH CLUSTERING ORDER BY (cid DESC) + AND read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 
'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE TABLE complex_ks.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank ASC) + AND read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE INDEX rrank ON complex_ks.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON complex_ks.rank_by_year_and_name (race_year); + +CREATE TABLE complex_ks.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH read_repair = 'BLOCKING' + AND gc_grace_seconds = 864000 + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.1 + AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' } + AND comment = '' + AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4, 'sstable_size_in_mb' : 95 } + AND compression = { 'chunk_length_in_kb' : 16, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' } + AND default_time_to_live = 0 + AND speculative_retry = '99p' + AND min_index_interval = 128 + AND max_index_interval = 2048 + AND crc_check_chance = 1.0 + AND cdc = false + AND memtable_flush_period_in_ms = 0; + +CREATE FUNCTION complex_ks.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION complex_ks.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE complex_ks.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE complex_ks.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/driver-core/src/test/resources/log4j.properties b/driver-core/src/test/resources/log4j.properties index 7e4217014c7..b74fe275a0b 100644 --- a/driver-core/src/test/resources/log4j.properties +++ b/driver-core/src/test/resources/log4j.properties @@ -1,11 +1,13 @@ # -# Copyright (C) 2012-2017 DataStax Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/driver-core/src/test/resources/reference.conf b/driver-core/src/test/resources/reference.conf index 6da0ce3a055..4f5a14da64c 100644 --- a/driver-core/src/test/resources/reference.conf +++ b/driver-core/src/test/resources/reference.conf @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + # Adjust Scassandra's log level # (it seems some messages are conditioned by log4j.properties and others by reference.conf, so we need both) akka { diff --git a/driver-dist-source/pom.xml b/driver-dist-source/pom.xml new file mode 100644 index 00000000000..27297c8bb66 --- /dev/null +++ b/driver-dist-source/pom.xml @@ -0,0 +1,125 @@ + + + + 4.0.0 + + org.apache.cassandra + cassandra-driver-parent + 3.12.2-SNAPSHOT + + java-driver-distribution-source + pom + Apache Cassandra Java Driver - source distribution + + apache-cassandra-java-driver-${project.version}-source + + + maven-jar-plugin + + + + default-jar + none + + + + + maven-source-plugin + + true + + + + maven-install-plugin + + true + + + + maven-deploy-plugin + + true + + + + org.revapi + revapi-maven-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + + + + release + + + + maven-assembly-plugin + + + assemble-source-tarball + package + + single + + + + + false + + src/assembly/source-tarball.xml + + posix + + + + net.nicoulaj.maven.plugins + checksum-maven-plugin + 1.7 + + + + artifacts + + + + + true + + sha256 + sha512 + + + + + + + + diff --git a/driver-dist-source/src/assembly/source-tarball.xml b/driver-dist-source/src/assembly/source-tarball.xml new file mode 100644 index 00000000000..b3e2d0f463a --- /dev/null +++ b/driver-dist-source/src/assembly/source-tarball.xml @@ -0,0 +1,43 @@ + + + + source-tarball + + tar.gz + + + + .. + . 
+ true + + + **/*.iml + **/.classpath + **/.project + **/.java-version + **/.flattened-pom.xml + **/dependency-reduced-pom.xml + **/${project.build.directory}/** + + + + diff --git a/driver-dist/pom.xml b/driver-dist/pom.xml index 765631a4adf..098e8be1a66 100644 --- a/driver-dist/pom.xml +++ b/driver-dist/pom.xml @@ -1,12 +1,14 @@ jar - DataStax Java Driver for Apache Cassandra - Binary distribution + Java Driver for Apache Cassandra - Binary distribution - - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core - - com.datastax.cassandra + org.apache.cassandra cassandra-driver-mapping - - com.datastax.cassandra + org.apache.cassandra cassandra-driver-extras - - - cassandra-java-driver-${project.version} - + apache-cassandra-java-driver-${project.version} - maven-jar-plugin @@ -66,53 +61,38 @@ - maven-source-plugin true - maven-install-plugin true - maven-deploy-plugin true - + + maven-gpg-plugin + + true + + - - release - - maven-javadoc-plugin - - - dependencies-javadoc - package - - jar - - - true - - - - maven-assembly-plugin @@ -139,10 +119,34 @@ true + + net.nicoulaj.maven.plugins + checksum-maven-plugin + 1.7 + + + + artifacts + + + + + true + + sha256 + sha512 + + + + + maven-gpg-plugin + + false + + - diff --git a/driver-dist/src/assembly/binary-tarball.xml b/driver-dist/src/assembly/binary-tarball.xml index 1b7b1a74b6b..baf4579ca55 100644 --- a/driver-dist/src/assembly/binary-tarball.xml +++ b/driver-dist/src/assembly/binary-tarball.xml @@ -1,12 +1,14 @@ - com.datastax.cassandra:cassandra-driver-core - com.datastax.cassandra:cassandra-driver-mapping - com.datastax.cassandra:cassandra-driver-extras + org.apache.cassandra:cassandra-driver-core + org.apache.cassandra:cassandra-driver-mapping + org.apache.cassandra:cassandra-driver-extras io.netty:netty-transport-native-epoll:* true + + lib + + + io.netty:netty-common:* + + true + @@ -55,7 +65,7 @@ true - com.datastax.cassandra:cassandra-driver-mapping + org.apache.cassandra:cassandra-driver-mapping false @@ -64,9 +74,9 @@ lib/mapping - com.datastax.cassandra:cassandra-driver-core - com.datastax.cassandra:cassandra-driver-mapping - com.datastax.cassandra:cassandra-driver-extras + org.apache.cassandra:cassandra-driver-core + org.apache.cassandra:cassandra-driver-mapping + org.apache.cassandra:cassandra-driver-extras com.google.guava:guava org.slf4j:slf4j-api @@ -81,7 +91,7 @@ true - com.datastax.cassandra:cassandra-driver-extras + org.apache.cassandra:cassandra-driver-extras false @@ -90,9 +100,9 @@ lib/extras - com.datastax.cassandra:cassandra-driver-core - com.datastax.cassandra:cassandra-driver-mapping - com.datastax.cassandra:cassandra-driver-extras + org.apache.cassandra:cassandra-driver-core + org.apache.cassandra:cassandra-driver-mapping + org.apache.cassandra:cassandra-driver-extras com.google.guava:guava org.slf4j:slf4j-api @@ -107,10 +117,10 @@ true - com.datastax.cassandra:cassandra-driver-core - com.datastax.cassandra:cassandra-driver-mapping - com.datastax.cassandra:cassandra-driver-extras - com.datastax.cassandra:cassandra-driver-examples + org.apache.cassandra:cassandra-driver-core + org.apache.cassandra:cassandra-driver-mapping + org.apache.cassandra:cassandra-driver-extras + org.apache.cassandra:cassandra-driver-examples false @@ -127,17 +137,13 @@ - - target/apidocs - apidocs - - .. . 
README* - LICENSE* + LICENSE_binary + NOTICE_binary.txt diff --git a/driver-examples/README.md b/driver-examples/README.md index 553cdbc5117..63e8c59726c 100644 --- a/driver-examples/README.md +++ b/driver-examples/README.md @@ -1,6 +1,25 @@ -# DataStax Java Driver for Apache Cassandra - Examples + + +# Java Driver for Apache Cassandra - Examples + +This module contains examples of how to use the Java Driver for Apache Cassandra. ## Usage diff --git a/driver-examples/pom.xml b/driver-examples/pom.xml index 185b84ac29c..a5508fa9d9b 100644 --- a/driver-examples/pom.xml +++ b/driver-examples/pom.xml @@ -1,12 +1,14 @@ - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core - com.datastax.cassandra + org.apache.cassandra cassandra-driver-extras true @@ -134,6 +136,22 @@ + + + src/main/resources + + + ${project.basedir}/.. + + LICENSE_binary + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + @@ -152,13 +170,6 @@ - - maven-javadoc-plugin - - true - - - maven-gpg-plugin diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java b/driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java new file mode 100644 index 00000000000..dea5ec9fde6 --- /dev/null +++ b/driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datastax.driver.examples.astra; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import java.io.File; + +/** + * Connects to a DataStax Astra cluster and extracts basic information from it. + * + *

<p>Preconditions: + * + * <ul> + *   <li>A DataStax Astra cluster is running and accessible. + *   <li>A DataStax Astra secure connect bundle for the running cluster. + * </ul> + * + * <p>
    Side effects: none. + * + * @see Creating an + * Astra Database + * @see + * Providing access to Astra databases + * @see + * Obtaining Astra secure connect bundle + * @see Java Driver online manual + */ +public class AstraReadCassandraVersion { + + public static void main(String[] args) { + + Cluster cluster = null; + try { + // The Cluster object is the main entry point of the driver. + // It holds the known state of the actual Cassandra cluster (notably the Metadata). + // This class is thread-safe, you should create a single instance (per target Cassandra + // cluster), and share it throughout your application. + // Change the path here to the secure connect bundle location (see javadocs above) + cluster = + Cluster.builder() + // Change the path here to the secure connect bundle location (see javadocs above) + .withCloudSecureConnectBundle(new File("/path/to/secure-connect-database_name.zip")) + // Change the user_name and password here for the Astra instance + .withCredentials("user_name", "password") + // Uncomment the next line to use a specific keyspace + // .withKeyspace("keyspace_name") + .build(); + + // The Session is what you use to execute queries. Likewise, it is thread-safe and should be + // reused. + Session session = cluster.connect(); + + // We use execute to send a query to Cassandra. This returns a ResultSet, which is essentially + // a collection of Row objects. + ResultSet rs = session.execute("select release_version from system.local"); + // Extract the first row (which is the only one in this case). + Row row = rs.one(); + + // Extract the value of the first (and only) column from the row. + String releaseVersion = row.getString("release_version"); + System.out.printf("Cassandra version is: %s%n", releaseVersion); + + } finally { + // Close the cluster after we’re done with it. This will also close any session that was + // created from this cluster. + // This step is important because it frees underlying resources (TCP connections, thread + // pools...). In a real application, you would typically do this at shutdown (for example, + // when undeploying your webapp). + if (cluster != null) cluster.close(); + } + } +} diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/basic/CreateAndPopulateKeyspace.java b/driver-examples/src/main/java/com/datastax/driver/examples/basic/CreateAndPopulateKeyspace.java index 6a203f848a2..d4a7c8d97cd 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/basic/CreateAndPopulateKeyspace.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/basic/CreateAndPopulateKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,145 +24,131 @@ /** * Creates a keyspace and tables, and loads some data into them. - *

<p/> - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT. - * <p/> - * Side effects: - * - creates a new keyspace "simplex" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates two tables "simplex.songs" and "simplex.playlists". If they exist already, they will be reused; - * - inserts a row in each table. * - * @see Java driver online manual + * + * <p>Preconditions: - a Cassandra cluster is running and accessible through the contacts points + * identified by CONTACT_POINTS and PORT. + * + * <p>
    Side effects: - creates a new keyspace "simplex" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates two tables "simplex.songs" and "simplex.playlists". + * If they exist already, they will be reused; - inserts a row in each table. + * + * @see Java Driver online manual */ public class CreateAndPopulateKeyspace { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - public static void main(String[] args) { - - CreateAndPopulateKeyspace client = new CreateAndPopulateKeyspace(); - - try { - - client.connect(CONTACT_POINTS, PORT); - client.createSchema(); - client.loadData(); - client.querySchema(); - - } finally { - client.close(); - } - } - - private Cluster cluster; - - private Session session; - - /** - * Initiates a connection to the cluster - * specified by the given contact point. - * - * @param contactPoints the contact points to use. - * @param port the port to use. - */ - public void connect(String[] contactPoints, int port) { - - cluster = Cluster.builder() - .addContactPoints(contactPoints).withPort(port) - .build(); - - System.out.printf("Connected to cluster: %s%n", cluster.getMetadata().getClusterName()); + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - session = cluster.connect(); - } + public static void main(String[] args) { - /** - * Creates the schema (keyspace) and tables - * for this example. - */ - public void createSchema() { + CreateAndPopulateKeyspace client = new CreateAndPopulateKeyspace(); - session.execute("CREATE KEYSPACE IF NOT EXISTS simplex WITH replication " + - "= {'class':'SimpleStrategy', 'replication_factor':1};"); + try { - session.execute( - "CREATE TABLE IF NOT EXISTS simplex.songs (" + - "id uuid PRIMARY KEY," + - "title text," + - "album text," + - "artist text," + - "tags set," + - "data blob" + - ");"); + client.connect(CONTACT_POINTS, PORT); + client.createSchema(); + client.loadData(); + client.querySchema(); - session.execute( - "CREATE TABLE IF NOT EXISTS simplex.playlists (" + - "id uuid," + - "title text," + - "album text, " + - "artist text," + - "song_id uuid," + - "PRIMARY KEY (id, title, album, artist)" + - ");"); + } finally { + client.close(); } - - /** - * Inserts data into the tables. - */ - public void loadData() { - - session.execute( - "INSERT INTO simplex.songs (id, title, album, artist, tags) " + - "VALUES (" + - "756716f7-2e54-4715-9f00-91dcbea6cf50," + - "'La Petite Tonkinoise'," + - "'Bye Bye Blackbird'," + - "'Joséphine Baker'," + - "{'jazz', '2013'})" + - ";"); - + } + + private Cluster cluster; + + private Session session; + + /** + * Initiates a connection to the cluster specified by the given contact point. + * + * @param contactPoints the contact points to use. + * @param port the port to use. + */ + public void connect(String[] contactPoints, int port) { + + cluster = Cluster.builder().addContactPoints(contactPoints).withPort(port).build(); + + System.out.printf("Connected to cluster: %s%n", cluster.getMetadata().getClusterName()); + + session = cluster.connect(); + } + + /** Creates the schema (keyspace) and tables for this example. 
*/ + public void createSchema() { + + session.execute( + "CREATE KEYSPACE IF NOT EXISTS simplex WITH replication " + + "= {'class':'SimpleStrategy', 'replication_factor':1};"); + + session.execute( + "CREATE TABLE IF NOT EXISTS simplex.songs (" + + "id uuid PRIMARY KEY," + + "title text," + + "album text," + + "artist text," + + "tags set," + + "data blob" + + ");"); + + session.execute( + "CREATE TABLE IF NOT EXISTS simplex.playlists (" + + "id uuid," + + "title text," + + "album text, " + + "artist text," + + "song_id uuid," + + "PRIMARY KEY (id, title, album, artist)" + + ");"); + } + + /** Inserts data into the tables. */ + public void loadData() { + + session.execute( + "INSERT INTO simplex.songs (id, title, album, artist, tags) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'La Petite Tonkinoise'," + + "'Bye Bye Blackbird'," + + "'Joséphine Baker'," + + "{'jazz', '2013'})" + + ";"); + + session.execute( + "INSERT INTO simplex.playlists (id, song_id, title, album, artist) " + + "VALUES (" + + "2cc9ccb7-6221-4ccb-8387-f22b6a1b354d," + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'La Petite Tonkinoise'," + + "'Bye Bye Blackbird'," + + "'Joséphine Baker'" + + ");"); + } + + /** Queries and displays data. */ + public void querySchema() { + + ResultSet results = session.execute( - "INSERT INTO simplex.playlists (id, song_id, title, album, artist) " + - "VALUES (" + - "2cc9ccb7-6221-4ccb-8387-f22b6a1b354d," + - "756716f7-2e54-4715-9f00-91dcbea6cf50," + - "'La Petite Tonkinoise'," + - "'Bye Bye Blackbird'," + - "'Joséphine Baker'" + - ");"); - } - - /** - * Queries and displays data. - */ - public void querySchema() { - - ResultSet results = session.execute( - "SELECT * FROM simplex.playlists " + - "WHERE id = 2cc9ccb7-6221-4ccb-8387-f22b6a1b354d;"); - - System.out.printf("%-30s\t%-20s\t%-20s%n", "title", "album", "artist"); - System.out.println("-------------------------------+-----------------------+--------------------"); + "SELECT * FROM simplex.playlists " + + "WHERE id = 2cc9ccb7-6221-4ccb-8387-f22b6a1b354d;"); - for (Row row : results) { + System.out.printf("%-30s\t%-20s\t%-20s%n", "title", "album", "artist"); + System.out.println( + "-------------------------------+-----------------------+--------------------"); - System.out.printf("%-30s\t%-20s\t%-20s%n", - row.getString("title"), - row.getString("album"), - row.getString("artist")); - - } - - } + for (Row row : results) { - /** - * Closes the session and the cluster. - */ - public void close() { - session.close(); - cluster.close(); + System.out.printf( + "%-30s\t%-20s\t%-20s%n", + row.getString("title"), row.getString("album"), row.getString("artist")); } + } + /** Closes the session and the cluster. */ + public void close() { + session.close(); + cluster.close(); + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadCassandraVersion.java b/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadCassandraVersion.java index 3863058c54d..74d9d17cbbf 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadCassandraVersion.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadCassandraVersion.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,51 +24,54 @@ /** * Connects to a Cassandra cluster and extracts basic information from it. - *

<p/> - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT. - * <p/> - * Side effects: none. * - * @see Java driver online manual + * + * <p>Preconditions: - a Cassandra cluster is running and accessible through the contacts points + * identified by CONTACT_POINTS and PORT. + * + * <p>
    Side effects: none. + * + * @see Java Driver online manual */ public class ReadCassandraVersion { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - public static void main(String[] args) { + public static void main(String[] args) { - Cluster cluster = null; - try { - // The Cluster object is the main entry point of the driver. - // It holds the known state of the actual Cassandra cluster (notably the Metadata). - // This class is thread-safe, you should create a single instance (per target Cassandra cluster), and share - // it throughout your application. - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .build(); + Cluster cluster = null; + try { + // The Cluster object is the main entry point of the driver. + // It holds the known state of the actual Cassandra cluster (notably the Metadata). + // This class is thread-safe, you should create a single instance (per target Cassandra + // cluster), and share + // it throughout your application. + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(PORT).build(); - // The Session is what you use to execute queries. Likewise, it is thread-safe and should be reused. - Session session = cluster.connect(); + // The Session is what you use to execute queries. Likewise, it is thread-safe and should be + // reused. + Session session = cluster.connect(); - // We use execute to send a query to Cassandra. This returns a ResultSet, which is essentially a collection - // of Row objects. - ResultSet rs = session.execute("select release_version from system.local"); - // Extract the first row (which is the only one in this case). - Row row = rs.one(); + // We use execute to send a query to Cassandra. This returns a ResultSet, which is essentially + // a collection + // of Row objects. + ResultSet rs = session.execute("select release_version from system.local"); + // Extract the first row (which is the only one in this case). + Row row = rs.one(); - // Extract the value of the first (and only) column from the row. - String releaseVersion = row.getString("release_version"); - System.out.printf("Cassandra version is: %s%n", releaseVersion); + // Extract the value of the first (and only) column from the row. + String releaseVersion = row.getString("release_version"); + System.out.printf("Cassandra version is: %s%n", releaseVersion); - } finally { - // Close the cluster after we’re done with it. This will also close any session that was created from this - // cluster. - // This step is important because it frees underlying resources (TCP connections, thread pools...). In a - // real application, you would typically do this at shutdown (for example, when undeploying your webapp). - if (cluster != null) - cluster.close(); - } + } finally { + // Close the cluster after we’re done with it. This will also close any session that was + // created from this + // cluster. + // This step is important because it frees underlying resources (TCP connections, thread + // pools...). In a + // real application, you would typically do this at shutdown (for example, when undeploying + // your webapp). 
+ if (cluster != null) cluster.close(); } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadTopologyAndSchemaMetadata.java b/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadTopologyAndSchemaMetadata.java index e08de798210..aee6d5d737f 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadTopologyAndSchemaMetadata.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/basic/ReadTopologyAndSchemaMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,50 +17,51 @@ */ package com.datastax.driver.examples.basic; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.KeyspaceMetadata; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.TableMetadata; /** - * Gathers information about a Cassandra cluster's topology (which nodes belong to the cluster) and schema (what - * keyspaces, tables, etc. exist in this cluster). - *

- * Preconditions:
- * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT.
- *
- * Side effects: none.
+ * Gathers information about a Cassandra cluster's topology (which nodes belong to the cluster) and
+ * schema (what keyspaces, tables, etc. exist in this cluster).
+ *
+ *
+ * Preconditions: - a Cassandra cluster is running and accessible through the contacts points
+ * identified by CONTACT_POINTS and PORT.
+ *
+ *

    Side effects: none. * - * @see Java driver online manual + * @see Java Driver online manual */ public class ReadTopologyAndSchemaMetadata { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - public static void main(String[] args) { + public static void main(String[] args) { - Cluster cluster = null; - try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .build(); + Cluster cluster = null; + try { + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(PORT).build(); - Metadata metadata = cluster.getMetadata(); - System.out.printf("Connected to cluster: %s%n", metadata.getClusterName()); + Metadata metadata = cluster.getMetadata(); + System.out.printf("Connected to cluster: %s%n", metadata.getClusterName()); - for (Host host : metadata.getAllHosts()) { - System.out.printf("Datatacenter: %s; Host: %s; Rack: %s%n", - host.getDatacenter(), host.getAddress(), host.getRack()); - } + for (Host host : metadata.getAllHosts()) { + System.out.printf( + "Datatacenter: %s; Host: %s; Rack: %s%n", + host.getDatacenter(), host.getEndPoint(), host.getRack()); + } - for (KeyspaceMetadata keyspace : metadata.getKeyspaces()) { - for (TableMetadata table : keyspace.getTables()) { - System.out.printf("Keyspace: %s; Table: %s%n", - keyspace.getName(), table.getName()); - } - } - - } finally { - if (cluster != null) - cluster.close(); + for (KeyspaceMetadata keyspace : metadata.getKeyspaces()) { + for (TableMetadata table : keyspace.getTables()) { + System.out.printf("Keyspace: %s; Table: %s%n", keyspace.getName(), table.getName()); } + } + + } finally { + if (cluster != null) cluster.close(); } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java b/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java index 74e46c9ef8f..e28120295aa 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +17,17 @@ */ package com.datastax.driver.examples.datatypes; -import com.datastax.driver.core.*; +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; import com.datastax.driver.core.utils.Bytes; - -import java.io.*; +import java.io.Closeable; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.HashMap; @@ -26,230 +35,245 @@ /** * Inserts and retrieves values in BLOB columns. - *

- * By default, the Java driver maps this type to {@link java.nio.ByteBuffer}. The ByteBuffer API is a bit tricky to use
- * at times, so we will show common pitfalls as well. We strongly recommend that you read the {@link java.nio.Buffer}
- * and {@link ByteBuffer} API docs and become familiar with the capacity, limit and position properties.
- * This tutorial might also help.
- *
- * Preconditions:
- * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT;
- * - FILE references an existing file.
- *
- * Side effects:
- * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused;
- * - creates a table "examples.blobs". If it already exists, it will be reused;
- * - inserts data in the table.
+ *
+ * By default, the Java Driver maps this type to {@link java.nio.ByteBuffer}. The ByteBuffer API
+ * is a bit tricky to use at times, so we will show common pitfalls as well. We strongly recommend
+ * that you read the {@link java.nio.Buffer} and {@link ByteBuffer} API docs and become familiar
+ * with the capacity, limit and position properties. This tutorial might also help.
+ *
+ *
+ * Preconditions: - a Cassandra cluster is running and accessible through the contacts points
+ * identified by CONTACT_POINTS and PORT; - FILE references an existing file.
+ *
+ *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.blobs". If it already exists, it + * will be reused; - inserts data in the table. */ public class Blobs { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - static File FILE = new File(Blobs.class.getResource("/cassandra_logo.png").getFile()); - - public static void main(String[] args) throws IOException { - Cluster cluster = null; - try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .build(); - Session session = cluster.connect(); - - createSchema(session); - allocateAndInsert(session); - retrieveSimpleColumn(session); - retrieveMapColumn(session); - insertConcurrent(session); - insertFromAndRetrieveToFile(session); - } finally { - if (cluster != null) cluster.close(); - } - } - - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.blobs(k int PRIMARY KEY, b blob, m map)"); + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; + + static File FILE = new File(Blobs.class.getResource("/cassandra_logo.png").getFile()); + + public static void main(String[] args) throws IOException { + Cluster cluster = null; + try { + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(PORT).build(); + Session session = cluster.connect(); + + createSchema(session); + allocateAndInsert(session); + retrieveSimpleColumn(session); + retrieveMapColumn(session); + insertConcurrent(session); + insertFromAndRetrieveToFile(session); + } finally { + if (cluster != null) cluster.close(); } - - private static void allocateAndInsert(Session session) { - // One way to get a byte buffer is to allocate it and fill it yourself: - ByteBuffer buffer = ByteBuffer.allocate(16); - while (buffer.hasRemaining()) - buffer.put((byte) 0xFF); - - // Don't forget to flip! The driver expects a buffer that is ready for reading. That is, it will consider all - // the data between buffer.position() and buffer.limit(). - // Right now we are positioned at the end because we just finished writing, so if we passed the buffer as-is it - // would appear to be empty: - assert buffer.limit() - buffer.position() == 0; - - buffer.flip(); - // Now position is back to the beginning, so the driver will see all 16 bytes. - assert buffer.limit() - buffer.position() == 16; - - Map map = new HashMap(); - map.put("test", buffer); - session.execute("INSERT INTO examples.blobs (k, b, m) VALUES (1, ?, ?)", - buffer, map); - } - - private static void retrieveSimpleColumn(Session session) { - Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); - - ByteBuffer buffer = row.getBytes("b"); - - // The driver always returns buffers that are ready for reading. - assert buffer.limit() - buffer.position() == 16; - - // One way to read from the buffer is to use absolute getters. Do NOT start reading at index 0, as the buffer - // might start at a different position (we'll see an example of that later). - for (int i = buffer.position(); i < buffer.limit(); i++) { - byte b = buffer.get(i); - assert b == (byte) 0xFF; - } - - // Another way is to use relative getters. 
- while (buffer.hasRemaining()) { - byte b = buffer.get(); - assert b == (byte) 0xFF; - } - // Note that relative getters change the position, so when we're done reading we're at the end again. - assert buffer.position() == buffer.limit(); - - // Reset the position for the next operation. - buffer.flip(); - - // Yet another way is to convert the buffer to a byte array. Do NOT use buffer.array(), because it returns the - // buffer's *backing array*, which is not the same thing as its contents: - // - not all byte buffers have backing arrays - // - even then, the backing array might be larger than the buffer's contents - // - // The driver provides a utility method that handles those details for you: - byte[] array = Bytes.getArray(buffer); - assert array.length == 16; - for (byte b : array) { - assert b == (byte) 0xFF; - } - } - - private static void retrieveMapColumn(Session session) { - Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); - - // The map columns illustrates the pitfalls with position() and array(). - Map m = row.getMap("m", String.class, ByteBuffer.class); - ByteBuffer buffer = m.get("test"); - - // We did get back a buffer that contains 16 bytes as expected. - assert buffer.limit() - buffer.position() == 16; - // However, it is not positioned at 0. And you can also see that its backing array contains more than 16 bytes. - // What happens is that the buffer is a "view" of the last 16 of a 32-byte array. - // This is an implementation detail and you shouldn't have to worry about it if you process the buffer correctly - // (don't iterate from 0, use Bytes.getArray()). - assert buffer.position() == 16; - assert buffer.array().length == 32; - } - - private static void insertConcurrent(Session session) { - PreparedStatement preparedStatement = session.prepare("INSERT INTO examples.blobs (k, b) VALUES (1, :b)"); - - // This is another convenient utility provided by the driver. It's useful for tests. - ByteBuffer buffer = Bytes.fromHexString("0xffffff"); - - // When you pass a byte buffer to a bound statement, it creates a shallow copy internally with the - // buffer.duplicate() method. - BoundStatement boundStatement = preparedStatement.bind(); - boundStatement.setBytes("b", buffer); - - // This means you can now move in the original buffer, without affecting the insertion if it happens later. - buffer.position(buffer.limit()); - - session.execute(boundStatement); - Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - assert Bytes.toHexString(row.getBytes("b")).equals("0xffffff"); - - buffer.flip(); - - // HOWEVER duplicate() only performs a shallow copy. The two buffers still share the same contents. So if you - // modify the contents of the original buffer, this will affect another execution of the bound statement. - buffer.put(0, (byte) 0xaa); - session.execute(boundStatement); - row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - assert Bytes.toHexString(row.getBytes("b")).equals("0xaaffff"); - - // This will also happen if you use the async API, e.g. create the bound statement, call executeAsync() on it - // and reuse the buffer immediately. - - // If you reuse buffers concurrently and want to avoid those issues, perform a deep copy of the buffer before - // passing it to the bound statement. 
- int startPosition = buffer.position(); - ByteBuffer buffer2 = ByteBuffer.allocate(buffer.limit() - startPosition); - buffer2.put(buffer); - buffer.position(startPosition); - buffer2.flip(); - boundStatement.setBytes("b", buffer2); - session.execute(boundStatement); - - // Note: unlike BoundStatement, SimpleStatement does not duplicate its arguments, so even the position will be - // affected if you change it before executing the statement. Again, resort to deep copies if required. + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.blobs(k int PRIMARY KEY, b blob, m map)"); + } + + private static void allocateAndInsert(Session session) { + // One way to get a byte buffer is to allocate it and fill it yourself: + ByteBuffer buffer = ByteBuffer.allocate(16); + while (buffer.hasRemaining()) buffer.put((byte) 0xFF); + + // Don't forget to flip! The driver expects a buffer that is ready for reading. That is, it will + // consider all + // the data between buffer.position() and buffer.limit(). + // Right now we are positioned at the end because we just finished writing, so if we passed the + // buffer as-is it + // would appear to be empty: + assert buffer.limit() - buffer.position() == 0; + + buffer.flip(); + // Now position is back to the beginning, so the driver will see all 16 bytes. + assert buffer.limit() - buffer.position() == 16; + + Map map = new HashMap(); + map.put("test", buffer); + session.execute("INSERT INTO examples.blobs (k, b, m) VALUES (1, ?, ?)", buffer, map); + } + + private static void retrieveSimpleColumn(Session session) { + Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); + + ByteBuffer buffer = row.getBytes("b"); + + // The driver always returns buffers that are ready for reading. + assert buffer.limit() - buffer.position() == 16; + + // One way to read from the buffer is to use absolute getters. Do NOT start reading at index 0, + // as the buffer + // might start at a different position (we'll see an example of that later). + for (int i = buffer.position(); i < buffer.limit(); i++) { + byte b = buffer.get(i); + assert b == (byte) 0xFF; } - private static void insertFromAndRetrieveToFile(Session session) throws IOException { - ByteBuffer buffer = readAll(FILE); - session.execute("INSERT INTO examples.blobs (k, b) VALUES (1, ?)", buffer); - - File tmpFile = File.createTempFile("blob", ".png"); - System.out.printf("Writing retrieved buffer to %s%n", tmpFile.getAbsoluteFile()); - - Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - writeAll(row.getBytes("b"), tmpFile); + // Another way is to use relative getters. + while (buffer.hasRemaining()) { + byte b = buffer.get(); + assert b == (byte) 0xFF; } - - // Note: - // - this is written with Java 6 APIs; if you're on a more recent version this can be improved (try-with-resources, - // new-new io...) - // - this reads the whole file in memory in one go. 
If your file does not fit in memory you should probably not - // insert it into Cassandra either ;) - private static ByteBuffer readAll(File file) throws IOException { - FileInputStream inputStream = null; - boolean threw = false; - try { - inputStream = new FileInputStream(file); - FileChannel channel = inputStream.getChannel(); - ByteBuffer buffer = ByteBuffer.allocate((int) channel.size()); - channel.read(buffer); - buffer.flip(); - return buffer; - } catch (IOException e) { - threw = true; - throw e; - } finally { - close(inputStream, threw); - } + // Note that relative getters change the position, so when we're done reading we're at the end + // again. + assert buffer.position() == buffer.limit(); + + // Reset the position for the next operation. + buffer.flip(); + + // Yet another way is to convert the buffer to a byte array. Do NOT use buffer.array(), because + // it returns the + // buffer's *backing array*, which is not the same thing as its contents: + // - not all byte buffers have backing arrays + // - even then, the backing array might be larger than the buffer's contents + // + // The driver provides a utility method that handles those details for you: + byte[] array = Bytes.getArray(buffer); + assert array.length == 16; + for (byte b : array) { + assert b == (byte) 0xFF; } - - private static void writeAll(ByteBuffer buffer, File file) throws IOException { - FileOutputStream outputStream = null; - boolean threw = false; - try { - outputStream = new FileOutputStream(file); - FileChannel channel = outputStream.getChannel(); - channel.write(buffer); - } catch (IOException e) { - threw = true; - throw e; - } finally { - close(outputStream, threw); - } + } + + private static void retrieveMapColumn(Session session) { + Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); + + // The map columns illustrates the pitfalls with position() and array(). + Map m = row.getMap("m", String.class, ByteBuffer.class); + ByteBuffer buffer = m.get("test"); + + // We did get back a buffer that contains 16 bytes as expected. + assert buffer.limit() - buffer.position() == 16; + // However, it is not positioned at 0. And you can also see that its backing array contains more + // than 16 bytes. + // What happens is that the buffer is a "view" of the last 16 of a 32-byte array. + // This is an implementation detail and you shouldn't have to worry about it if you process the + // buffer correctly + // (don't iterate from 0, use Bytes.getArray()). + assert buffer.position() == 16; + assert buffer.array().length == 32; + } + + private static void insertConcurrent(Session session) { + PreparedStatement preparedStatement = + session.prepare("INSERT INTO examples.blobs (k, b) VALUES (1, :b)"); + + // This is another convenient utility provided by the driver. It's useful for tests. + ByteBuffer buffer = Bytes.fromHexString("0xffffff"); + + // When you pass a byte buffer to a bound statement, it creates a shallow copy internally with + // the + // buffer.duplicate() method. + BoundStatement boundStatement = preparedStatement.bind(); + boundStatement.setBytes("b", buffer); + + // This means you can now move in the original buffer, without affecting the insertion if it + // happens later. + buffer.position(buffer.limit()); + + session.execute(boundStatement); + Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); + assert Bytes.toHexString(row.getBytes("b")).equals("0xffffff"); + + buffer.flip(); + + // HOWEVER duplicate() only performs a shallow copy. 
The two buffers still share the same + // contents. So if you + // modify the contents of the original buffer, this will affect another execution of the bound + // statement. + buffer.put(0, (byte) 0xaa); + session.execute(boundStatement); + row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); + assert Bytes.toHexString(row.getBytes("b")).equals("0xaaffff"); + + // This will also happen if you use the async API, e.g. create the bound statement, call + // executeAsync() on it + // and reuse the buffer immediately. + + // If you reuse buffers concurrently and want to avoid those issues, perform a deep copy of the + // buffer before + // passing it to the bound statement. + int startPosition = buffer.position(); + ByteBuffer buffer2 = ByteBuffer.allocate(buffer.limit() - startPosition); + buffer2.put(buffer); + buffer.position(startPosition); + buffer2.flip(); + boundStatement.setBytes("b", buffer2); + session.execute(boundStatement); + + // Note: unlike BoundStatement, SimpleStatement does not duplicate its arguments, so even the + // position will be + // affected if you change it before executing the statement. Again, resort to deep copies if + // required. + } + + private static void insertFromAndRetrieveToFile(Session session) throws IOException { + ByteBuffer buffer = readAll(FILE); + session.execute("INSERT INTO examples.blobs (k, b) VALUES (1, ?)", buffer); + + File tmpFile = File.createTempFile("blob", ".png"); + System.out.printf("Writing retrieved buffer to %s%n", tmpFile.getAbsoluteFile()); + + Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); + writeAll(row.getBytes("b"), tmpFile); + } + + // Note: + // - this is written with Java 6 APIs; if you're on a more recent version this can be improved + // (try-with-resources, + // new-new io...) + // - this reads the whole file in memory in one go. 
If your file does not fit in memory you should + // probably not + // insert it into Cassandra either ;) + private static ByteBuffer readAll(File file) throws IOException { + FileInputStream inputStream = null; + boolean threw = false; + try { + inputStream = new FileInputStream(file); + FileChannel channel = inputStream.getChannel(); + ByteBuffer buffer = ByteBuffer.allocate((int) channel.size()); + channel.read(buffer); + buffer.flip(); + return buffer; + } catch (IOException e) { + threw = true; + throw e; + } finally { + close(inputStream, threw); } - - private static void close(Closeable inputStream, boolean threw) throws IOException { - if (inputStream != null) - try { - inputStream.close(); - } catch (IOException e) { - if (!threw) throw e; // else preserve original exception - } + } + + private static void writeAll(ByteBuffer buffer, File file) throws IOException { + FileOutputStream outputStream = null; + boolean threw = false; + try { + outputStream = new FileOutputStream(file); + FileChannel channel = outputStream.getChannel(); + channel.write(buffer); + } catch (IOException e) { + threw = true; + throw e; + } finally { + close(outputStream, threw); } + } + + private static void close(Closeable inputStream, boolean threw) throws IOException { + if (inputStream != null) + try { + inputStream.close(); + } catch (IOException e) { + if (!threw) throw e; // else preserve original exception + } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonColumn.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonColumn.java index 0d7212869ee..9990049d6a8 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonColumn.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonColumn.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +17,34 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.JacksonJsonCodec; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map a single table column of type {@code VARCHAR}, - * containing JSON payloads, into a Java object using - * the Jackson library. - *

- * This example makes usage of a custom {@link TypeCodec codec},
- * {@link JacksonJsonCodec}, which is declared in the driver-extras module.
- * If you plan to follow this example, make sure to include the following
- * Maven dependencies in your project:
+ * Illustrates how to map a single table column of type {@code VARCHAR}, containing JSON payloads,
+ * into a Java object using the Jackson library.
+ *
+ *
+ * This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}, which
+ * is declared in the driver-extras module. If you plan to follow this example, make sure to include
+ * the following Maven dependencies in your project:
+ *
 * <pre>{@code
 * <dependency>
- *     <groupId>com.datastax.cassandra</groupId>
+ *     <groupId>org.apache.cassandra</groupId>
 *     <artifactId>cassandra-driver-extras</artifactId>
 *     <version>${driver.version}</version>
 * </dependency>
@@ -44,127 +55,128 @@
 *     <version>${jackson.version}</version>
 * </dependency>
 * }</pre>
- * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder};
- * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the
- * queries in this class).
- *
- * Preconditions:
- * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT;
- *
- * Side effects:
- * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused;
- * - creates a table "examples.json_jackson_column". If it already exists, it will be reused;
- * - inserts data in the table.
+ *
+ * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder
+ * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily
+ * translatable to the queries in this class).
+ *
+ *
+ * Preconditions: - a Cassandra cluster is running and accessible through the contacts points
+ * identified by CONTACT_POINTS and PORT;
+ *
+ *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.json_jackson_column". If it + * already exists, it will be reused; - inserts data in the table. */ public class JacksonJsonColumn { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - public static void main(String[] args) { - Cluster cluster = null; - try { + public static void main(String[] args) { + Cluster cluster = null; + try { - // A codec to convert JSON payloads into User instances; - // this codec is declared in the driver-extras module - TypeCodec userCodec = new JacksonJsonCodec(User.class); + // A codec to convert JSON payloads into User instances; + // this codec is declared in the driver-extras module + TypeCodec userCodec = new JacksonJsonCodec(User.class); - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry().register(userCodec)) - .build(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec)) + .build(); - Session session = cluster.connect(); + Session session = cluster.connect(); - createSchema(session); - insertJsonColumn(session); - selectJsonColumn(session); + createSchema(session); + insertJsonColumn(session); + selectJsonColumn(session); - } finally { - if (cluster != null) cluster.close(); - } + } finally { + if (cluster != null) cluster.close(); } - - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jackson_column(" + - "id int PRIMARY KEY, json text)"); + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jackson_column(" + + "id int PRIMARY KEY, json text)"); + } + + // Mapping a User instance to a table column + private static void insertJsonColumn(Session session) { + + User alice = new User("alice", 30); + User bob = new User("bob", 35); + + // Build and execute a simple statement + Statement stmt = + insertInto("examples", "json_jackson_column") + .value("id", 1) + // the User object will be converted into a String and persisted into the VARCHAR column + // "json" + .value("json", alice); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare( + insertInto("examples", "json_jackson_column") + .value("id", bindMarker("id")) + .value("json", bindMarker("json"))); + session.execute(pst.bind().setInt("id", 2).set("json", bob, User.class)); + } + + // Retrieving User instances from a table column + private static void selectJsonColumn(Session session) { + + Statement stmt = select().from("examples", "json_jackson_column").where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + int id = row.getInt("id"); + // retrieve the JSON payload and convert it to a User 
instance + User user = row.get("json", User.class); + // it is also possible to retrieve the raw JSON payload + String json = row.getString("json"); + System.out.printf( + "Retrieved row:%n" + "id %d%n" + "user %s%n" + "user (raw) %s%n%n", + id, user, json); } + } - // Mapping a User instance to a table column - private static void insertJsonColumn(Session session) { - - User alice = new User("alice", 30); - User bob = new User("bob", 35); - - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jackson_column") - .value("id", 1) - // the User object will be converted into a String and persisted into the VARCHAR column "json" - .value("json", alice); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jackson_column") - .value("id", bindMarker("id")) - .value("json", bindMarker("json"))); - session.execute(pst.bind() - .setInt("id", 2) - .set("json", bob, User.class)); - } + @SuppressWarnings("unused") + public static class User { - // Retrieving User instances from a table column - private static void selectJsonColumn(Session session) { + private final String name; - Statement stmt = select() - .from("examples", "json_jackson_column") - .where(in("id", 1, 2)); + private final int age; - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a User instance - User user = row.get("json", User.class); - // it is also possible to retrieve the raw JSON payload - String json = row.getString("json"); - System.out.printf("Retrieved row:%n" + - "id %d%n" + - "user %s%n" + - "user (raw) %s%n%n", - id, user, json); - - } + @JsonCreator + public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { + this.name = name; + this.age = age; } - @SuppressWarnings("unused") - public static class User { - - private final String name; - - private final int age; - - @JsonCreator - public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { - this.name = name; - this.age = age; - } - - public String getName() { - return name; - } + public String getName() { + return name; + } - public int getAge() { - return age; - } + public int getAge() { + return age; + } - @Override - public String toString() { - return String.format("%s (%s)", name, age); - } + @Override + public String toString() { + return String.format("%s (%s)", name, age); } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonFunction.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonFunction.java index 5df584efb3e..bd94e716d1e 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonFunction.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonFunction.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,20 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fromJson; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.JacksonJsonCodec; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; @@ -23,22 +38,18 @@ import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map a single table column of an arbitrary type - * to a Java object using - * the Jackson library, - * and leveraging the {@code toJson()} and {@code fromJson()} functions - * introduced in Cassandra 2.2. - *

- * This example makes usage of a custom {@link TypeCodec codec},
- * {@link JacksonJsonCodec}, which is declared in the driver-extras module.
- * If you plan to follow this example, make sure to include the following
- * Maven dependencies in your project:
+ * Illustrates how to map a single table column of an arbitrary type to a Java object using
+ * the Jackson library, and leveraging the {@code
+ * toJson()} and {@code fromJson()} functions introduced in Cassandra 2.2.
+ *
+ *
+ * This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}, which
+ * is declared in the driver-extras module. If you plan to follow this example, make sure to include
+ * the following Maven dependencies in your project:
+ *
 * <pre>{@code
 * <dependency>
- *     <groupId>com.datastax.cassandra</groupId>
+ *     <groupId>org.apache.cassandra</groupId>
 *     <artifactId>cassandra-driver-extras</artifactId>
 *     <version>${driver.version}</version>
 * </dependency>
@@ -49,166 +60,176 @@
 *     <version>${jackson.version}</version>
 * </dependency>
 * }</pre>
- * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder};
- * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the
- * queries in this class).
- *
- * Preconditions:
- * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT;
- *
- * Side effects:
- * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused;
- * - creates a user-defined type (UDT) "examples.json_jackson_function_user". If it already exists, it will be reused;
- * - creates a table "examples.json_jackson_function". If it already exists, it will be reused;
- * - inserts data in the table.
 *
- * @see What’s New in Cassandra 2.2: JSON Support
+ * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder
+ * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily
+ * translatable to the queries in this class).
+ *
+ *
+ * Preconditions: - a Cassandra cluster is running and accessible through the contacts points
+ * identified by CONTACT_POINTS and PORT;
+ *
+ *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a user-defined type (UDT) + * "examples.json_jackson_function_user". If it already exists, it will be reused; - creates a table + * "examples.json_jackson_function". If it already exists, it will be reused; - inserts data in the + * table. + * + * @see What’s + * New in Cassandra 2.2: JSON Support */ public class JacksonJsonFunction { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - public static void main(String[] args) { - Cluster cluster = null; - try { + public static void main(String[] args) { + Cluster cluster = null; + try { - // A codec to convert JSON payloads into User instances; - // this codec is declared in the driver-extras module - TypeCodec userCodec = new JacksonJsonCodec(User.class); + // A codec to convert JSON payloads into User instances; + // this codec is declared in the driver-extras module + TypeCodec userCodec = new JacksonJsonCodec(User.class); - // A codec to convert generic JSON payloads into JsonNode instances - TypeCodec jsonNodeCodec = new JacksonJsonCodec(JsonNode.class); + // A codec to convert generic JSON payloads into JsonNode instances + TypeCodec jsonNodeCodec = new JacksonJsonCodec(JsonNode.class); - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry() - .register(userCodec) - .register(jsonNodeCodec)) - .build(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec).register(jsonNodeCodec)) + .build(); - Session session = cluster.connect(); + Session session = cluster.connect(); - createSchema(session); - insertFromJson(session); - selectToJson(session); + createSchema(session); + insertFromJson(session); + selectToJson(session); - } finally { - if (cluster != null) cluster.close(); - } + } finally { + if (cluster != null) cluster.close(); } - - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TYPE IF NOT EXISTS examples.json_jackson_function_user(" + - "name text, age int)"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jackson_function(" + - "id int PRIMARY KEY, user frozen, scores map)"); - } - - // Mapping JSON payloads to table columns of arbitrary types, - // using fromJson() function - private static void insertFromJson(Session session) { - - User alice = new User("alice", 30); - User bob = new User("bob", 35); - - ObjectNode aliceScores = JsonNodeFactory.instance.objectNode() - .put("call_of_duty", 4.8) - .put("pokemon_go", 9.7); - ObjectNode bobScores = JsonNodeFactory.instance.objectNode() - .put("zelda", 8.3) - .put("pokemon_go", 12.4); - - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jackson_function") - .value("id", 1) - // client-side, the User object will be converted into a JSON String; - // then, server-side, the fromJson() function will convert that JSON string - // into an instance of the json_jackson_function_user user-defined type (UDT), - // which will be persisted into the column "user" - .value("user", fromJson(alice)) - // same thing, but this time converting from - // a generic JsonNode to a JSON 
string, then from this string to a map - .value("scores", fromJson(aliceScores)); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jackson_function") - .value("id", bindMarker("id")) - .value("user", fromJson(bindMarker("user"))) - .value("scores", fromJson(bindMarker("scores")))); - session.execute(pst.bind() - .setInt("id", 2) - .set("user", bob, User.class) - // note that the codec requires that the type passed to the set() method - // be always JsonNode, and not a subclass of it, such as ObjectNode - .set("scores", bobScores, JsonNode.class)); - } - - // Retrieving JSON payloads from table columns of arbitrary types, - // using toJson() function - private static void selectToJson(Session session) { - - Statement stmt = select() - .column("id") - .toJson("user").as("user") - .toJson("scores").as("scores") - .from("examples", "json_jackson_function") - .where(in("id", 1, 2)); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a User instance - User user = row.get("user", User.class); - // it is also possible to retrieve the raw JSON payload - String userJson = row.getString("user"); - // retrieve the JSON payload and convert it to a JsonNode instance - // note that the codec requires that the type passed to the get() method + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TYPE IF NOT EXISTS examples.json_jackson_function_user(" + "name text, age int)"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jackson_function(" + + "id int PRIMARY KEY, user frozen, scores map)"); + } + + // Mapping JSON payloads to table columns of arbitrary types, + // using fromJson() function + private static void insertFromJson(Session session) { + + User alice = new User("alice", 30); + User bob = new User("bob", 35); + + ObjectNode aliceScores = + JsonNodeFactory.instance.objectNode().put("call_of_duty", 4.8).put("pokemon_go", 9.7); + ObjectNode bobScores = + JsonNodeFactory.instance.objectNode().put("zelda", 8.3).put("pokemon_go", 12.4); + + // Build and execute a simple statement + Statement stmt = + insertInto("examples", "json_jackson_function") + .value("id", 1) + // client-side, the User object will be converted into a JSON String; + // then, server-side, the fromJson() function will convert that JSON string + // into an instance of the json_jackson_function_user user-defined type (UDT), + // which will be persisted into the column "user" + .value("user", fromJson(alice)) + // same thing, but this time converting from + // a generic JsonNode to a JSON string, then from this string to a map + .value("scores", fromJson(aliceScores)); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare( + insertInto("examples", "json_jackson_function") + .value("id", bindMarker("id")) + .value("user", fromJson(bindMarker("user"))) + 
.value("scores", fromJson(bindMarker("scores")))); + session.execute( + pst.bind() + .setInt("id", 2) + .set("user", bob, User.class) + // note that the codec requires that the type passed to the set() method // be always JsonNode, and not a subclass of it, such as ObjectNode - JsonNode scores = row.get("scores", JsonNode.class); - // it is also possible to retrieve the raw JSON payload - String scoresJson = row.getString("scores"); - System.out.printf("Retrieved row:%n" + - "id %d%n" + - "user %s%n" + - "user (raw) %s%n" + - "scores %s%n" + - "scores (raw) %s%n%n", - id, user, userJson, scores, scoresJson); - } + .set("scores", bobScores, JsonNode.class)); + } + + // Retrieving JSON payloads from table columns of arbitrary types, + // using toJson() function + private static void selectToJson(Session session) { + + Statement stmt = + select() + .column("id") + .toJson("user") + .as("user") + .toJson("scores") + .as("scores") + .from("examples", "json_jackson_function") + .where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + int id = row.getInt("id"); + // retrieve the JSON payload and convert it to a User instance + User user = row.get("user", User.class); + // it is also possible to retrieve the raw JSON payload + String userJson = row.getString("user"); + // retrieve the JSON payload and convert it to a JsonNode instance + // note that the codec requires that the type passed to the get() method + // be always JsonNode, and not a subclass of it, such as ObjectNode + JsonNode scores = row.get("scores", JsonNode.class); + // it is also possible to retrieve the raw JSON payload + String scoresJson = row.getString("scores"); + System.out.printf( + "Retrieved row:%n" + + "id %d%n" + + "user %s%n" + + "user (raw) %s%n" + + "scores %s%n" + + "scores (raw) %s%n%n", + id, user, userJson, scores, scoresJson); } + } - @SuppressWarnings("unused") - public static class User { + @SuppressWarnings("unused") + public static class User { - private final String name; + private final String name; - private final int age; + private final int age; - @JsonCreator - public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { - this.name = name; - this.age = age; - } + @JsonCreator + public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { + this.name = name; + this.age = age; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public int getAge() { - return age; - } + public int getAge() { + return age; + } - @Override - public String toString() { - return String.format("%s (%s)", name, age); - } + @Override + public String toString() { + return String.format("%s (%s)", name, age); } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonRow.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonRow.java index 2bec1489efb..32d44d28eee 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonRow.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/JacksonJsonRow.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,26 +17,35 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.JacksonJsonCodec; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map an entire table row to a Java object using - * the Jackson library, - * and leveraging the {@code SELECT JSON} and {@code INSERT JSON} syntaxes - * introduced in Cassandra 2.2. - *

- * This example makes usage of a custom {@link TypeCodec codec},
- * {@link JacksonJsonCodec}, which is declared in the driver-extras module.
- * If you plan to follow this example, make sure to include the following
- * Maven dependencies in your project:
+ * Illustrates how to map an entire table row to a Java object using
+ * the Jackson library, and leveraging the {@code
+ * SELECT JSON} and {@code INSERT JSON} syntaxes introduced in Cassandra 2.2.
+ *
+ *
+ * This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}, which
+ * is declared in the driver-extras module. If you plan to follow this example, make sure to include
+ * the following Maven dependencies in your project:
+ *
 * <pre>{@code
 * <dependency>
- *     <groupId>com.datastax.cassandra</groupId>
+ *     <groupId>org.apache.cassandra</groupId>
 *     <artifactId>cassandra-driver-extras</artifactId>
 *     <version>${driver.version}</version>
 * </dependency>
@@ -45,121 +56,126 @@
 *     <version>${jackson.version}</version>
 * </dependency>
 * }</pre>
- * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder};
- * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the
- * queries in this class).
- *
- * Preconditions:
- * - a Cassandra 2.2+ cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT;
- *
- * Side effects:
- * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused;
- * - creates a table "examples.json_jackson_row". If it already exists, it will be reused;
- * - inserts data in the table.
 *
- * @see What’s New in Cassandra 2.2: JSON Support
+ * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder
+ * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily
+ * translatable to the queries in this class).
+ *
+ *
+ * Preconditions: - a Cassandra 2.2+ cluster is running and accessible through the contacts
+ * points identified by CONTACT_POINTS and PORT;
+ *
+ *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.json_jackson_row". If it already + * exists, it will be reused; - inserts data in the table. + * + * @see What’s + * New in Cassandra 2.2: JSON Support */ public class JacksonJsonRow { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - public static void main(String[] args) { - Cluster cluster = null; - try { + public static void main(String[] args) { + Cluster cluster = null; + try { - // A codec to convert JSON payloads into User instances; - // this codec is declared in the driver-extras module - TypeCodec userCodec = new JacksonJsonCodec(User.class); + // A codec to convert JSON payloads into User instances; + // this codec is declared in the driver-extras module + TypeCodec userCodec = new JacksonJsonCodec(User.class); - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry().register(userCodec)) - .build(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec)) + .build(); - Session session = cluster.connect(); + Session session = cluster.connect(); - createSchema(session); - insertJsonRow(session); - selectJsonRow(session); + createSchema(session); + insertJsonRow(session); + selectJsonRow(session); - } finally { - if (cluster != null) cluster.close(); - } + } finally { + if (cluster != null) cluster.close(); } - - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jackson_row(" + - "id int PRIMARY KEY, name text, age int)"); + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jackson_row(" + + "id int PRIMARY KEY, name text, age int)"); + } + + // Mapping a User instance to a table row using INSERT JSON + private static void insertJsonRow(Session session) { + // Build and execute a simple statement + Statement stmt = insertInto("examples", "json_jackson_row").json(new User(1, "alice", 30)); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare(insertInto("examples", "json_jackson_row").json(bindMarker("user"))); + session.execute(pst.bind().set("user", new User(2, "bob", 35), User.class)); + } + + // Retrieving User instances from table rows using SELECT JSON + private static void selectJsonRow(Session session) { + + // Reading the whole row as a JSON object + Statement stmt = select().json().from("examples", "json_jackson_row").where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + // SELECT JSON returns only one column for each row, of type VARCHAR, + // containing the row as a JSON payload + User user = row.get(0, User.class); + System.out.printf("Retrieved user: %s%n", user); } + } - 
// Mapping a User instance to a table row using INSERT JSON - private static void insertJsonRow(Session session) { - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jackson_row") - .json(new User(1, "alice", 30)); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jackson_row").json(bindMarker("user"))); - session.execute(pst.bind() - .set("user", new User(2, "bob", 35), User.class)); - } + @SuppressWarnings("unused") + public static class User { - // Retrieving User instances from table rows using SELECT JSON - private static void selectJsonRow(Session session) { + private final int id; - // Reading the whole row as a JSON object - Statement stmt = select().json() - .from("examples", "json_jackson_row") - .where(in("id", 1, 2)); + private final String name; - ResultSet rows = session.execute(stmt); + private final int age; - for (Row row : rows) { - // SELECT JSON returns only one column for each row, of type VARCHAR, - // containing the row as a JSON payload - User user = row.get(0, User.class); - System.out.printf("Retrieved user: %s%n", user); - } + @JsonCreator + public User( + @JsonProperty("id") int id, + @JsonProperty("name") String name, + @JsonProperty("age") int age) { + this.id = id; + this.name = name; + this.age = age; } - @SuppressWarnings("unused") - public static class User { - - private final int id; - - private final String name; - - private final int age; - - @JsonCreator - public User(@JsonProperty("id") int id, @JsonProperty("name") String name, @JsonProperty("age") int age) { - this.id = id; - this.name = name; - this.age = age; - } - - public int getId() { - return id; - } + public int getId() { + return id; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public int getAge() { - return age; - } + public int getAge() { + return age; + } - @Override - public String toString() { - return String.format("%s (id %d, age %d)", name, id, age); - } + @Override + public String toString() { + return String.format("%s (id %d, age %d)", name, id, age); } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonColumn.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonColumn.java index de7c96a5f94..5eb58ffabea 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonColumn.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonColumn.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +17,36 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.Jsr353JsonCodec; - import javax.json.Json; import javax.json.JsonObject; import javax.json.JsonStructure; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map a single table column of type {@code VARCHAR}, - * containing JSON payloads, into a Java object using - * the Java API for JSON processing. - *

    - * This example makes usage of a custom {@link TypeCodec codec}, - * {@link Jsr353JsonCodec}, which is declared in the driver-extras module. - * If you plan to follow this example, make sure to include the following - * Maven dependencies in your project: + * Illustrates how to map a single table column of type {@code VARCHAR}, containing JSON payloads, + * into a Java object using the Java API for JSON + * processing. + * + *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which + * is declared in the driver-extras module. If you plan to follow this example, make sure to include + * the following Maven dependencies in your project: + * *

    {@code
      * <dependency>
    - *     <groupId>com.datastax.cassandra</groupId>
    + *     <groupId>org.apache.cassandra</groupId>
      *     <artifactId>cassandra-driver-extras</artifactId>
      *     <version>${driver.version}</version>
      * </dependency>
    @@ -53,112 +64,110 @@
      *     <scope>runtime</scope>
      * </dependency>
      * }
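With those artifacts on the classpath, the only driver-side wiring the example needs is registering the codec, which the code further down does when building the Cluster. As a condensed sketch (the CodecSetup wrapper is ours, and the register-after-build variant is an alternative the example itself does not use):

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.extras.codecs.json.Jsr353JsonCodec;

class CodecSetup {

  // Registration at build time, as the example below does.
  static Cluster buildCluster(String host, int port) {
    return Cluster.builder()
        .addContactPoint(host)
        .withPort(port)
        .withCodecRegistry(new CodecRegistry().register(new Jsr353JsonCodec()))
        .build();
  }

  // Alternatively, the codec can be added to an already-built cluster's registry.
  static void registerLater(Cluster cluster) {
    cluster.getConfiguration().getCodecRegistry().register(new Jsr353JsonCodec());
  }
}
```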
    - * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder}; - * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the - * queries in this class). - *

    - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT; - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a table "examples.json_jsr353_column". If it already exists, it will be reused; - * - inserts data in the table. + * + * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder + * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily + * translatable to the queries in this class). + * + *

    Preconditions: - a Cassandra cluster is running and accessible through the contacts points + * identified by CONTACT_POINTS and PORT; + * + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.json_jsr353_column". If it already + * exists, it will be reused; - inserts data in the table. */ public class Jsr353JsonColumn { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - public static void main(String[] args) { - Cluster cluster = null; - try { + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - // A codec to convert JSON payloads into JsonObject instances; - // this codec is declared in the driver-extras module - Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); + public static void main(String[] args) { + Cluster cluster = null; + try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry().register(userCodec)) - .build(); + // A codec to convert JSON payloads into JsonObject instances; + // this codec is declared in the driver-extras module + Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); - Session session = cluster.connect(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec)) + .build(); - createSchema(session); - insertJsonColumn(session); - selectJsonColumn(session); + Session session = cluster.connect(); - } finally { - if (cluster != null) cluster.close(); - } - } + createSchema(session); + insertJsonColumn(session); + selectJsonColumn(session); - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jsr353_column(" + - "id int PRIMARY KEY, json text)"); + } finally { + if (cluster != null) cluster.close(); } - - // Mapping a JSON object to a table column - private static void insertJsonColumn(Session session) { - - JsonObject alice = Json.createObjectBuilder() - .add("name", "alice") - .add("age", 30) - .build(); - - JsonObject bob = Json.createObjectBuilder() - .add("name", "bob") - .add("age", 35) - .build(); - - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jsr353_column") - .value("id", 1) - // the JSON object will be converted into a String and persisted into the VARCHAR column "json" - .value("json", alice); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jsr353_column") - .value("id", bindMarker("id")) - .value("json", bindMarker("json"))); - session.execute(pst.bind() - .setInt("id", 2) - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("json", bob, JsonStructure.class)); + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jsr353_column(" + + "id int PRIMARY KEY, json text)"); + } + + // Mapping a JSON object to a table column + private static void 
insertJsonColumn(Session session) { + + JsonObject alice = Json.createObjectBuilder().add("name", "alice").add("age", 30).build(); + + JsonObject bob = Json.createObjectBuilder().add("name", "bob").add("age", 35).build(); + + // Build and execute a simple statement + Statement stmt = + insertInto("examples", "json_jsr353_column") + .value("id", 1) + // the JSON object will be converted into a String and persisted into the VARCHAR column + // "json" + .value("json", alice); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare( + insertInto("examples", "json_jsr353_column") + .value("id", bindMarker("id")) + .value("json", bindMarker("json"))); + session.execute( + pst.bind() + .setInt("id", 2) + // note that the codec requires that the type passed to the set() method + // be always JsonStructure, and not a subclass of it, such as JsonObject + .set("json", bob, JsonStructure.class)); + } + + // Retrieving JSON objects from a table column + private static void selectJsonColumn(Session session) { + + Statement stmt = select().from("examples", "json_jsr353_column").where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + int id = row.getInt("id"); + // retrieve the JSON payload and convert it to a JsonObject instance + // note that the codec requires that the type passed to the get() method + // be always JsonStructure, and not a subclass of it, such as JsonObject, + // hence the need to downcast to JsonObject manually + JsonObject user = (JsonObject) row.get("json", JsonStructure.class); + // it is also possible to retrieve the raw JSON payload + String json = row.getString("json"); + System.out.printf( + "Retrieved row:%n" + "id %d%n" + "user %s%n" + "user (raw) %s%n%n", + id, user, json); } - - // Retrieving JSON objects from a table column - private static void selectJsonColumn(Session session) { - - Statement stmt = select() - .from("examples", "json_jsr353_column") - .where(in("id", 1, 2)); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a JsonObject instance - // note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get("json", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String json = row.getString("json"); - System.out.printf("Retrieved row:%n" + - "id %d%n" + - "user %s%n" + - "user (raw) %s%n%n", - id, user, json); - } - } - + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonFunction.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonFunction.java index 78939090178..377f82764d4 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonFunction.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonFunction.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,29 +17,37 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fromJson; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.Jsr353JsonCodec; - import javax.json.Json; import javax.json.JsonObject; import javax.json.JsonStructure; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map a single table column of an arbitrary type - * to a Java object using - * the Java API for JSON processing, - * and leveraging the {@code toJson()} and {@code fromJson()} functions - * introduced in Cassandra 2.2. - *

    - * This example makes usage of a custom {@link TypeCodec codec}, - * {@link Jsr353JsonCodec}, which is declared in the driver-extras module. - * If you plan to follow this example, make sure to include the following - * Maven dependencies in your project: + * Illustrates how to map a single table column of an arbitrary type to a Java object using the Java API for JSON processing, and leveraging the + * {@code toJson()} and {@code fromJson()} functions introduced in Cassandra 2.2. + * + *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which + * is declared in the driver-extras module. If you plan to follow this example, make sure to include + * the following Maven dependencies in your project: + * *

    {@code
      * <dependency>
    - *     <groupId>com.datastax.cassandra</groupId>
    + *     <groupId>org.apache.cassandra</groupId>
      *     <artifactId>cassandra-driver-extras</artifactId>
      *     <version>${driver.version}</version>
      * </dependency>
    @@ -55,147 +65,149 @@
      *     <scope>runtime</scope>
      * </dependency>
      * }
    - * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder}; - * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the - * queries in this class). - *

    - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT; - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a user-defined type (UDT) "examples.json_jsr353_function_user". If it already exists, it will be reused; - * - creates a table "examples.json_jsr353_function". If it already exists, it will be reused; - * - inserts data in the table. * - * @see What’s New in Cassandra 2.2: JSON Support + * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder + * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily + * translatable to the queries in this class). + * + *
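Since the Javadoc above notes that these QueryBuilder calls translate directly to the "core" API, a rough hand-written translation of the fromJson()/toJson() round trip could look like this; it targets the keyspace, table and UDT created by this example, but the CoreApiTranslation wrapper itself is only an illustration:

```java
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import javax.json.Json;
import javax.json.JsonObject;

class CoreApiTranslation {

  // Roughly what the QueryBuilder statements in this class become when written
  // as plain CQL strings against the "core" API (see PlainTextJson).
  static void insertAndSelect(Session session) {
    JsonObject alice = Json.createObjectBuilder().add("name", "alice").add("age", 30).build();

    // fromJson() converts the bound JSON text into a UDT value server-side.
    session.execute(
        "INSERT INTO examples.json_jsr353_function (id, user) VALUES (?, fromJson(?))",
        1,
        alice.toString());

    // toJson() converts the stored UDT back into a JSON string.
    Row row =
        session
            .execute(
                "SELECT toJson(user) AS user FROM examples.json_jsr353_function WHERE id = ?", 1)
            .one();
    System.out.printf("user as JSON: %s%n", row.getString("user"));
  }
}
```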

    Preconditions: - a Cassandra cluster is running and accessible through the contacts points + * identified by CONTACT_POINTS and PORT; + * + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a user-defined type (UDT) + * "examples.json_jsr353_function_user". If it already exists, it will be reused; - creates a table + * "examples.json_jsr353_function". If it already exists, it will be reused; - inserts data in the + * table. + * + * @see What’s + * New in Cassandra 2.2: JSON Support */ public class Jsr353JsonFunction { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - public static void main(String[] args) { - Cluster cluster = null; - try { + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - // A codec to convert JSON payloads into JsonObject instances; - // this codec is declared in the driver-extras module - Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); + public static void main(String[] args) { + Cluster cluster = null; + try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry() - .register(userCodec)) - .build(); + // A codec to convert JSON payloads into JsonObject instances; + // this codec is declared in the driver-extras module + Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); - Session session = cluster.connect(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec)) + .build(); - createSchema(session); - insertFromJson(session); - selectToJson(session); + Session session = cluster.connect(); - } finally { - if (cluster != null) cluster.close(); - } - } + createSchema(session); + insertFromJson(session); + selectToJson(session); - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TYPE IF NOT EXISTS examples.json_jsr353_function_user(" + - "name text, age int)"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jsr353_function(" + - "id int PRIMARY KEY, user frozen, scores map)"); + } finally { + if (cluster != null) cluster.close(); } - - // Mapping JSON payloads to table columns of arbitrary types, - // using fromJson() function - private static void insertFromJson(Session session) { - - JsonObject alice = Json.createObjectBuilder() - .add("name", "alice") - .add("age", 30) - .build(); - - JsonObject bob = Json.createObjectBuilder() - .add("name", "bob") - .add("age", 35) - .build(); - - JsonObject aliceScores = Json.createObjectBuilder() - .add("call_of_duty", 4.8) - .add("pokemon_go", 9.7) - .build(); - - JsonObject bobScores = Json.createObjectBuilder() - .add("zelda", 8.3) - .add("pokemon_go", 12.4) - .build(); - - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jsr353_function") - .value("id", 1) - // client-side, the JsonObject will be converted into a JSON String; - // then, server-side, the fromJson() function will convert that JSON string - // into an instance of the json_jsr353_function_user user-defined type (UDT), - // which will be persisted into the column "user" - .value("user", fromJson(alice)) - // same thing, but this time converting from - // a JsonObject to a JSON string, then from this string to a map - .value("scores", fromJson(aliceScores)); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local 
variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jsr353_function") - .value("id", bindMarker("id")) - .value("user", fromJson(bindMarker("user"))) - .value("scores", fromJson(bindMarker("scores")))); - session.execute(pst.bind() - .setInt("id", 2) - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("user", bob, JsonStructure.class) - .set("scores", bobScores, JsonStructure.class)); + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TYPE IF NOT EXISTS examples.json_jsr353_function_user(" + "name text, age int)"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jsr353_function(" + + "id int PRIMARY KEY, user frozen, scores map)"); + } + + // Mapping JSON payloads to table columns of arbitrary types, + // using fromJson() function + private static void insertFromJson(Session session) { + + JsonObject alice = Json.createObjectBuilder().add("name", "alice").add("age", 30).build(); + + JsonObject bob = Json.createObjectBuilder().add("name", "bob").add("age", 35).build(); + + JsonObject aliceScores = + Json.createObjectBuilder().add("call_of_duty", 4.8).add("pokemon_go", 9.7).build(); + + JsonObject bobScores = + Json.createObjectBuilder().add("zelda", 8.3).add("pokemon_go", 12.4).build(); + + // Build and execute a simple statement + Statement stmt = + insertInto("examples", "json_jsr353_function") + .value("id", 1) + // client-side, the JsonObject will be converted into a JSON String; + // then, server-side, the fromJson() function will convert that JSON string + // into an instance of the json_jsr353_function_user user-defined type (UDT), + // which will be persisted into the column "user" + .value("user", fromJson(alice)) + // same thing, but this time converting from + // a JsonObject to a JSON string, then from this string to a map + .value("scores", fromJson(aliceScores)); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare( + insertInto("examples", "json_jsr353_function") + .value("id", bindMarker("id")) + .value("user", fromJson(bindMarker("user"))) + .value("scores", fromJson(bindMarker("scores")))); + session.execute( + pst.bind() + .setInt("id", 2) + // note that the codec requires that the type passed to the set() method + // be always JsonStructure, and not a subclass of it, such as JsonObject + .set("user", bob, JsonStructure.class) + .set("scores", bobScores, JsonStructure.class)); + } + + // Retrieving JSON payloads from table columns of arbitrary types, + // using toJson() function + private static void selectToJson(Session session) { + + Statement stmt = + select() + .column("id") + .toJson("user") + .as("user") + .toJson("scores") + .as("scores") + .from("examples", "json_jsr353_function") + .where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + int id = row.getInt("id"); + // retrieve the JSON payload and convert it to a JsonObject instance + // note that the codec 
requires that the type passed to the get() method + // be always JsonStructure, and not a subclass of it, such as JsonObject, + // hence the need to downcast to JsonObject manually + JsonObject user = (JsonObject) row.get("user", JsonStructure.class); + // it is also possible to retrieve the raw JSON payload + String userJson = row.getString("user"); + // retrieve the JSON payload and convert it to a JsonObject instance + JsonObject scores = (JsonObject) row.get("scores", JsonStructure.class); + // it is also possible to retrieve the raw JSON payload + String scoresJson = row.getString("scores"); + System.out.printf( + "Retrieved row:%n" + + "id %d%n" + + "user %s%n" + + "user (raw) %s%n" + + "scores %s%n" + + "scores (raw) %s%n%n", + id, user, userJson, scores, scoresJson); } - - // Retrieving JSON payloads from table columns of arbitrary types, - // using toJson() function - private static void selectToJson(Session session) { - - Statement stmt = select() - .column("id") - .toJson("user").as("user") - .toJson("scores").as("scores") - .from("examples", "json_jsr353_function") - .where(in("id", 1, 2)); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a JsonObject instance - // note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get("user", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String userJson = row.getString("user"); - // retrieve the JSON payload and convert it to a JsonObject instance - JsonObject scores = (JsonObject) row.get("scores", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String scoresJson = row.getString("scores"); - System.out.printf("Retrieved row:%n" + - "id %d%n" + - "user %s%n" + - "user (raw) %s%n" + - "scores %s%n" + - "scores (raw) %s%n%n", - id, user, userJson, scores, scoresJson); - } - } - + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonRow.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonRow.java index 2a7f8684c73..05aa1cea1ab 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonRow.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/Jsr353JsonRow.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,28 +17,36 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.in; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TypeCodec; import com.datastax.driver.extras.codecs.json.Jsr353JsonCodec; - import javax.json.Json; import javax.json.JsonObject; import javax.json.JsonStructure; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; - /** - * Illustrates how to map an entire table row to a Java object using - * the Java API for JSON processing, - * and leveraging the {@code SELECT JSON} and {@code INSERT JSON} syntaxes - * introduced in Cassandra 2.2. - *

    - * This example makes usage of a custom {@link TypeCodec codec}, - * {@link Jsr353JsonCodec}, which is declared in the driver-extras module. - * If you plan to follow this example, make sure to include the following - * Maven dependencies in your project: + * Illustrates how to map an entire table row to a Java object using the Java API for JSON processing, and leveraging the + * {@code SELECT JSON} and {@code INSERT JSON} syntaxes introduced in Cassandra 2.2. + * + *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which + * is declared in the driver-extras module. If you plan to follow this example, make sure to include + * the following Maven dependencies in your project: + * *

    {@code
      * <dependency>
    - *     <groupId>com.datastax.cassandra</groupId>
    + *     <groupId>org.apache.cassandra</groupId>
      *     <artifactId>cassandra-driver-extras</artifactId>
      *     <version>${driver.version}</version>
      * </dependency>
    @@ -54,106 +64,103 @@
      *     <scope>runtime</scope>
      * </dependency>
      * }
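One detail the inline comments in this example repeat is that Jsr353JsonCodec is registered for JsonStructure, not for subtypes such as JsonObject, so get() and set() must be called with JsonStructure and the result downcast manually. A tiny illustrative sketch of the read side (the JsonStructureAccess name is ours):

```java
import com.datastax.driver.core.Row;
import javax.json.JsonObject;
import javax.json.JsonStructure;

class JsonStructureAccess {

  // The codec is looked up by the exact type JsonStructure; asking for
  // JsonObject directly is not expected to find it.
  static JsonObject readUser(Row row) {
    JsonStructure json = row.get(0, JsonStructure.class);
    return (JsonObject) json;
  }
}
```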
    - * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder QueryBuilder}; - * for examples using the "core" API, see {@link PlainTextJson} (they are easily translatable to the - * queries in this class). - *

    - * Preconditions: - * - a Cassandra 2.2+ cluster is running and accessible through the contacts points identified by CONTACT_POINTS and PORT; - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a table "examples.json_jsr353_row". If it already exists, it will be reused; - * - inserts data in the table. * - * @see What’s New in Cassandra 2.2: JSON Support + * This example also uses the {@link com.datastax.driver.core.querybuilder.QueryBuilder + * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily + * translatable to the queries in this class). + * + *

    Preconditions: - a Cassandra 2.2+ cluster is running and accessible through the contacts + * points identified by CONTACT_POINTS and PORT; + * + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.json_jsr353_row". If it already + * exists, it will be reused; - inserts data in the table. + * + * @see What’s + * New in Cassandra 2.2: JSON Support */ public class Jsr353JsonRow { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - public static void main(String[] args) { - Cluster cluster = null; - try { + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - // A codec to convert JSON payloads into JsonObject instances; - // this codec is declared in the driver-extras module - Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); + public static void main(String[] args) { + Cluster cluster = null; + try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .withCodecRegistry(new CodecRegistry().register(userCodec)) - .build(); + // A codec to convert JSON payloads into JsonObject instances; + // this codec is declared in the driver-extras module + Jsr353JsonCodec userCodec = new Jsr353JsonCodec(); - Session session = cluster.connect(); + cluster = + Cluster.builder() + .addContactPoints(CONTACT_POINTS) + .withPort(PORT) + .withCodecRegistry(new CodecRegistry().register(userCodec)) + .build(); - createSchema(session); - insertJsonRow(session); - selectJsonRow(session); + Session session = cluster.connect(); - } finally { - if (cluster != null) cluster.close(); - } - } + createSchema(session); + insertJsonRow(session); + selectJsonRow(session); - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.json_jsr353_row(" + - "id int PRIMARY KEY, name text, age int)"); + } finally { + if (cluster != null) cluster.close(); } - - // Mapping a User instance to a table row using INSERT JSON - private static void insertJsonRow(Session session) { - - JsonObject alice = Json.createObjectBuilder() - .add("id", 1) - .add("name", "alice") - .add("age", 30) - .build(); - - JsonObject bob = Json.createObjectBuilder() - .add("id", 2) - .add("name", "bob") - .add("age", 35) - .build(); - - // Build and execute a simple statement - Statement stmt = insertInto("examples", "json_jsr353_row") - .json(alice); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare( - insertInto("examples", "json_jsr353_row").json(bindMarker("user"))); - session.execute(pst.bind() - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("user", bob, JsonStructure.class)); + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.json_jsr353_row(" + + "id int PRIMARY KEY, name text, age int)"); + } + + // Mapping a User instance to a table row using INSERT JSON + private static void insertJsonRow(Session session) { + + JsonObject alice = + Json.createObjectBuilder().add("id", 
1).add("name", "alice").add("age", 30).build(); + + JsonObject bob = + Json.createObjectBuilder().add("id", 2).add("name", "bob").add("age", 35).build(); + + // Build and execute a simple statement + Statement stmt = insertInto("examples", "json_jsr353_row").json(alice); + session.execute(stmt); + + // The JSON object can be a bound value if the statement is prepared + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = + session.prepare(insertInto("examples", "json_jsr353_row").json(bindMarker("user"))); + session.execute( + pst.bind() + // note that the codec requires that the type passed to the set() method + // be always JsonStructure, and not a subclass of it, such as JsonObject + .set("user", bob, JsonStructure.class)); + } + + // Retrieving User instances from table rows using SELECT JSON + private static void selectJsonRow(Session session) { + + // Reading the whole row as a JSON object + Statement stmt = select().json().from("examples", "json_jsr353_row").where(in("id", 1, 2)); + + ResultSet rows = session.execute(stmt); + + for (Row row : rows) { + // SELECT JSON returns only one column for each row, of type VARCHAR, + // containing the row as a JSON payload. + // Note that the codec requires that the type passed to the get() method + // be always JsonStructure, and not a subclass of it, such as JsonObject, + // hence the need to downcast to JsonObject manually + JsonObject user = (JsonObject) row.get(0, JsonStructure.class); + System.out.printf("Retrieved user: %s%n", user); } - - // Retrieving User instances from table rows using SELECT JSON - private static void selectJsonRow(Session session) { - - // Reading the whole row as a JSON object - Statement stmt = select().json() - .from("examples", "json_jsr353_row") - .where(in("id", 1, 2)); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - // SELECT JSON returns only one column for each row, of type VARCHAR, - // containing the row as a JSON payload. - // Note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get(0, JsonStructure.class); - System.out.printf("Retrieved user: %s%n", user); - } - } - + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/json/PlainTextJson.java b/driver-examples/src/main/java/com/datastax/driver/examples/json/PlainTextJson.java index baedca8e09a..4b97380d403 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/json/PlainTextJson.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/json/PlainTextJson.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,138 +17,158 @@ */ package com.datastax.driver.examples.json; -import com.datastax.driver.core.*; +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.fromJson; +import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; -import static com.datastax.driver.core.querybuilder.QueryBuilder.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; /** - * Illustrates basic JSON support with plain JSON strings. For more advanced examples using complex objects and custom - * codecs, refer to the other examples in this package. - *

    - * Preconditions: - * - a Cassandra 2.2+ cluster is running and accessible through the contacts points identified by CONTACT_POINTS and - * PORT; - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a table "examples.querybuilder_json". If it already exists, it will be reused; - * - inserts data in the table. + * Illustrates basic JSON support with plain JSON strings. For more advanced examples using complex + * objects and custom codecs, refer to the other examples in this package. + * + *

    Preconditions: - a Cassandra 2.2+ cluster is running and accessible through the contacts + * points identified by CONTACT_POINTS and PORT; * - * @see What’s New in Cassandra 2.2: JSON Support + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.querybuilder_json". If it already + * exists, it will be reused; - inserts data in the table. + * + * @see What’s + * New in Cassandra 2.2: JSON Support */ public class PlainTextJson { - static String[] CONTACT_POINTS = {"127.0.0.1"}; - static int PORT = 9042; - - public static void main(String[] args) { - Cluster cluster = null; - try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(PORT) - .build(); - Session session = cluster.connect(); + static String[] CONTACT_POINTS = {"127.0.0.1"}; + static int PORT = 9042; - createSchema(session); + public static void main(String[] args) { + Cluster cluster = null; + try { + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(PORT).build(); + Session session = cluster.connect(); - insertWithCoreApi(session); - selectWithCoreApi(session); + createSchema(session); - insertWithQueryBuilder(session); - selectWithQueryBuilder(session); - } finally { - if (cluster != null) cluster.close(); - } - } - - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.querybuilder_json(" + - "id int PRIMARY KEY, name text, specs map)"); - } - - /** - * Demonstrates data insertion with the "core" API, i.e. providing the full query strings. - */ - private static void insertWithCoreApi(Session session) { - // Bind in a simple statement: - session.execute("INSERT INTO examples.querybuilder_json JSON ?", - "{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }"); - - // Bind in a prepared statement: - // (we use a local variable here for the sake of example, but in a real application you would cache and reuse - // the prepared statement) - PreparedStatement pst = session.prepare("INSERT INTO examples.querybuilder_json JSON :payload"); - session.execute(pst.bind() - .setString("payload", "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); - - // fromJson lets you provide individual columns as JSON: - session.execute("INSERT INTO examples.querybuilder_json " + - "(id, name, specs) VALUES (?, ?, fromJson(?))", - 3, "Screen", "{ \"size\": \"24-inch\" }"); - } - - /** - * Demonstrates data retrieval with the "core" API, i.e. providing the full query strings. - */ - private static void selectWithCoreApi(Session session) { - // Reading the whole row as a JSON object: - Row row = session.execute("SELECT JSON * FROM examples.querybuilder_json WHERE id = ?", 1).one(); - System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); - - // Extracting a particular column as JSON: - row = session.execute("SELECT id, toJson(specs) AS json_specs FROM examples.querybuilder_json WHERE id = ?", 2) - .one(); - System.out.printf("Entry #%d's specs as JSON: %s%n", - row.getInt("id"), row.getString("json_specs")); - } - - /** - * Same as {@link #insertWithCoreApi(Session)}, but using {@link com.datastax.driver.core.querybuilder.QueryBuilder} - * to construct the queries. 
- */ - private static void insertWithQueryBuilder(Session session) { - // Simple statement: - Statement stmt = insertInto("examples", "querybuilder_json") - .json("{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }"); - session.execute(stmt); - - // Prepare and bind: - // (again, cache the prepared statement in a real application) - PreparedStatement pst = session.prepare( - insertInto("examples", "querybuilder_json").json(bindMarker("payload"))); - session.execute(pst.bind() - .setString("payload", "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); - - // fromJson on a single column: - stmt = insertInto("examples", "querybuilder_json") - .value("id", 3) - .value("name", "Screen") - .value("specs", fromJson("{ \"size\": \"24-inch\" }")); - session.execute(stmt); - } + insertWithCoreApi(session); + selectWithCoreApi(session); - /** - * Same as {@link #selectWithCoreApi(Session)}, but using {@link com.datastax.driver.core.querybuilder.QueryBuilder} - * to construct the queries. - */ - private static void selectWithQueryBuilder(Session session) { - // Reading the whole row as a JSON object: - Statement stmt = select().json() - .from("examples", "querybuilder_json") - .where(eq("id", 1)); - Row row = session.execute(stmt).one(); - System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); - - // Extracting a particular column as JSON: - stmt = select() - .column("id") - .toJson("specs").as("json_specs") - .from("examples", "querybuilder_json") - .where(eq("id", 2)); - row = session.execute(stmt).one(); - System.out.printf("Entry #%d's specs as JSON: %s%n", - row.getInt("id"), row.getString("json_specs")); + insertWithQueryBuilder(session); + selectWithQueryBuilder(session); + } finally { + if (cluster != null) cluster.close(); } + } + + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.querybuilder_json(" + + "id int PRIMARY KEY, name text, specs map)"); + } + + /** Demonstrates data insertion with the "core" API, i.e. providing the full query strings. */ + private static void insertWithCoreApi(Session session) { + // Bind in a simple statement: + session.execute( + "INSERT INTO examples.querybuilder_json JSON ?", + "{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }"); + + // Bind in a prepared statement: + // (we use a local variable here for the sake of example, but in a real application you would + // cache and reuse + // the prepared statement) + PreparedStatement pst = session.prepare("INSERT INTO examples.querybuilder_json JSON :payload"); + session.execute( + pst.bind() + .setString( + "payload", + "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); + + // fromJson lets you provide individual columns as JSON: + session.execute( + "INSERT INTO examples.querybuilder_json " + "(id, name, specs) VALUES (?, ?, fromJson(?))", + 3, + "Screen", + "{ \"size\": \"24-inch\" }"); + } + + /** Demonstrates data retrieval with the "core" API, i.e. providing the full query strings. 
*/ + private static void selectWithCoreApi(Session session) { + // Reading the whole row as a JSON object: + Row row = + session.execute("SELECT JSON * FROM examples.querybuilder_json WHERE id = ?", 1).one(); + System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); + + // Extracting a particular column as JSON: + row = + session + .execute( + "SELECT id, toJson(specs) AS json_specs FROM examples.querybuilder_json WHERE id = ?", + 2) + .one(); + System.out.printf( + "Entry #%d's specs as JSON: %s%n", row.getInt("id"), row.getString("json_specs")); + } + + /** + * Same as {@link #insertWithCoreApi(Session)}, but using {@link + * com.datastax.driver.core.querybuilder.QueryBuilder} to construct the queries. + */ + private static void insertWithQueryBuilder(Session session) { + // Simple statement: + Statement stmt = + insertInto("examples", "querybuilder_json") + .json("{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }"); + session.execute(stmt); + + // Prepare and bind: + // (again, cache the prepared statement in a real application) + PreparedStatement pst = + session.prepare(insertInto("examples", "querybuilder_json").json(bindMarker("payload"))); + session.execute( + pst.bind() + .setString( + "payload", + "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); + + // fromJson on a single column: + stmt = + insertInto("examples", "querybuilder_json") + .value("id", 3) + .value("name", "Screen") + .value("specs", fromJson("{ \"size\": \"24-inch\" }")); + session.execute(stmt); + } + + /** + * Same as {@link #selectWithCoreApi(Session)}, but using {@link + * com.datastax.driver.core.querybuilder.QueryBuilder} to construct the queries. + */ + private static void selectWithQueryBuilder(Session session) { + // Reading the whole row as a JSON object: + Statement stmt = select().json().from("examples", "querybuilder_json").where(eq("id", 1)); + Row row = session.execute(stmt).one(); + System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); + + // Extracting a particular column as JSON: + stmt = + select() + .column("id") + .toJson("specs") + .as("json_specs") + .from("examples", "querybuilder_json") + .where(eq("id", 2)); + row = session.execute(stmt).one(); + System.out.printf( + "Entry #%d's specs as JSON: %s%n", row.getInt("id"), row.getString("json_specs")); + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/paging/ForwardPagingRestUi.java b/driver-examples/src/main/java/com/datastax/driver/examples/paging/ForwardPagingRestUi.java index bcfcebf8b55..faa938f50f6 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/paging/ForwardPagingRestUi.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/paging/ForwardPagingRestUi.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,19 +17,14 @@ */ package com.datastax.driver.examples.paging; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.PagingState; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; import com.sun.net.httpserver.HttpServer; -import org.glassfish.hk2.utilities.binding.AbstractBinder; -import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; -import org.glassfish.jersey.server.ResourceConfig; - -import javax.annotation.PostConstruct; -import javax.inject.Inject; -import javax.inject.Singleton; -import javax.ws.rs.*; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; import java.io.IOException; import java.net.URI; import java.util.ArrayList; @@ -35,260 +32,276 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import javax.annotation.PostConstruct; +import javax.inject.Inject; +import javax.inject.Singleton; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.UriBuilder; +import javax.ws.rs.core.UriInfo; +import org.glassfish.hk2.utilities.binding.AbstractBinder; +import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; +import org.glassfish.jersey.server.ResourceConfig; /** - * A stateless REST service (backed by - * Jersey, - * HK2 and - * the JDK HttpServer) that displays paginated results for a CQL query. - *

    - * Conversion to and from JSON is made through - * Jersey Jackson providers. - *

    - * Navigation is forward-only. - * The implementation relies on the paging state returned by Cassandra, and encodes it in HTTP URLs. - *

    - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and - * CASSANDRA_PORT; - * - port HTTP_PORT is available. - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a table "examples.forward_paging_rest_ui". If it already exists, it will be reused; - * - inserts data in the table; - * - launches a REST server listening on HTTP_PORT. + * A stateless REST service (backed by Jersey, HK2 and the JDK HttpServer) that displays paginated results for + * a CQL query. + * + *

    Conversion to and from JSON is made through Jersey Jackson + * providers. + * + *

    Navigation is forward-only. The implementation relies on the paging state returned by + * Cassandra, and encodes it in HTTP URLs. + * + *
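Because the service is forward-only and carries the paging state in its URLs, the core of the implementation boils down to: set a fetch size, resume from the incoming paging state if any, drain exactly one page, and hand the next paging state back to the client. A hypothetical, condensed sketch of that flow (PageFetcher is not a class from the example, though the query targets the table the example creates):

```java
import com.datastax.driver.core.PagingState;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import java.util.ArrayList;
import java.util.List;

class PageFetcher {

  // pagingStateStr is the opaque token carried in the previous page's URL,
  // or null when requesting the first page.
  static List<Row> fetchPage(Session session, String pagingStateStr, int pageSize) {
    Statement stmt =
        new SimpleStatement("SELECT * FROM examples.forward_paging_rest_ui WHERE userid = ?", 1)
            .setFetchSize(pageSize);
    if (pagingStateStr != null) {
      // Resume exactly where the previous page stopped.
      stmt.setPagingState(PagingState.fromString(pagingStateStr));
    }
    ResultSet rs = session.execute(stmt);

    // Drain only the rows of the current page, without triggering a background fetch.
    List<Row> page = new ArrayList<Row>();
    int remaining = rs.getAvailableWithoutFetching();
    for (int i = 0; i < remaining; i++) {
      page.add(rs.one());
    }

    // Paging state to embed in the "next page" URL; null means there are no more pages.
    PagingState next = rs.getExecutionInfo().getPagingState();
    String nextPageToken = (next == null) ? null : next.toString();
    System.out.printf("next page token: %s%n", nextPageToken);
    return page;
  }
}
```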

    Preconditions: - a Cassandra cluster is running and accessible through the contacts points + * identified by CONTACT_POINTS and CASSANDRA_PORT; - port HTTP_PORT is available. + * + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.forward_paging_rest_ui". If it + * already exists, it will be reused; - inserts data in the table; - launches a REST server + * listening on HTTP_PORT. */ public class ForwardPagingRestUi { - static final String[] CONTACT_POINTS = {"127.0.0.1"}; + static final String[] CONTACT_POINTS = {"127.0.0.1"}; - static final int CASSANDRA_PORT = 9042; + static final int CASSANDRA_PORT = 9042; - static final int HTTP_PORT = 8080; + static final int HTTP_PORT = 8080; - static final int ITEMS_PER_PAGE = 10; + static final int ITEMS_PER_PAGE = 10; - static final URI BASE_URI = UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); + static final URI BASE_URI = + UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); - public static void main(String[] args) throws Exception { + public static void main(String[] args) throws Exception { - Cluster cluster = null; - try { + Cluster cluster = null; + try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(CASSANDRA_PORT) - .build(); - Session session = cluster.connect(); + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(CASSANDRA_PORT).build(); + Session session = cluster.connect(); - createSchema(session); - populateSchema(session); - startRestService(session); + createSchema(session); + populateSchema(session); + startRestService(session); - } finally { - if (cluster != null) cluster.close(); - } - - } - - // Creates a table storing videos by users, in a typically denormalized way - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.forward_paging_rest_ui(" + - "userid int, username text, " + - "added timestamp, " + - "videoid int, title text, " + - "PRIMARY KEY (userid, added, videoid)" + - ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); + } finally { + if (cluster != null) cluster.close(); } - - private static void populateSchema(Session session) { - // 3 users - for (int i = 0; i < 3; i++) { - // 49 videos each - for (int j = 0; j < 49; j++) { - int videoid = i * 100 + j; - session.execute("INSERT INTO examples.forward_paging_rest_ui (userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)", - i, "user " + i, new Date(j * 100000), videoid, "video " + videoid); - } - } + } + + // Creates a table storing videos by users, in a typically denormalized way + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.forward_paging_rest_ui(" + + "userid int, username text, " + + "added timestamp, " + + "videoid int, title text, " + + "PRIMARY KEY (userid, added, videoid)" + + ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); + } + + private static void populateSchema(Session session) { + // 3 users + for (int i = 0; i < 3; i++) { + // 49 videos each + for (int j = 0; j < 49; j++) { + int videoid = i * 100 + j; + session.execute( + "INSERT INTO examples.forward_paging_rest_ui (userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)", + i, + "user " + i, + new Date(j * 100000), + videoid, + "video " + 
videoid); + } } + } + + // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) + private static void startRestService(Session session) throws IOException, InterruptedException { + + final HttpServer server = + JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + server.setExecutor(executor); + Runtime.getRuntime() + .addShutdownHook( + new Thread( + new Runnable() { + @Override + public void run() { + System.out.println(); + System.out.println("Stopping REST Service"); + server.stop(0); + executor.shutdownNow(); + System.out.println("REST Service stopped"); + } + })); + server.start(); + + System.out.println(); + System.out.printf( + "REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); + System.out.println( + "To explore this example, start with the following request and walk from there:"); + System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); + System.out.println(); + + Thread.currentThread().join(); + } + + /** + * Configures the REST application and handles injection of custom objects, such as the driver + * session. + * + *

    This is also the place where you would normally configure JSON serialization, etc. + * + *
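As an illustration of the point above (this sketch is not part of the patch and its class names are made up): with Jersey's Jackson support on the classpath, JSON serialization could be tuned by registering `JacksonFeature` together with a custom `ObjectMapper` resolver on the `ResourceConfig`, instead of relying on auto-discovery.

```
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import org.glassfish.jersey.jackson.JacksonFeature;
import org.glassfish.jersey.server.ResourceConfig;

/** Hypothetical variant of VideoApplication with explicit Jackson configuration. */
public class CustomizedVideoApplication extends ResourceConfig {

  public CustomizedVideoApplication() {
    // Resource classes and the HK2 binder would be registered here, as in VideoApplication.
    register(JacksonFeature.class); // enable Jackson explicitly instead of auto-discovery
    register(ObjectMapperResolver.class); // plug in the tuned ObjectMapper below
  }

  /** Lets Jersey obtain a pre-configured ObjectMapper whenever it serializes a response. */
  @Provider
  public static class ObjectMapperResolver implements ContextResolver<ObjectMapper> {

    private final ObjectMapper mapper =
        new ObjectMapper()
            .enable(SerializationFeature.INDENT_OUTPUT) // pretty-print JSON responses
            .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); // ISO-8601 dates

    @Override
    public ObjectMapper getContext(Class<?> type) {
      return mapper;
    }
  }
}
```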

    Note that in this example, we rely on the automatic discovery and configuration of Jackson + * through {@code org.glassfish.jersey.jackson.JacksonFeature}. + */ + public static class VideoApplication extends ResourceConfig { + + public VideoApplication(final Session session) { + super(UserService.class); + // AbstractBinder is provided by HK2 + register( + new AbstractBinder() { - // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) - private static void startRestService(Session session) throws IOException, InterruptedException { - - final HttpServer server = JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); - final ExecutorService executor = Executors.newSingleThreadExecutor(); - server.setExecutor(executor); - Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { @Override - public void run() { - System.out.println(); - System.out.println("Stopping REST Service"); - server.stop(0); - executor.shutdownNow(); - System.out.println("REST Service stopped"); + protected void configure() { + bind(session).to(Session.class); } - })); - server.start(); - - System.out.println(); - System.out.printf("REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); - System.out.println("To explore this example, start with the following request and walk from there:"); - System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); - System.out.println(); - - Thread.currentThread().join(); - + }); } - - /** - * Configures the REST application and handles injection of custom objects, such - * as the driver session. - *

    - * This is also the place where you would normally configure JSON serialization, etc. - *

    - * Note that in this example, we rely on the automatic discovery and configuration of - * Jackson through {@code org.glassfish.jersey.jackson.JacksonFeature}. - */ - public static class VideoApplication extends ResourceConfig { - - public VideoApplication(final Session session) { - super(UserService.class); - // AbstractBinder is provided by HK2 - register(new AbstractBinder() { - - @Override - protected void configure() { - bind(session).to(Session.class); - } - - }); - } - + } + + /** + * A typical REST service, handling requests involving users. + * + *

    Typically, this service would contain methods for listing and searching for users, and + * methods to retrieve user details. Here, for brevity, only one method, listing videos by user, + * is implemented. + */ + @Singleton + @Path("/users") + @Produces("application/json") + public static class UserService { + + @Inject private Session session; + + @Context private UriInfo uri; + + private PreparedStatement videosByUser; + + @PostConstruct + @SuppressWarnings("unused") + public void init() { + this.videosByUser = + session.prepare( + "SELECT videoid, title, added FROM examples.forward_paging_rest_ui WHERE userid = ?"); } /** - * A typical REST service, handling requests involving users. - *

    - * Typically, this service would contain methods for listing and searching for users, - * and methods to retrieve user details. Here, for brevity, - * only one method, listing videos by user, is implemented. + * Returns a paginated list of all the videos created by the given user. + * + * @param userid the user ID. + * @param page the page to request, or {@code null} to get the first page. */ - @Singleton - @Path("/users") - @Produces("application/json") - public static class UserService { + @GET + @Path("/{userid}/videos") + public UserVideosResponse getUserVideos( + @PathParam("userid") int userid, @QueryParam("page") String page) { - @Inject - private Session session; + Statement statement = videosByUser.bind(userid).setFetchSize(ITEMS_PER_PAGE); + if (page != null) statement.setPagingState(PagingState.fromString(page)); - @Context - private UriInfo uri; + ResultSet rs = session.execute(statement); + PagingState nextPage = rs.getExecutionInfo().getPagingState(); - private PreparedStatement videosByUser; + int remaining = rs.getAvailableWithoutFetching(); + List videos = new ArrayList(remaining); - @PostConstruct - @SuppressWarnings("unused") - public void init() { - this.videosByUser = session.prepare("SELECT videoid, title, added FROM examples.forward_paging_rest_ui WHERE userid = ?"); - } - - /** - * Returns a paginated list of all the videos created by the given user. - * - * @param userid the user ID. - * @param page the page to request, or {@code null} to get the first page. - */ - @GET - @Path("/{userid}/videos") - public UserVideosResponse getUserVideos(@PathParam("userid") int userid, @QueryParam("page") String page) { - - Statement statement = videosByUser.bind(userid).setFetchSize(ITEMS_PER_PAGE); - if (page != null) - statement.setPagingState(PagingState.fromString(page)); - - ResultSet rs = session.execute(statement); - PagingState nextPage = rs.getExecutionInfo().getPagingState(); - - int remaining = rs.getAvailableWithoutFetching(); - List videos = new ArrayList(remaining); - - if (remaining > 0) { - for (Row row : rs) { - - UserVideo video = new UserVideo( - row.getInt("videoid"), - row.getString("title"), - row.getTimestamp("added")); - videos.add(video); - - // Make sure we don't go past the current page (we don't want the driver to fetch the next one) - if (--remaining == 0) - break; - } - } + if (remaining > 0) { + for (Row row : rs) { - URI next = null; - if (nextPage != null) - next = uri.getAbsolutePathBuilder().queryParam("page", nextPage).build(); + UserVideo video = + new UserVideo( + row.getInt("videoid"), row.getString("title"), row.getTimestamp("added")); + videos.add(video); - return new UserVideosResponse(videos, next); + // Make sure we don't go past the current page (we don't want the driver to fetch the next + // one) + if (--remaining == 0) break; } + } - } + URI next = null; + if (nextPage != null) + next = uri.getAbsolutePathBuilder().queryParam("page", nextPage).build(); - public static class UserVideosResponse { + return new UserVideosResponse(videos, next); + } + } - private final List videos; + public static class UserVideosResponse { - private final URI nextPage; + private final List videos; - public UserVideosResponse(List videos, URI nextPage) { - this.videos = videos; - this.nextPage = nextPage; - } + private final URI nextPage; - @SuppressWarnings("unused") - public List getVideos() { - return videos; - } + public UserVideosResponse(List videos, URI nextPage) { + this.videos = videos; + this.nextPage = nextPage; + } - 
@SuppressWarnings("unused") - public URI getNextPage() { - return nextPage; - } + @SuppressWarnings("unused") + public List getVideos() { + return videos; + } + @SuppressWarnings("unused") + public URI getNextPage() { + return nextPage; } + } - public static class UserVideo { + public static class UserVideo { - private final int videoid; + private final int videoid; - private final String title; + private final String title; - private final Date added; + private final Date added; - public UserVideo(int videoid, String title, Date added) { - this.videoid = videoid; - this.title = title; - this.added = added; - } - - @SuppressWarnings("unused") - public int getVideoid() { - return videoid; - } + public UserVideo(int videoid, String title, Date added) { + this.videoid = videoid; + this.title = title; + this.added = added; + } - public String getTitle() { - return title; - } + @SuppressWarnings("unused") + public int getVideoid() { + return videoid; + } - @SuppressWarnings("unused") - public Date getAdded() { - return added; - } + public String getTitle() { + return title; } + @SuppressWarnings("unused") + public Date getAdded() { + return added; + } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/paging/RandomPagingRestUi.java b/driver-examples/src/main/java/com/datastax/driver/examples/paging/RandomPagingRestUi.java index 3944aca5c9f..e65047a0bc0 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/paging/RandomPagingRestUi.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/paging/RandomPagingRestUi.java @@ -1,11 +1,13 @@ /* - * Copyright (C) 2012-2017 DataStax Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,19 +17,13 @@ */ package com.datastax.driver.examples.paging; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; import com.sun.net.httpserver.HttpServer; -import org.glassfish.hk2.utilities.binding.AbstractBinder; -import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; -import org.glassfish.jersey.server.ResourceConfig; - -import javax.annotation.PostConstruct; -import javax.inject.Inject; -import javax.inject.Singleton; -import javax.ws.rs.*; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; import java.io.IOException; import java.net.URI; import java.util.ArrayList; @@ -36,328 +32,351 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import javax.annotation.PostConstruct; +import javax.inject.Inject; +import javax.inject.Singleton; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.UriBuilder; +import javax.ws.rs.core.UriInfo; +import org.glassfish.hk2.utilities.binding.AbstractBinder; +import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; +import org.glassfish.jersey.server.ResourceConfig; /** - * A stateless REST service (backed by - * Jersey, - * HK2 and - * the JDK HttpServer) that displays paginated results for a CQL query. - *

    - * Conversion to and from JSON is made through - * Jersey Jackson providers. - *

    - * Navigation is bidirectional, and you can jump to a random page (by modifying the URL). - * Cassandra does not support offset queries (see https://issues.apache.org/jira/browse/CASSANDRA-6511), so we emulate - * it by restarting from the beginning each time, and iterating through the results until we reach the requested page. - * This is fundamentally inefficient (O(n) in the number of rows skipped), but the tradeoff might be acceptable for some - * use cases; for example, if you show 10 results per page and you think users are unlikely to browse past page 10, - * you only need to retrieve at most 100 rows. - *

    - * Preconditions: - * - a Cassandra cluster is running and accessible through the contacts points identified by CONTACT_POINTS and - * CASSANDRA_PORT; - * - port HTTP_PORT is available. - *

    - * Side effects: - * - creates a new keyspace "examples" in the cluster. If a keyspace with this name already exists, it will be reused; - * - creates a table "examples.random_paging_rest_ui". If it already exists, it will be reused; - * - inserts data in the table; - * - launches a REST server listening on HTTP_PORT. + * A stateless REST service (backed by Jersey, HK2 and the JDK HttpServer) that displays paginated results for + * a CQL query. + * + *

    Conversion to and from JSON is made through Jersey Jackson + * providers. + * + *

    Navigation is bidirectional, and you can jump to a random page (by modifying the URL). + * Cassandra does not support offset queries (see + * https://issues.apache.org/jira/browse/CASSANDRA-6511), so we emulate it by restarting from the + * beginning each time, and iterating through the results until we reach the requested page. This is + * fundamentally inefficient (O(n) in the number of rows skipped), but the tradeoff might be + * acceptable for some use cases; for example, if you show 10 results per page and you think users + * are unlikely to browse past page 10, you only need to retrieve at most 100 rows. + * + *
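To make the skip-ahead idea described above easier to follow in isolation, here is a condensed sketch of the same logic that the `Pager` class below implements, stripped of the REST scaffolding. It is not part of the patch: the class and method names are made up, and it uses the driver's safe `PagingState` accessors rather than the unsafe byte-array variants used further down.

```
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import java.util.ArrayList;
import java.util.List;

class OffsetEmulationSketch {

  /** Returns the rows of the requested 1-based page, skipping all rows that precede it. */
  static List<Row> fetchPage(Session session, Statement statement, int page, int pageSize) {
    int targetRow = (page - 1) * pageSize; // absolute index of the first row to display
    ResultSet rs = session.execute(statement);
    int currentRow = 0; // absolute index of the next row rs will return

    // Skip whole protocol pages without decoding their rows, as long as the target row
    // is not contained in the page that has already been fetched.
    while (rs.getAvailableWithoutFetching() > 0
        && rs.getExecutionInfo().getPagingState() != null
        && currentRow + rs.getAvailableWithoutFetching() < targetRow) {
      currentRow += rs.getAvailableWithoutFetching();
      statement.setPagingState(rs.getExecutionInfo().getPagingState());
      rs = session.execute(statement);
    }

    // Then walk the remaining rows one by one until the requested page starts.
    List<Row> pageRows = new ArrayList<Row>(pageSize);
    for (Row row : rs) {
      if (currentRow++ < targetRow) {
        continue; // still before the requested page
      }
      pageRows.add(row);
      if (pageRows.size() == pageSize) {
        break; // stop early so the driver does not fetch pages we will not display
      }
    }
    return pageRows; // empty if the requested page is past the end of the result set
  }
}
```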

    Preconditions: - a Cassandra cluster is running and accessible through the contact points + * identified by CONTACT_POINTS and CASSANDRA_PORT; - port HTTP_PORT is available. + * + *

    Side effects: - creates a new keyspace "examples" in the cluster. If a keyspace with this name + * already exists, it will be reused; - creates a table "examples.random_paging_rest_ui". If it + * already exists, it will be reused; - inserts data in the table; - launches a REST server + * listening on HTTP_PORT. */ public class RandomPagingRestUi { - static final String[] CONTACT_POINTS = {"127.0.0.1"}; + static final String[] CONTACT_POINTS = {"127.0.0.1"}; - static final int CASSANDRA_PORT = 9042; + static final int CASSANDRA_PORT = 9042; - static final int HTTP_PORT = 8080; + static final int HTTP_PORT = 8080; - static final int ITEMS_PER_PAGE = 10; - // How many rows the driver will retrieve at a time. - // This is set artificially low for the sake of this example. Unless your rows are very large, you can probably use - // a much higher value (the driver's default is 5000). - static final int FETCH_SIZE = 60; + static final int ITEMS_PER_PAGE = 10; + // How many rows the driver will retrieve at a time. + // This is set artificially low for the sake of this example. Unless your rows are very large, you + // can probably use + // a much higher value (the driver's default is 5000). + static final int FETCH_SIZE = 60; - static final URI BASE_URI = UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); + static final URI BASE_URI = + UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); - public static void main(String[] args) throws Exception { + public static void main(String[] args) throws Exception { - Cluster cluster = null; - try { + Cluster cluster = null; + try { - cluster = Cluster.builder() - .addContactPoints(CONTACT_POINTS).withPort(CASSANDRA_PORT) - .build(); - Session session = cluster.connect(); + cluster = Cluster.builder().addContactPoints(CONTACT_POINTS).withPort(CASSANDRA_PORT).build(); + Session session = cluster.connect(); - createSchema(session); - populateSchema(session); - startRestService(session); - - } finally { - if (cluster != null) cluster.close(); - } + createSchema(session); + populateSchema(session); + startRestService(session); + } finally { + if (cluster != null) cluster.close(); } - - // Creates a table storing videos by users, in a typically denormalized way - private static void createSchema(Session session) { - session.execute("CREATE KEYSPACE IF NOT EXISTS examples " + - "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TABLE IF NOT EXISTS examples.random_paging_rest_ui(" + - "userid int, username text, " + - "added timestamp, " + - "videoid int, title text, " + - "PRIMARY KEY (userid, added, videoid)" + - ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); + } + + // Creates a table storing videos by users, in a typically denormalized way + private static void createSchema(Session session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.random_paging_rest_ui(" + + "userid int, username text, " + + "added timestamp, " + + "videoid int, title text, " + + "PRIMARY KEY (userid, added, videoid)" + + ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); + } + + private static void populateSchema(Session session) { + // 3 users + for (int i = 0; i < 3; i++) { + // 49 videos each + for (int j = 0; j < 49; j++) { + int videoid = i * 100 + j; + session.execute( + "INSERT INTO examples.random_paging_rest_ui 
(userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)", + i, + "user " + i, + new Date(j * 100000), + videoid, + "video " + videoid); + } } + } + + // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) + private static void startRestService(Session session) throws IOException, InterruptedException { + + final HttpServer server = + JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + server.setExecutor(executor); + Runtime.getRuntime() + .addShutdownHook( + new Thread( + new Runnable() { + @Override + public void run() { + System.out.println(); + System.out.println("Stopping REST Service"); + server.stop(0); + executor.shutdownNow(); + System.out.println("REST Service stopped"); + } + })); + server.start(); + + System.out.println(); + System.out.printf( + "REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); + System.out.println( + "To explore this example, start with the following request and walk from there:"); + System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); + System.out.println(); + + Thread.currentThread().join(); + } + + /** + * Configures the REST application and handles injection of custom objects, such as the driver + * session. + * + *

    This is also the place where you would normally configure JSON serialization, etc. + * + *

    Note that in this example, we rely on the automatic discovery and configuration of Jackson + * through {@code org.glassfish.jersey.jackson.JacksonFeature}. + */ + public static class VideoApplication extends ResourceConfig { + + public VideoApplication(final Session session) { + super(UserService.class); + // AbstractBinder is provided by HK2 + register( + new AbstractBinder() { - private static void populateSchema(Session session) { - // 3 users - for (int i = 0; i < 3; i++) { - // 49 videos each - for (int j = 0; j < 49; j++) { - int videoid = i * 100 + j; - session.execute("INSERT INTO examples.random_paging_rest_ui (userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)", - i, "user " + i, new Date(j * 100000), videoid, "video " + videoid); - } - } - } - - // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) - private static void startRestService(Session session) throws IOException, InterruptedException { - - final HttpServer server = JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); - final ExecutorService executor = Executors.newSingleThreadExecutor(); - server.setExecutor(executor); - Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { @Override - public void run() { - System.out.println(); - System.out.println("Stopping REST Service"); - server.stop(0); - executor.shutdownNow(); - System.out.println("REST Service stopped"); + protected void configure() { + bind(session).to(Session.class); } - })); - server.start(); - - System.out.println(); - System.out.printf("REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); - System.out.println("To explore this example, start with the following request and walk from there:"); - System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); - System.out.println(); - - Thread.currentThread().join(); - + }); } - - /** - * Configures the REST application and handles injection of custom objects, such - * as the driver session. - *

    - * This is also the place where you would normally configure JSON serialization, etc. - *

    - * Note that in this example, we rely on the automatic discovery and configuration of - * Jackson through {@code org.glassfish.jersey.jackson.JacksonFeature}. - */ - public static class VideoApplication extends ResourceConfig { - - public VideoApplication(final Session session) { - super(UserService.class); - // AbstractBinder is provided by HK2 - register(new AbstractBinder() { - - @Override - protected void configure() { - bind(session).to(Session.class); - } - - }); - } - + } + + /** + * A typical REST service, handling requests involving users. + * + *

    Typically, this service would contain methods for listing and searching for users, and + * methods to retrieve user details. Here, for brevity, only one method, listing videos by user, + * is implemented. + */ + @Singleton + @Path("/users") + @Produces("application/json") + public static class UserService { + + @Inject private Session session; + + @Context private UriInfo uri; + + private PreparedStatement videosByUser; + private Pager pager; + + @PostConstruct + @SuppressWarnings("unused") + public void init() { + this.pager = new Pager(session, ITEMS_PER_PAGE); + this.videosByUser = + session.prepare( + "SELECT videoid, title, added FROM examples.random_paging_rest_ui WHERE userid = ?"); } /** - * A typical REST service, handling requests involving users. - *

    - * Typically, this service would contain methods for listing and searching for users, - * and methods to retrieve user details. Here, for brevity, - * only one method, listing videos by user, is implemented. + * Returns a paginated list of all the videos created by the given user. + * + * @param userid the user ID. + * @param page the page to request, or {@code null} to get the first page. */ - @Singleton - @Path("/users") - @Produces("application/json") - public static class UserService { - - @Inject - private Session session; - - @Context - private UriInfo uri; - - private PreparedStatement videosByUser; - private Pager pager; - - @PostConstruct - @SuppressWarnings("unused") - public void init() { - this.pager = new Pager(session, ITEMS_PER_PAGE); - this.videosByUser = session.prepare("SELECT videoid, title, added FROM examples.random_paging_rest_ui WHERE userid = ?"); - } - - /** - * Returns a paginated list of all the videos created by the given user. - * - * @param userid the user ID. - * @param page the page to request, or {@code null} to get the first page. - */ - @GET - @Path("/{userid}/videos") - public UserVideosResponse getUserVideos(@PathParam("userid") int userid, @QueryParam("page") Integer page) { - - Statement statement = videosByUser.bind(userid).setFetchSize(FETCH_SIZE); - - if (page == null) page = 1; - ResultSet rs = pager.skipTo(statement, page); - - List videos; - boolean empty = rs.isExhausted(); - if (empty) { - videos = Collections.emptyList(); - } else { - int remaining = ITEMS_PER_PAGE; - videos = new ArrayList(remaining); - for (Row row : rs) { - UserVideo video = new UserVideo( - row.getInt("videoid"), - row.getString("title"), - row.getTimestamp("added")); - videos.add(video); - - if (--remaining == 0) - break; - } - } - - URI previous = (page == 1) ? null - : uri.getAbsolutePathBuilder().queryParam("page", page - 1).build(); - URI next = (empty) ? null - : uri.getAbsolutePathBuilder().queryParam("page", page + 1).build(); - return new UserVideosResponse(videos, previous, next); + @GET + @Path("/{userid}/videos") + public UserVideosResponse getUserVideos( + @PathParam("userid") int userid, @QueryParam("page") Integer page) { + + Statement statement = videosByUser.bind(userid).setFetchSize(FETCH_SIZE); + + if (page == null) page = 1; + ResultSet rs = pager.skipTo(statement, page); + + List videos; + boolean empty = rs.isExhausted(); + if (empty) { + videos = Collections.emptyList(); + } else { + int remaining = ITEMS_PER_PAGE; + videos = new ArrayList(remaining); + for (Row row : rs) { + UserVideo video = + new UserVideo( + row.getInt("videoid"), row.getString("title"), row.getTimestamp("added")); + videos.add(video); + + if (--remaining == 0) break; } + } + URI previous = + (page == 1) ? null : uri.getAbsolutePathBuilder().queryParam("page", page - 1).build(); + URI next = (empty) ? 
null : uri.getAbsolutePathBuilder().queryParam("page", page + 1).build(); + return new UserVideosResponse(videos, previous, next); } + } - public static class UserVideosResponse { + public static class UserVideosResponse { - private final List videos; + private final List videos; - private final URI previousPage; + private final URI previousPage; - private final URI nextPage; - - public UserVideosResponse(List videos, URI previousPage, URI nextPage) { - this.videos = videos; - this.previousPage = previousPage; - this.nextPage = nextPage; - } - - @SuppressWarnings("unused") - public List getVideos() { - return videos; - } - - @SuppressWarnings("unused") - public URI getPreviousPage() { - return previousPage; - } + private final URI nextPage; - @SuppressWarnings("unused") - public URI getNextPage() { - return nextPage; - } + public UserVideosResponse(List videos, URI previousPage, URI nextPage) { + this.videos = videos; + this.previousPage = previousPage; + this.nextPage = nextPage; + } + @SuppressWarnings("unused") + public List getVideos() { + return videos; } - public static class UserVideo { + @SuppressWarnings("unused") + public URI getPreviousPage() { + return previousPage; + } - private final int videoid; + @SuppressWarnings("unused") + public URI getNextPage() { + return nextPage; + } + } - private final String title; + public static class UserVideo { - private final Date added; + private final int videoid; - public UserVideo(int videoid, String title, Date added) { - this.videoid = videoid; - this.title = title; - this.added = added; - } + private final String title; - @SuppressWarnings("unused") - public int getVideoid() { - return videoid; - } - - public String getTitle() { - return title; - } + private final Date added; - @SuppressWarnings("unused") - public Date getAdded() { - return added; - } + public UserVideo(int videoid, String title, Date added) { + this.videoid = videoid; + this.title = title; + this.added = added; } - /** - * Helper class to emulate random paging. - *

    - * Note that it MUST be stateless, because it is cached as a field in our HTTP handler. - */ - static class Pager { - private final Session session; - private final int pageSize; + @SuppressWarnings("unused") + public int getVideoid() { + return videoid; + } - Pager(Session session, int pageSize) { - this.session = session; - this.pageSize = pageSize; - } + public String getTitle() { + return title; + } - ResultSet skipTo(Statement statement, int displayPage) { - // Absolute index of the first row we want to display on the web page. Our goal is that rs.next() returns - // that row. - int targetRow = (displayPage - 1) * pageSize; - - ResultSet rs = session.execute(statement); - // Absolute index of the next row returned by rs (if it is not exhausted) - int currentRow = 0; - int fetchedSize = rs.getAvailableWithoutFetching(); - byte[] nextState = rs.getExecutionInfo().getPagingStateUnsafe(); - - // Skip protocol pages until we reach the one that contains our target row. - // For example, if the first query returned 60 rows and our target is row number 90, we know we can skip - // those 60 rows directly without even iterating through them. - // This part is optional, we could simply iterate through the rows with the for loop below, but that's - // slightly less efficient because iterating each row involves a bit of internal decoding. - while (fetchedSize > 0 && nextState != null && currentRow + fetchedSize < targetRow) { - statement.setPagingStateUnsafe(nextState); - rs = session.execute(statement); - currentRow += fetchedSize; - fetchedSize = rs.getAvailableWithoutFetching(); - nextState = rs.getExecutionInfo().getPagingStateUnsafe(); - } + @SuppressWarnings("unused") + public Date getAdded() { + return added; + } + } + + /** + * Helper class to emulate random paging. + * + *

    Note that it MUST be stateless, because it is cached as a field in our HTTP handler. + */ + static class Pager { + private final Session session; + private final int pageSize; + + Pager(Session session, int pageSize) { + this.session = session; + this.pageSize = pageSize; + } - if (currentRow < targetRow) { - for (@SuppressWarnings("unused") Row row : rs) { - if (++currentRow == targetRow) break; - } - } - // If targetRow is past the end, rs will be exhausted. - // This means you can request a page past the end in the web UI (e.g. request page 12 while there are only - // 10 pages), and it will show up as empty. - // One improvement would be to detect that and take a different action, for example redirect to page 10 or - // show an error message, this is left as an exercise for the reader. - return rs; + ResultSet skipTo(Statement statement, int displayPage) { + // Absolute index of the first row we want to display on the web page. Our goal is that + // rs.next() returns + // that row. + int targetRow = (displayPage - 1) * pageSize; + + ResultSet rs = session.execute(statement); + // Absolute index of the next row returned by rs (if it is not exhausted) + int currentRow = 0; + int fetchedSize = rs.getAvailableWithoutFetching(); + byte[] nextState = rs.getExecutionInfo().getPagingStateUnsafe(); + + // Skip protocol pages until we reach the one that contains our target row. + // For example, if the first query returned 60 rows and our target is row number 90, we know + // we can skip + // those 60 rows directly without even iterating through them. + // This part is optional, we could simply iterate through the rows with the for loop below, + // but that's + // slightly less efficient because iterating each row involves a bit of internal decoding. + while (fetchedSize > 0 && nextState != null && currentRow + fetchedSize < targetRow) { + statement.setPagingStateUnsafe(nextState); + rs = session.execute(statement); + currentRow += fetchedSize; + fetchedSize = rs.getAvailableWithoutFetching(); + nextState = rs.getExecutionInfo().getPagingStateUnsafe(); + } + + if (currentRow < targetRow) { + for (@SuppressWarnings("unused") Row row : rs) { + if (++currentRow == targetRow) break; } + } + // If targetRow is past the end, rs will be exhausted. + // This means you can request a page past the end in the web UI (e.g. request page 12 while + // there are only + // 10 pages), and it will show up as empty. + // One improvement would be to detect that and take a different action, for example redirect + // to page 10 or + // show an error message, this is left as an exercise for the reader. + return rs; } + } } diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/retry/DowngradingRetry.java b/driver-examples/src/main/java/com/datastax/driver/examples/retry/DowngradingRetry.java new file mode 100644 index 00000000000..64ad03f9dbe --- /dev/null +++ b/driver-examples/src/main/java/com/datastax/driver/examples/retry/DowngradingRetry.java @@ -0,0 +1,469 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.examples.retry; + +import static com.datastax.driver.core.BatchStatement.Type.UNLOGGED; +import static com.datastax.driver.core.ConsistencyLevel.QUORUM; + +import com.datastax.driver.core.BatchStatement; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.WriteType; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.QueryConsistencyException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; +import java.text.SimpleDateFormat; + +/** + * This example illustrates how to replace the deprecated {@link + * com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy} with equivalent application + * logic. + * + *

+ * Preconditions:
+ *
+ *   • An Apache Cassandra cluster is running and accessible through the contact points
+ *     identified by {@link #CONTACT_POINTS} and {@link #PORT}.
+ *
+ * Side effects:
+ *
+ *   1. Creates a new keyspace {@code downgrading} in the cluster, with replication factor 3. If a
+ *      keyspace with this name already exists, it will be reused;
+ *   2. Creates a new table {@code downgrading.sensor_data}. If a table with that name exists
+ *      already, it will be reused;
+ *   3. Inserts a few rows, downgrading the consistency level if the operation fails;
+ *   4. Queries the table, downgrading the consistency level if the operation fails;
+ *   5. Displays the results on the console.
+ *
+ * Notes:
+ *
+ *   • The downgrading logic here is similar to what {@code DowngradingConsistencyRetryPolicy}
+ *     does; feel free to adapt it to your application needs;
+ *   • You should never attempt to retry a non-idempotent write. See the driver's manual page on
+ *     idempotence for more information.
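Expanding on that last note with a sketch that is not part of the example itself: in driver 3.x, idempotence can be declared per statement with `Statement#setIdempotent`, or defaulted via `QueryOptions#setDefaultIdempotence`, so that retry logic like the `write()` method below only replays statements that are known to be safe to replay. The class and method names here are illustrative only.

```
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

class IdempotenceSketch {

  /** Builds a cluster whose statements are treated as non-idempotent unless marked otherwise. */
  static Cluster buildCluster(String[] contactPoints, int port) {
    return Cluster.builder()
        .addContactPoints(contactPoints)
        .withPort(port)
        // This is already the driver default; setting it explicitly documents the intent.
        .withQueryOptions(new QueryOptions().setDefaultIdempotence(false))
        .build();
  }

  /** A plain INSERT with fixed values can safely be replayed, so mark it as such. */
  static Statement insertReading(double value) {
    return new SimpleStatement(
            "INSERT INTO downgrading.sensor_data (sensor_id, date, timestamp, value) "
                + "VALUES (756716f7-2e54-4715-9f00-91dcbea6cf50, '2018-02-26', "
                + "'2018-02-26T13:53:46.345+01:00', ?)",
            value)
        .setIdempotent(true); // now eligible for retries or speculative executions
  }
}
```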
    + * + * @see Java Driver online manual + */ +@SuppressWarnings("deprecation") +public class DowngradingRetry { + + private static final String[] CONTACT_POINTS = {"127.0.0.1"}; + + private static final int PORT = 9042; + + /** The maximum number of retries to attempt. */ + private static final int MAX_RETRIES = 1; + + /** The initial consistency level to use. */ + private static final ConsistencyLevel INITIAL_CL = QUORUM; + + public static void main(String[] args) { + + DowngradingRetry client = new DowngradingRetry(MAX_RETRIES); + + try { + + client.connect(CONTACT_POINTS, PORT); + client.createSchema(); + client.write(INITIAL_CL, 0); + ResultSet rows = client.read(INITIAL_CL, 0); + client.display(rows); + + } finally { + client.close(); + } + } + + private final int maxRetries; + + private Cluster cluster; + private Session session; + + private DowngradingRetry(int maxRetries) { + this.maxRetries = maxRetries; + } + + /** + * Initiates a connection to the cluster specified by the given contact points and port. + * + * @param contactPoints the contact points to use. + * @param port the port to use. + */ + private void connect(String[] contactPoints, int port) { + + cluster = Cluster.builder().addContactPoints(contactPoints).withPort(port).build(); + + System.out.println("Connected to cluster: " + cluster.getClusterName()); + + session = cluster.connect(); + } + + /** Creates the schema (keyspace) and table for this example. */ + private void createSchema() { + + session.execute( + "CREATE KEYSPACE IF NOT EXISTS downgrading WITH replication " + + "= {'class':'SimpleStrategy', 'replication_factor':3}"); + + session.execute( + "CREATE TABLE IF NOT EXISTS downgrading.sensor_data (" + + "sensor_id uuid," + + "date date," + + // emulates bucketing by day + "timestamp timestamp," + + "value double," + + "PRIMARY KEY ((sensor_id,date),timestamp)" + + ")"); + } + + /** + * Inserts data, retrying if necessary with a downgraded CL. + * + * @param cl the consistency level to apply. + * @param retryCount the current retry count. + * @throws DriverException if the current consistency level cannot be downgraded. 
+ */ + private void write(ConsistencyLevel cl, int retryCount) { + + System.out.printf("Writing at %s (retry count: %d)%n", cl, retryCount); + + BatchStatement batch = new BatchStatement(UNLOGGED); + + batch.add( + new SimpleStatement( + "INSERT INTO downgrading.sensor_data " + + "(sensor_id, date, timestamp, value) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26'," + + "'2018-02-26T13:53:46.345+01:00'," + + "2.34)")); + + batch.add( + new SimpleStatement( + "INSERT INTO downgrading.sensor_data " + + "(sensor_id, date, timestamp, value) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26'," + + "'2018-02-26T13:54:27.488+01:00'," + + "2.47)")); + + batch.add( + new SimpleStatement( + "INSERT INTO downgrading.sensor_data " + + "(sensor_id, date, timestamp, value) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26'," + + "'2018-02-26T13:56:33.739+01:00'," + + "2.52)")); + + batch.setConsistencyLevel(cl); + + try { + + session.execute(batch); + System.out.println("Write succeeded at " + cl); + + } catch (DriverException e) { + + if (retryCount == maxRetries) { + throw e; + } + + e = unwrapNoHostAvailableException(e); + + System.out.println("Write failed: " + e); + + // General intent: + // 1) If we know the write has been fully persisted on at least one replica, + // ignore the exception since the write will be eventually propagated to other replicas. + // 2) If the write couldn't be persisted at all, abort as it is unlikely that a retry would + // succeed. + // 3) If the write was only partially persisted, retry at the highest consistency + // level that is likely to succeed. + + if (e instanceof UnavailableException) { + + // With an UnavailableException, we know that the write wasn't even attempted. + // Downgrade to the number of replicas reported alive and retry. + int aliveReplicas = ((UnavailableException) e).getAliveReplicas(); + + ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e); + write(downgraded, retryCount + 1); + + } else if (e instanceof WriteTimeoutException) { + + WriteType writeType = ((WriteTimeoutException) e).getWriteType(); + int acknowledgements = ((WriteTimeoutException) e).getReceivedAcknowledgements(); + + switch (writeType) { + case SIMPLE: + case BATCH: + // For simple and batch writes, as long as one replica acknowledged the write, + // ignore the exception; if none responded however, abort as it is unlikely that + // a retry would ever succeed. + if (acknowledgements == 0) { + throw e; + } + break; + + case UNLOGGED_BATCH: + // For unlogged batches, the write might have been persisted only partially, + // so we can't simply ignore the exception: instead, we need to retry with + // consistency level equal to the number of acknowledged writes. + ConsistencyLevel downgraded = downgrade(cl, acknowledgements, e); + write(downgraded, retryCount + 1); + break; + + case BATCH_LOG: + // Rare edge case: the peers that were chosen by the coordinator + // to receive the distributed batch log failed to respond. + // Simply retry with same consistency level. + write(cl, retryCount + 1); + break; + + default: + // Other write types are uncommon and should not be retried. + throw e; + } + + } else { + + // Unexpected error: just retry with same consistency level + // and hope to talk to a healthier coordinator. + write(cl, retryCount + 1); + } + } + } + + /** + * Queries data, retrying if necessary with a downgraded CL. + * + * @param cl the consistency level to apply. 
+ * @param retryCount the current retry count. + * @throws DriverException if the current consistency level cannot be downgraded. + */ + private ResultSet read(ConsistencyLevel cl, int retryCount) { + + System.out.printf("Reading at %s (retry count: %d)%n", cl, retryCount); + + Statement stmt = + new SimpleStatement( + "SELECT sensor_id, date, timestamp, value " + + "FROM downgrading.sensor_data " + + "WHERE " + + "sensor_id = 756716f7-2e54-4715-9f00-91dcbea6cf50 AND " + + "date = '2018-02-26' AND " + + "timestamp > '2018-02-26+01:00'") + .setConsistencyLevel(cl); + + try { + + ResultSet rows = session.execute(stmt); + System.out.println("Read succeeded at " + cl); + return rows; + + } catch (DriverException e) { + + if (retryCount == maxRetries) { + throw e; + } + + e = unwrapNoHostAvailableException(e); + + System.out.println("Read failed: " + e); + + // General intent: downgrade and retry at the highest consistency level + // that is likely to succeed. + + if (e instanceof UnavailableException) { + + // Downgrade to the number of replicas reported alive and retry. + int aliveReplicas = ((UnavailableException) e).getAliveReplicas(); + + ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e); + return read(downgraded, retryCount + 1); + + } else if (e instanceof ReadTimeoutException) { + + ReadTimeoutException readTimeout = (ReadTimeoutException) e; + int received = readTimeout.getReceivedAcknowledgements(); + int required = readTimeout.getRequiredAcknowledgements(); + + // If fewer replicas responded than required by the consistency level + // (but at least one replica did respond), retry with a consistency level + // equal to the number of received acknowledgements. + if (received < required) { + + ConsistencyLevel downgraded = downgrade(cl, received, e); + return read(downgraded, retryCount + 1); + } + + // If we received enough replies to meet the consistency level, + // but the actual data was not present among the received responses, + // then retry with the initial consistency level, we might be luckier next time + // and get the data back. + if (!readTimeout.wasDataRetrieved()) { + + return read(cl, retryCount + 1); + } + + // Otherwise, abort since the read timeout is unlikely to be solved by a retry. + throw e; + + } else { + + // Unexpected error: just retry with same consistency level + // and hope to talk to a healthier coordinator. + return read(cl, retryCount + 1); + } + } + } + + /** + * Displays the results on the console. + * + * @param rows the results to display. + */ + private void display(ResultSet rows) { + + final int width1 = 38; + final int width2 = 12; + final int width3 = 30; + final int width4 = 21; + + String format = + "%-" + width1 + "s" + "%-" + width2 + "s" + "%-" + width3 + "s" + "%-" + width4 + "s" + + "%n"; + + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + + // headings + System.out.printf(format, "sensor_id", "date", "timestamp", "value"); + + // separators + drawLine(width1, width2, width3, width4); + + // data + for (Row row : rows) { + + System.out.printf( + format, + row.getUUID("sensor_id"), + row.getDate("date"), + sdf.format(row.getTimestamp("timestamp")), + row.getDouble("value")); + } + } + + /** Closes the session and the cluster. */ + private void close() { + if (session != null) { + session.close(); + cluster.close(); + } + } + + /** + * Downgrades the current consistency level to the highest level that is likely to succeed, given + * the number of acknowledgements received. 
Rethrows the original exception if the current + * consistency level cannot be downgraded any further. + * + * @param current the current CL. + * @param acknowledgements the acknowledgements received. + * @param original the original exception. + * @return the downgraded CL. + * @throws DriverException if the current consistency level cannot be downgraded. + */ + private static ConsistencyLevel downgrade( + ConsistencyLevel current, int acknowledgements, DriverException original) { + if (acknowledgements >= 3) { + return ConsistencyLevel.THREE; + } + if (acknowledgements == 2) { + return ConsistencyLevel.TWO; + } + if (acknowledgements == 1) { + return ConsistencyLevel.ONE; + } + // Edge case: EACH_QUORUM does not report a global number of alive replicas + // so even if we get 0 alive replicas, there might be + // a node up in some other datacenter, so retry at ONE. + if (current == ConsistencyLevel.EACH_QUORUM) { + return ConsistencyLevel.ONE; + } + throw original; + } + + /** + * If the driver was unable to contact any node, it throws an umbrella {@link + * NoHostAvailableException} containing a map of the actual errors, keyed by host. + * + *

    This method unwraps this exception, inspects the map of errors, and returns the first + * exploitable {@link DriverException}. + * + * @param e the exception to unwrap. + * @return the unwrapped exception, or the original exception, if it is not an instance of {@link + * NoHostAvailableException}. + * @throws NoHostAvailableException the original exception, if it cannot be unwrapped. + */ + private static DriverException unwrapNoHostAvailableException(DriverException e) { + if (e instanceof NoHostAvailableException) { + NoHostAvailableException noHostAvailable = (NoHostAvailableException) e; + for (Throwable error : noHostAvailable.getErrors().values()) { + if (error instanceof QueryConsistencyException || error instanceof UnavailableException) { + return (DriverException) error; + } + } + // Couldn't find an exploitable error to unwrap: abort. + throw e; + } + // the original exceptional wasn't a NoHostAvailableException: proceed. + return e; + } + + /** + * Draws a line to isolate headings from rows. + * + * @param widths the column widths. + */ + private static void drawLine(int... widths) { + for (int width : widths) { + for (int i = 1; i < width; i++) { + System.out.print('-'); + } + System.out.print('+'); + } + System.out.println(); + } +} diff --git a/driver-examples/src/main/resources/logback.xml b/driver-examples/src/main/resources/logback.xml index 25dd17c8687..6e477b80910 100644 --- a/driver-examples/src/main/resources/logback.xml +++ b/driver-examples/src/main/resources/logback.xml @@ -1,12 +1,14 @@ + # OSGi Tests A collection of simple tests for the Java Driver in an OSGi environment. @@ -9,7 +28,7 @@ It is _not_ meant as an example application. If you are looking for examples demonstrating usage of the driver in an OSGi environment, please refer to our [OSGi examples repository]. -[OSGi examples repository]:https://github.com/datastax/java-driver-examples-osgi +[OSGi examples repository]:https://github.com/apache/cassandra-java-driver-examples-osgi ## Usage @@ -40,7 +59,7 @@ Once `mvn verify` completes, the bundle jar will be present in the `target/` dir The project includes integration tests that verify that the service can be activated and used in an OSGi container. It also verifies that -the Java driver can be used in an OSGi container in the following +the Java Driver can be used in an OSGi container in the following configurations: 1. Default (default classifier with all dependencies) diff --git a/driver-tests/osgi/common/pom.xml b/driver-tests/osgi/common/pom.xml new file mode 100644 index 00000000000..71e616ad2a5 --- /dev/null +++ b/driver-tests/osgi/common/pom.xml @@ -0,0 +1,46 @@ + + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-osgi + 3.12.2-SNAPSHOT + + + jar + cassandra-driver-tests-osgi-common + Java Driver for Apache Cassandra Tests - OSGi - Shaded + Common classes for testing Java Driver in an OSGi container. + + + + diff --git a/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxException.java b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxException.java new file mode 100644 index 00000000000..4a09fde9936 --- /dev/null +++ b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi.api; + +public class MailboxException extends Exception { + + public MailboxException(Throwable cause) { + super("Failure interacting with Mailbox", cause); + } +} diff --git a/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxMessage.java b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxMessage.java new file mode 100644 index 00000000000..441119612e8 --- /dev/null +++ b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxMessage.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi.api; + +import static com.datastax.driver.osgi.api.MailboxMessage.TABLE; + +import com.datastax.driver.core.utils.MoreObjects; +import com.datastax.driver.extras.codecs.date.SimpleTimestampCodec; +import com.datastax.driver.mapping.annotations.ClusteringColumn; +import com.datastax.driver.mapping.annotations.Column; +import com.datastax.driver.mapping.annotations.PartitionKey; +import com.datastax.driver.mapping.annotations.Table; + +/** A mailbox message entity mapped to the table {@value #TABLE}. 
*/ +@SuppressWarnings("unused") +@Table(name = TABLE) +public class MailboxMessage { + + public static final String TABLE = "mailbox"; + + @PartitionKey private String recipient; + + @ClusteringColumn + @Column(name = "time", codec = SimpleTimestampCodec.class) + private long date; + + @Column private String sender; + + @Column private String body; + + public MailboxMessage() {} + + public MailboxMessage(String recipient, long date, String sender, String body) { + this.recipient = recipient; + this.date = date; + this.sender = sender; + this.body = body; + } + + public String getRecipient() { + return recipient; + } + + public void setRecipient(String recipient) { + this.recipient = recipient; + } + + public long getDate() { + return date; + } + + public void setDate(long date) { + this.date = date; + } + + public String getSender() { + return sender; + } + + public void setSender(String sender) { + this.sender = sender; + } + + public String getBody() { + return body; + } + + public void setBody(String body) { + this.body = body; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MailboxMessage that = (MailboxMessage) o; + return date == that.date + && MoreObjects.equal(recipient, that.recipient) + && MoreObjects.equal(sender, that.sender) + && MoreObjects.equal(body, that.body); + } + + @Override + public int hashCode() { + return MoreObjects.hashCode(recipient, date, sender, body); + } +} diff --git a/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxService.java b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxService.java new file mode 100644 index 00000000000..b720417b05c --- /dev/null +++ b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/api/MailboxService.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi.api; + +public interface MailboxService { + + /** + * Retrieve all messages for a given recipient. + * + * @param recipient User whose mailbox is being read. + * @return All messages in the mailbox. + */ + public Iterable getMessages(String recipient) throws MailboxException; + + /** + * Stores the given message in the appropriate mailbox. + * + * @param message Message to send. + * @return The timestamp generated for the message (milliseconds since the Epoch). + */ + public long sendMessage(MailboxMessage message) throws MailboxException; + + /** + * Deletes all mail for the given recipient. + * + * @param recipient User whose mailbox will be cleared. 
+ */ + public void clearMailbox(String recipient) throws MailboxException; +} diff --git a/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/Activator.java b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/Activator.java new file mode 100644 index 00000000000..d3c8fb98981 --- /dev/null +++ b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/Activator.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi.impl; + +import static com.datastax.driver.core.ProtocolOptions.Compression.LZ4; +import static com.datastax.driver.core.ProtocolOptions.Compression.SNAPPY; +import static com.datastax.driver.osgi.api.MailboxMessage.TABLE; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.PerHostPercentileTracker; +import com.datastax.driver.core.ProtocolOptions; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.VersionNumber; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.policies.PercentileSpeculativeExecutionPolicy; +import com.datastax.driver.extras.codecs.date.SimpleTimestampCodec; +import com.datastax.driver.osgi.api.MailboxService; +import java.util.Hashtable; +import org.osgi.framework.BundleActivator; +import org.osgi.framework.BundleContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Activator implements BundleActivator { + + private static final Logger LOGGER = LoggerFactory.getLogger(Activator.class); + + private Cluster cluster; + + @Override + public void start(BundleContext context) throws Exception { + + VersionNumber ver = VersionNumber.parse(context.getProperty("cassandra.version")); + LOGGER.info("C* version: {}", ver); + + String contactPointsStr = context.getProperty("cassandra.contactpoints"); + if (contactPointsStr == null) { + contactPointsStr = "127.0.0.1"; + } + LOGGER.info("Contact points: {}", contactPointsStr); + String[] contactPoints = contactPointsStr.split(","); + + String keyspace = context.getProperty("cassandra.keyspace"); + if (keyspace == null) { + keyspace = "mailbox"; + } + LOGGER.info("Keyspace: {}", keyspace); + keyspace = Metadata.quote(keyspace); + + Cluster.Builder builder = + Cluster.builder() + .addContactPoints(contactPoints) + .withCodecRegistry(new CodecRegistry().register(SimpleTimestampCodec.instance)); + + String compression = context.getProperty("cassandra.compression"); + if (compression != null) { + if (ver.getMajor() < 2 && compression.equals(LZ4.name())) { + LOGGER.warn("Requested LZ4 compression but C* version < 2.0 is not compatible, disabling"); + } else if 
(ver.getMajor() >= 4 && compression.equals(SNAPPY.name())) { + LOGGER.warn( + "Requested snappy compression but C* version >= 4.0 is not compatible, disabling"); + } else { + LOGGER.info("Compression: {}", compression); + builder.withCompression(ProtocolOptions.Compression.valueOf(compression)); + } + } else { + LOGGER.info("Compression: NONE"); + } + + String usePercentileSpeculativeExecutionPolicy = + context.getProperty("cassandra.usePercentileSpeculativeExecutionPolicy"); + if ("true".equals(usePercentileSpeculativeExecutionPolicy)) { + PerHostPercentileTracker perHostPercentileTracker = + PerHostPercentileTracker.builder(15000).build(); + builder.withSpeculativeExecutionPolicy( + new PercentileSpeculativeExecutionPolicy(perHostPercentileTracker, 99, 1)); + LOGGER.info("Use PercentileSpeculativeExecutionPolicy: YES"); + } else { + LOGGER.info("Use PercentileSpeculativeExecutionPolicy: NO"); + } + + cluster = builder.build(); + + Session session; + try { + session = cluster.connect(keyspace); + } catch (InvalidQueryException e) { + // Create the schema if it does not exist. + session = cluster.connect(); + session.execute( + "CREATE KEYSPACE " + + keyspace + + " with replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}"); + session.execute( + "CREATE TABLE " + + keyspace + + "." + + TABLE + + " (" + + "recipient text," + + "time timestamp," + + "sender text," + + "body text," + + "PRIMARY KEY (recipient, time))"); + session.execute("USE " + keyspace); + } + + MailboxImpl mailbox = new MailboxImpl(session, keyspace); + mailbox.init(); + + context.registerService( + MailboxService.class.getName(), mailbox, new Hashtable()); + LOGGER.info("Mailbox Service successfully initialized"); + } + + @Override + public void stop(BundleContext context) throws Exception { + if (cluster != null) { + cluster.close(); + /* + Allow Netty ThreadDeathWatcher to terminate; + unfortunately we can't explicitly call ThreadDeathWatcher.awaitInactivity() + because Netty could be shaded. + If this thread isn't terminated when this bundle is closed, + we could get exceptions such as this one: + Exception in thread "threadDeathWatcher-2-1" java.lang.NoClassDefFoundError: xxx + Caused by: java.lang.ClassNotFoundException: Unable to load class 'xxx' because the bundle wiring for xxx is no longer valid. + Although ugly, they are harmless and can be safely ignored. + */ + Thread.sleep(1000); + } + } +} diff --git a/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/MailboxImpl.java b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/MailboxImpl.java new file mode 100644 index 00000000000..2781b1e3ce0 --- /dev/null +++ b/driver-tests/osgi/common/src/main/java/com/datastax/driver/osgi/impl/MailboxImpl.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi.impl; + +import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.driver.core.querybuilder.QueryBuilder.delete; +import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; +import static com.datastax.driver.core.querybuilder.QueryBuilder.select; +import static com.datastax.driver.osgi.api.MailboxMessage.TABLE; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.mapping.Mapper; +import com.datastax.driver.mapping.MappingManager; +import com.datastax.driver.osgi.api.MailboxException; +import com.datastax.driver.osgi.api.MailboxMessage; +import com.datastax.driver.osgi.api.MailboxService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MailboxImpl implements MailboxService { + + private static final Logger LOGGER = LoggerFactory.getLogger(MailboxImpl.class); + + private final Session session; + + private final String keyspace; + + private volatile boolean initialized = false; + + private PreparedStatement retrieveStatement; + + private PreparedStatement deleteStatement; + + private Mapper mapper; + + public MailboxImpl(Session session, String keyspace) { + this.session = session; + this.keyspace = keyspace; + } + + public synchronized void init() { + if (initialized) return; + + retrieveStatement = + session.prepare(select().from(keyspace, TABLE).where(eq("recipient", bindMarker()))); + + deleteStatement = + session.prepare(delete().from(keyspace, TABLE).where(eq("recipient", bindMarker()))); + + MappingManager mappingManager = new MappingManager(session); + + mapper = mappingManager.mapper(MailboxMessage.class); + + // Exercise metrics + LOGGER.info( + "Number of requests: {}", session.getCluster().getMetrics().getRequestsTimer().getCount()); + + initialized = true; + } + + @Override + public Iterable getMessages(String recipient) throws MailboxException { + try { + BoundStatement statement = new BoundStatement(retrieveStatement); + statement.setString(0, recipient); + return mapper.map(session.execute(statement)); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public long sendMessage(MailboxMessage message) throws MailboxException { + try { + mapper.save(message); + return message.getDate(); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public void clearMailbox(String recipient) throws MailboxException { + try { + BoundStatement statement = new BoundStatement(deleteStatement); + statement.setString(0, recipient); + session.execute(statement); + } catch (Exception e) { + throw new MailboxException(e); + } + } +} diff --git a/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/BundleOptions.java b/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/BundleOptions.java new file mode 100644 index 00000000000..e4d9ef9aaa0 --- /dev/null +++ b/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/BundleOptions.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi; + +import static org.ops4j.pax.exam.CoreOptions.bootDelegationPackages; +import static org.ops4j.pax.exam.CoreOptions.bundle; +import static org.ops4j.pax.exam.CoreOptions.mavenBundle; +import static org.ops4j.pax.exam.CoreOptions.options; +import static org.ops4j.pax.exam.CoreOptions.systemPackages; +import static org.ops4j.pax.exam.CoreOptions.systemProperty; + +import com.datastax.driver.core.CCMBridge; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ProtocolOptions; +import com.datastax.driver.core.TestUtils; +import com.google.common.collect.Lists; +import java.util.List; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.options.CompositeOption; +import org.ops4j.pax.exam.options.MavenArtifactProvisionOption; +import org.ops4j.pax.exam.options.UrlProvisionOption; +import org.ops4j.pax.exam.util.PathUtils; + +/** + * To check that all driver bundles are correctly provisioned, or to debug provisioning problems, + * run the Maven Pax Runner plugin: + * + *

    + * mvn pax:run
    + * 
    + * + * The plugin will start a Felix Gogo interactive shell and attempt to provision the driver bundles. + * + *

    Note: you MUST run 'mvn install' on the entire project before! + * + * @see Apache + * Felix Gogo Documentation + */ +public class BundleOptions { + + public static UrlProvisionOption driverBundle() { + return driverBundle(false); + } + + public static UrlProvisionOption driverBundle(boolean useShaded) { + String classifier = useShaded ? "-shaded" : ""; + return bundle( + "reference:file:" + + PathUtils.getBaseDir() + + "/../../../driver-core/target/cassandra-driver-core-" + + Cluster.getDriverVersion() + + classifier + + ".jar"); + } + + public static UrlProvisionOption mappingBundle() { + return bundle( + "reference:file:" + + PathUtils.getBaseDir() + + "/../../../driver-mapping/target/cassandra-driver-mapping-" + + Cluster.getDriverVersion() + + ".jar"); + } + + public static UrlProvisionOption extrasBundle() { + return bundle( + "reference:file:" + + PathUtils.getBaseDir() + + "/../../../driver-extras/target/cassandra-driver-extras-" + + Cluster.getDriverVersion() + + ".jar"); + } + + public static MavenArtifactProvisionOption guavaBundle() { + return mavenBundle("com.google.guava", "guava", getVersion("guava.version")); + } + + public static CompositeOption lz4Bundle() { + return new CompositeOption() { + + @Override + public Option[] getOptions() { + return options( + systemProperty("cassandra.compression").value(ProtocolOptions.Compression.LZ4.name()), + mavenBundle("org.lz4", "lz4-java", getVersion("lz4.version"))); + } + }; + } + + public static CompositeOption snappyBundle() { + return new CompositeOption() { + + @Override + public Option[] getOptions() { + return options( + systemProperty("cassandra.compression") + .value(ProtocolOptions.Compression.SNAPPY.name()), + mavenBundle("org.xerial.snappy", "snappy-java", getVersion("snappy.version"))); + } + }; + } + + public static CompositeOption hdrHistogramBundle() { + return new CompositeOption() { + + @Override + public Option[] getOptions() { + return options( + systemProperty("cassandra.usePercentileSpeculativeExecutionPolicy").value("true"), + mavenBundle("org.hdrhistogram", "HdrHistogram", getVersion("hdr.version"))); + } + }; + } + + public static CompositeOption nettyBundles() { + final String nettyVersion = getVersion("netty.version"); + return new CompositeOption() { + + @Override + public Option[] getOptions() { + return options( + mavenBundle("io.netty", "netty-buffer", nettyVersion), + mavenBundle("io.netty", "netty-codec", nettyVersion), + mavenBundle("io.netty", "netty-common", nettyVersion), + mavenBundle("io.netty", "netty-handler", nettyVersion), + mavenBundle("io.netty", "netty-transport", nettyVersion), + mavenBundle("io.netty", "netty-transport-native-unix-common", nettyVersion), + mavenBundle("io.netty", "netty-resolver", nettyVersion)); + } + }; + } + + public static CompositeOption dropwizardMetricsBundle() { + return new CompositeOption() { + + @Override + public Option[] getOptions() { + return options( + mavenBundle("io.dropwizard.metrics", "metrics-core", getVersion("metrics.version"))); + } + }; + } + + public static UrlProvisionOption mailboxBundle() { + return bundle("reference:file:" + PathUtils.getBaseDir() + "/target/classes"); + } + + public static CompositeOption defaultOptions() { + return new CompositeOption() { + + @Override + public Option[] getOptions() { + List

    This is needed for tests that use Pax-Exam since it runs some methods in the OSGi container + * which we do not want. + */ +public class CCMBridgeListener implements ITestListener { + + private CCMBridge ccm; + + @Override + public void onStart(ITestContext context) { + ccm = CCMBridge.builder().withNodes(1).withBinaryPort(9042).build(); + } + + @Override + public void onFinish(ITestContext context) { + if (ccm != null) { + ccm.remove(); + } + } + + @Override + public void onTestStart(ITestResult result) {} + + @Override + public void onTestSuccess(ITestResult result) {} + + @Override + public void onTestFailure(ITestResult result) {} + + @Override + public void onTestSkipped(ITestResult result) {} + + @Override + public void onTestFailedButWithinSuccessPercentage(ITestResult result) {} +} diff --git a/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/MailboxServiceTests.java b/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/MailboxServiceTests.java new file mode 100644 index 00000000000..fa92ea4dc4f --- /dev/null +++ b/driver-tests/osgi/common/src/test/java/com/datastax/driver/osgi/MailboxServiceTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.osgi; + +import static org.testng.Assert.assertEquals; + +import com.datastax.driver.osgi.api.MailboxException; +import com.datastax.driver.osgi.api.MailboxMessage; +import com.datastax.driver.osgi.api.MailboxService; +import java.util.ArrayList; +import java.util.Collection; +import java.util.GregorianCalendar; +import javax.inject.Inject; +import org.osgi.framework.Bundle; +import org.osgi.framework.BundleContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class MailboxServiceTests { + + private static final Logger LOGGER = LoggerFactory.getLogger(MailboxServiceTests.class); + + @Inject MailboxService service; + + @Inject BundleContext bundleContext; + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver. Ensures + * that queries can be made through the service with the current given configuration. + * + *

    The following configurations are tried (defined via methods with the @Configuration + * annotation): + * + *

      + *
    1. Default bundle (Driver with all of its dependencies and Guava 16.0.1) +
    2. Shaded bundle (Driver with Netty shaded and Guava 16.0.1) +
    3. With Guava 17 + *
    4. With Guava 18 + *
    5. With Guava 19 + *
    + */ + protected void checkService() throws MailboxException { + if (LOGGER.isDebugEnabled()) { + for (Bundle bundle : bundleContext.getBundles()) { + LOGGER.debug("Loaded bundle: {} {}", bundle.getSymbolicName(), bundle.getVersion()); + } + } + // Insert some data into mailbox for a particular user. + String recipient = "user@datastax.com"; + try { + Collection inMessages = new ArrayList(); + for (int i = 0; i < 30; i++) { + MailboxMessage message = + new MailboxMessage( + recipient, new GregorianCalendar(2015, 1, i).getTimeInMillis(), recipient, "" + i); + inMessages.add(message); + service.sendMessage(message); + } + + Iterable messages = service.getMessages(recipient); + + assertEquals(messages, inMessages); + } finally { + service.clearMailbox(recipient); + } + } +} diff --git a/driver-tests/osgi/pom.xml b/driver-tests/osgi/pom.xml index cda762fdd8b..8609753a8c9 100644 --- a/driver-tests/osgi/pom.xml +++ b/driver-tests/osgi/pom.xml @@ -1,12 +1,14 @@ @@ -177,31 +165,22 @@ ${metrics.version} ${testng.version} ${jsr353-api.version} + ${jackson.version} + ${jackson-databind.version} ${ipprefix} + false + + https://repo1.maven.org/maven2@id=central - org.apache.felix - maven-bundle-plugin + org.codehaus.mojo + animal-sniffer-maven-plugin - - com.datastax.driver.osgi - com.datastax.driver.osgi.api,!com.datastax.driver.osgi.impl - com.datastax.driver.osgi.impl.Activator - <_include>-osgi.bnd - + true - - - bundle-manifest - process-classes - - manifest - - - diff --git a/driver-tests/osgi/shaded/pom.xml b/driver-tests/osgi/shaded/pom.xml new file mode 100644 index 00000000000..26d30bc88bd --- /dev/null +++ b/driver-tests/osgi/shaded/pom.xml @@ -0,0 +1,163 @@ + + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-osgi + 3.12.2-SNAPSHOT + + + cassandra-driver-tests-osgi-shaded + Java Driver for Apache Cassandra Tests - OSGi - Shaded + A test for the shaded Java Driver in an OSGi container. 
+ + + + org.apache.cassandra + cassandra-driver-core + + shaded + + + com.github.jnr + jnr-ffi + + + com.github.jnr + jnr-posix + + + + + + + + + org.apache.maven.plugins + maven-resources-plugin + + + + copy-common-sources + process-sources + + copy-resources + + + ${project.build.directory}/dependency-sources/cassandra-driver-tests-osgi-common + true + + + ../common/src/main/java/ + + **/*.* + + + + + + + copy-common-test-sources + process-test-sources + + copy-resources + + + ${project.build.directory}/dependency-test-sources/cassandra-driver-tests-osgi-common + true + + + ../common/src/test/java/ + + **/*.* + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source + generate-sources + + add-source + + + + ${project.build.directory}/dependency-sources/cassandra-driver-tests-osgi-common + + + + + add-test-source + generate-test-sources + + add-test-source + + + + ${project.build.directory}/dependency-test-sources/cassandra-driver-tests-osgi-common + + + + + + + + maven-failsafe-plugin + + ${test.osgi.skip} + + + + + org.apache.felix + maven-bundle-plugin + + + bundle-manifest + process-classes + + manifest + + + ${project.build.outputDirectory}/META-INF + + com.datastax.driver.osgi + com.datastax.driver.osgi.api,!com.datastax.driver.osgi.impl + com.datastax.driver.osgi.impl.Activator + <_include>-osgi.bnd + + + + + + + + diff --git a/driver-tests/osgi/shaded/src/test/java/com/datastax/driver/osgi/MailboxServiceShadedIT.java b/driver-tests/osgi/shaded/src/test/java/com/datastax/driver/osgi/MailboxServiceShadedIT.java new file mode 100644 index 00000000000..1f8cf26e679 --- /dev/null +++ b/driver-tests/osgi/shaded/src/test/java/com/datastax/driver/osgi/MailboxServiceShadedIT.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceShadedIT extends MailboxServiceTests { + + @Configuration + public Option[] shadedConfig() { + return options( + defaultOptions(), + guavaBundle(), + extrasBundle(), + mappingBundle(), + driverBundle(true), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with shaded + * configuration (driver with Netty shaded). + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_shaded() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/shaded/src/test/resources/exam.properties b/driver-tests/osgi/shaded/src/test/resources/exam.properties new file mode 100644 index 00000000000..16d353c88cb --- /dev/null +++ b/driver-tests/osgi/shaded/src/test/resources/exam.properties @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pax.exam.system=test +pax.exam.logging=none diff --git a/driver-tests/osgi/src/test/resources/logback.xml b/driver-tests/osgi/shaded/src/test/resources/logback.xml similarity index 74% rename from driver-tests/osgi/src/test/resources/logback.xml rename to driver-tests/osgi/shaded/src/test/resources/logback.xml index 316e0d60a8f..0821c6dd2c0 100644 --- a/driver-tests/osgi/src/test/resources/logback.xml +++ b/driver-tests/osgi/shaded/src/test/resources/logback.xml @@ -1,12 +1,14 @@ + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-osgi + 3.12.2-SNAPSHOT + + + cassandra-driver-tests-osgi-unshaded + Java Driver for Apache Cassandra Tests - OSGi - Unshaded + A test for the unshaded Java Driver in an OSGi container. 
+ + + + org.apache.cassandra + cassandra-driver-core + + + + com.github.jnr + jnr-ffi + + + com.github.jnr + jnr-posix + + + + + + + + + org.apache.maven.plugins + maven-resources-plugin + + + + copy-common-sources + process-sources + + copy-resources + + + ${project.build.directory}/dependency-sources/cassandra-driver-tests-osgi-common + true + + + ../common/src/main/java/ + + **/*.* + + + + + + + copy-common-test-sources + process-test-sources + + copy-resources + + + ${project.build.directory}/dependency-test-sources/cassandra-driver-tests-osgi-common + true + + + ../common/src/test/java/ + + **/*.* + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source + generate-sources + + add-source + + + + ${project.build.directory}/dependency-sources/cassandra-driver-tests-osgi-common + + + + + add-test-source + generate-test-sources + + add-test-source + + + + ${project.build.directory}/dependency-test-sources/cassandra-driver-tests-osgi-common + + + + + + + + maven-failsafe-plugin + + ${test.osgi.skip} + + + + + org.apache.felix + maven-bundle-plugin + + + bundle-manifest + process-classes + + manifest + + + ${project.build.outputDirectory}/META-INF + + com.datastax.driver.osgi + com.datastax.driver.osgi.api,!com.datastax.driver.osgi.impl + com.datastax.driver.osgi.impl.Activator + <_include>-osgi.bnd + + + + + + + + diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceDefaultIT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceDefaultIT.java new file mode 100644 index 00000000000..abdc10bbbec --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceDefaultIT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceDefaultIT extends MailboxServiceTests { + + @Configuration + public Option[] defaultConfig() { + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle(), + driverBundle(), + extrasBundle(), + mappingBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with + * default configuration (driver with all of its regular dependencies). + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_default() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava17IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava17IT.java new file mode 100644 index 00000000000..3b6b90141e4 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava17IT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceGuava17IT extends MailboxServiceTests { + + @Configuration + public Option[] guava17Config() { + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle().version("17.0"), + driverBundle(), + mappingBundle(), + extrasBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with Guava + * 17 explicitly enforced. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_guava_17() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava18IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava18IT.java new file mode 100644 index 00000000000..2bc27869075 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava18IT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceGuava18IT extends MailboxServiceTests { + + @Configuration + public Option[] guava18Config() { + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle().version("18.0"), + driverBundle(), + extrasBundle(), + mappingBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with Guava + * 18 explicitly enforced. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_guava_18() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava19IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava19IT.java new file mode 100644 index 00000000000..7fe217fc74f --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava19IT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceGuava19IT extends MailboxServiceTests { + + @Configuration + public Option[] guava19Config() { + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle().version("19.0"), + driverBundle(), + extrasBundle(), + mappingBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with Guava + * 19 explicitly enforced. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_guava_19() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava20IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava20IT.java new file mode 100644 index 00000000000..6917d1d7cd8 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava20IT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceGuava20IT extends MailboxServiceTests { + + @Configuration + public Option[] guava20Config() { + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle().version("20.0"), + driverBundle(), + extrasBundle(), + mappingBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with Guava + * 20 explicitly enforced. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_guava_20() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava21IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava21IT.java new file mode 100644 index 00000000000..bb9d10c9968 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceGuava21IT.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.options.MavenArtifactProvisionOption; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.SkipException; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceGuava21IT extends MailboxServiceTests { + + @Configuration + public Option[] guava21Config() { + MavenArtifactProvisionOption guavaBundle = guavaBundle(); + String javaVersion = System.getProperty("java.version"); + // Only bring in 21.0 if java version >= 1.8. If this is not done the framework + // will fail to load for < 1.8 and we plan on skipping the test anyways. + if (javaVersion.compareTo("1.8") >= 0) { + guavaBundle = guavaBundle.version("21.0"); + } + + return options( + defaultOptions(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle, + driverBundle(), + extrasBundle(), + mappingBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with Guava + * 21 explicitly enforced. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-620 + * @since 2.0.10, 2.1.5 + */ + @Test(groups = "short") + public void test_guava_21() throws MailboxException { + String javaVersion = System.getProperty("java.version"); + if (javaVersion.compareTo("1.8") < 0) { + throw new SkipException("Guava 21 requires Java 1.8"); + } + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceHdrHistogramIT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceHdrHistogramIT.java new file mode 100644 index 00000000000..019d3616087 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceHdrHistogramIT.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.hdrHistogramBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceHdrHistogramIT extends MailboxServiceTests { + + @Configuration + public Option[] hdrHistogramConfig() { + return options( + defaultOptions(), + hdrHistogramBundle(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle(), + extrasBundle(), + mappingBundle(), + driverBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with LZ4 + * compression activated. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-1200 + * @since 3.1.0 + */ + @Test(groups = "short") + public void test_hdr() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceLZ4IT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceLZ4IT.java new file mode 100644 index 00000000000..430a5d0b98c --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceLZ4IT.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.lz4Bundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceLZ4IT extends MailboxServiceTests { + + @Configuration + public Option[] lz4Config() { + return options( + defaultOptions(), + lz4Bundle(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle(), + extrasBundle(), + mappingBundle(), + driverBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with LZ4 + * compression activated. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-1200 + * @since 3.1.0 + */ + @Test(groups = "short") + public void test_lz4() throws MailboxException { + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceSnappyIT.java b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceSnappyIT.java new file mode 100644 index 00000000000..0cb87523076 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/java/com/datastax/driver/osgi/MailboxServiceSnappyIT.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.osgi; + +import static com.datastax.driver.osgi.BundleOptions.defaultOptions; +import static com.datastax.driver.osgi.BundleOptions.driverBundle; +import static com.datastax.driver.osgi.BundleOptions.dropwizardMetricsBundle; +import static com.datastax.driver.osgi.BundleOptions.extrasBundle; +import static com.datastax.driver.osgi.BundleOptions.guavaBundle; +import static com.datastax.driver.osgi.BundleOptions.mailboxBundle; +import static com.datastax.driver.osgi.BundleOptions.mappingBundle; +import static com.datastax.driver.osgi.BundleOptions.nettyBundles; +import static com.datastax.driver.osgi.BundleOptions.snappyBundle; +import static org.ops4j.pax.exam.CoreOptions.options; + +import com.datastax.driver.core.VersionNumber; +import com.datastax.driver.osgi.api.MailboxException; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.testng.listener.PaxExam; +import org.testng.SkipException; +import org.testng.annotations.Listeners; +import org.testng.annotations.Test; + +@Listeners({CCMBridgeListener.class, PaxExam.class}) +public class MailboxServiceSnappyIT extends MailboxServiceTests { + + @Configuration + public Option[] snappyConfig() { + return options( + defaultOptions(), + snappyBundle(), + nettyBundles(), + dropwizardMetricsBundle(), + guavaBundle(), + extrasBundle(), + mappingBundle(), + driverBundle(), + mailboxBundle()); + } + + /** + * Exercises a 'mailbox' service provided by an OSGi bundle that depends on the driver with LZ4 + * compression activated. + * + * @test_category packaging + * @expected_result Can create, retrieve and delete data using the mailbox service. + * @jira_ticket JAVA-1200 + * @since 3.1.0 + */ + @Test(groups = "short") + public void test_snappy() throws MailboxException { + VersionNumber ver = VersionNumber.parse(bundleContext.getProperty("cassandra.version")); + if (ver.getMajor() >= 4) { + throw new SkipException("Snappy not supported with cassandra 4.0+"); + } + checkService(); + } +} diff --git a/driver-tests/osgi/unshaded/src/test/resources/exam.properties b/driver-tests/osgi/unshaded/src/test/resources/exam.properties new file mode 100644 index 00000000000..16d353c88cb --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/resources/exam.properties @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +pax.exam.system=test +pax.exam.logging=none diff --git a/driver-tests/osgi/unshaded/src/test/resources/logback.xml b/driver-tests/osgi/unshaded/src/test/resources/logback.xml new file mode 100644 index 00000000000..0821c6dd2c0 --- /dev/null +++ b/driver-tests/osgi/unshaded/src/test/resources/logback.xml @@ -0,0 +1,52 @@ + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/driver-tests/pom.xml b/driver-tests/pom.xml index 8c3b59037d8..fee1fba96c1 100644 --- a/driver-tests/pom.xml +++ b/driver-tests/pom.xml @@ -1,12 +1,14 @@ + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-parent + 3.12.2-SNAPSHOT + + + pom + cassandra-driver-tests-shading + Java Driver for Apache Cassandra Tests - Shading + A test project for tests which ensure that the shading of the driver didn't break anything. + + + shaded + unshaded + + + + + org.testng + testng + test + + + + + diff --git a/driver-tests/shading/shaded/pom.xml b/driver-tests/shading/shaded/pom.xml new file mode 100644 index 00000000000..46465fcb09c --- /dev/null +++ b/driver-tests/shading/shaded/pom.xml @@ -0,0 +1,77 @@ + + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-shading + 3.12.2-SNAPSHOT + + + cassandra-driver-tests-shading-shaded + Java Driver for Apache Cassandra Tests - Shading - Shaded + The shading detection tests for the shaded driver + + + + + org.apache.cassandra + cassandra-driver-core + shaded + + + * + io.netty + + + io.dropwizard.metrics + metrics-core + + + + + + + io.netty + netty-handler + test + + + + io.dropwizard.metrics + metrics-core + test + + + + org.testng + testng + test + + + + + + diff --git a/driver-tests/shading/shaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java b/driver-tests/shading/shaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java new file mode 100644 index 00000000000..b8a2b913155 --- /dev/null +++ b/driver-tests/shading/shaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core; + +import org.testng.Assert; +import org.testng.annotations.Test; + +public class NettyUtilIT { + + @Test(groups = "unit") + public void should_detect_shaded_driver() { + Assert.assertTrue(NettyUtil.isShaded()); + } +} diff --git a/driver-tests/shading/unshaded/pom.xml b/driver-tests/shading/unshaded/pom.xml new file mode 100644 index 00000000000..577c414453d --- /dev/null +++ b/driver-tests/shading/unshaded/pom.xml @@ -0,0 +1,50 @@ + + + + 4.0.0 + + + org.apache.cassandra + cassandra-driver-tests-shading + 3.12.2-SNAPSHOT + + + cassandra-driver-tests-shading-unshaded + Java Driver for Apache Cassandra Tests - Shading - Unshaded + The shading detection tests for the unshaded driver + + + + + org.apache.cassandra + cassandra-driver-core + + + + org.testng + testng + test + + + + + + diff --git a/driver-tests/shading/unshaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java b/driver-tests/shading/unshaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java new file mode 100644 index 00000000000..ce7a7b31219 --- /dev/null +++ b/driver-tests/shading/unshaded/src/test/java/com/datastax/driver/core/NettyUtilIT.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import org.testng.Assert; +import org.testng.annotations.Test; + +public class NettyUtilIT { + + @Test(groups = "unit") + public void should_detect_non_shaded_driver() { + Assert.assertFalse(NettyUtil.isShaded()); + } +} diff --git a/driver-tests/stress/README.md b/driver-tests/stress/README.md index ea0ea61bc88..063cf025fc4 100644 --- a/driver-tests/stress/README.md +++ b/driver-tests/stress/README.md @@ -1,7 +1,26 @@ + + # Stress application -A simple example application that uses the Java driver to stress test -Cassandra. This also somewhat stress tests the Java driver as a result. +A simple example application that uses the Java Driver to stress test +Cassandra. This also somewhat stress tests the Java Driver as a result. Please note that this simple example is far from being a complete stress application. In particular it currently supports a very limited number of diff --git a/driver-tests/stress/bin/stress b/driver-tests/stress/bin/stress old mode 100755 new mode 100644 index e47ec0d7955..7aa9b52405d --- a/driver-tests/stress/bin/stress +++ b/driver-tests/stress/bin/stress @@ -1,4 +1,20 @@ #!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. SCRIPT_DIR=$( cd "$( dirname "$0" )" && pwd ) CURRENT_DIR=$( pwd ) diff --git a/driver-tests/stress/pom.xml b/driver-tests/stress/pom.xml index 94fcdf763e2..669a03344c4 100644 --- a/driver-tests/stress/pom.xml +++ b/driver-tests/stress/pom.xml @@ -1,12 +1,14 @@ + ## Frequently Asked Questions ### How do I implement paging? @@ -35,7 +54,7 @@ row.getBool(0); // this is equivalent row.getBool("applied") Note that, unlike manual inspection, `wasApplied` does not consume the first row. -[wasApplied]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSet.html#wasApplied-- +[wasApplied]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#wasApplied-- ### What is a parameterized statement and how can I use it? @@ -157,7 +176,7 @@ and we've had many reports where the problem turned out to be in user code. See [Blobs.java] in the `driver-examples` module for some examples and explanations. -[Blobs.java]: https://github.com/datastax/java-driver/tree/3.x/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java +[Blobs.java]: https://github.com/apache/cassandra-java-driver/tree/3.x/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java ### How do I use the driver in an OSGi application? @@ -236,10 +255,66 @@ before submitting the next batch. See the [Acquisition queue] section of the Pooling section in the manual for explanation of how the driver enqueues requests when connections are over-utilized. -[Blobs.java]: https://github.com/datastax/java-driver/tree/3.3.0/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java +### What is Netty's native epoll transport and how do I enable or disable it? + +Netty provides [native transport libraries](http://netty.io/wiki/native-transports.html) which generally generate less +garbage and improve performance when compared to the default NIO-based transport. +By default if the driver detects the `netty-transport-native-epoll` library in its classpath it will attempt to use +[`EpollEventLoopGroup`](https://netty.io/4.0/api/io/netty/channel/epoll/EpollEventLoopGroup.html) for its underlying +event loop. + +In the usual case this works fine in linux environments. On the other hand, many users have run into compatibility +issues when the version of `netty-transport-native-epoll` is not compatible with a version of Netty in an application's +classpath. One such case is where an application depends on a version of `netty-all` that is different than the +version of `netty-handler` that the driver depends on. In such a case, a user may encounter an exception such as the +one described in [JAVA-1535](https://datastax-oss.atlassian.net/browse/JAVA-1535). + +While the epoll transport may in general improve performance, we expect the improvement to be marginal for a lot of use +cases. 
Therefore, if you don't want `netty-transport-native-epoll` to be used by the driver even if the library is +present in an application's classpath, the most direct way to disable this is to provide the system property value +`-Dcom.datastax.driver.FORCE_NIO=true` to your application to force the use of the default Netty NIO-based event loop. +If properly used, the following log message will be logged at INFO on startup: + +> Found Netty's native epoll transport in the classpath, but NIO was forced through the FORCE_NIO system property. + +### Why am I encountering `NoSuchMethodFoundException`, `NoClassDefFoundError`, or `VerifyError`s and how do I avoid them? + +Incompatibilities between the java driver and other libraries may cause these exceptions to surface in your +application at runtime. + +It could be that an older or newer version of a library that the driver depends on, such as Netty +or Guava, may be present in your application's classpath. If using Maven or another dependency +management tool, the tool should offer a command, such as `mvn dependency:tree` to identify the dependencies +in your project to help you understand the dependent versions across the various libraries you use in your +project. You may also want to evaluate your classpath to see if there are multiple JARs present for a library, +but with different versions, which could cause compatibility issues. In addition, consider evaluating +using the Logback logging framework, which provides the capability to include [packaging data] for classes +in stack traces. + +For Netty in particular, the driver offers an alternative artifact that shades its Netty dependency, +allowing you to use newer or older versions of Netty in your application without impacting the driver. +See [Using the shaded JAR] for more details. + +Another possibility could be that another library depends on a different version of the driver. +In this case, observe the stacktrace of the exception to see which library is attempting to use +the driver. To identify compatible versions, check that library's dependency on the driver to understand +what version is compatible. + +Finally, some monitoring and agent-based tools such as [DynaTrace] offer solutions that instrument the driver to +observe and record useful metrics such as request rates, latencies and more. It is possible that the tool +is not compatible with the version of the java driver you are using. In this case, check to +see if a newer version of that tool is available that works with this version of the driver. If no such +version is available, you may want to reach out to the maintainer of that tool to request that they provide +an update with compatibility to this driver version. 
+
+
+[Blobs.java]: https://github.com/apache/cassandra-java-driver/tree/3.12.1/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java
 [CASSANDRA-7304]: https://issues.apache.org/jira/browse/CASSANDRA-7304
 [Parameters and Binding]: ../manual/statements/prepared/#parameters-and-binding
 [Mapper options]: ../manual/object_mapper/using/#mapper-options
 [Acquisition queue]: ../manual/pooling/#acquisition-queue
 [Semaphore]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Semaphore.html
-[Futures.allAsList]: https://google.github.io/guava/releases/19.0/api/docs/com/google/common/util/concurrent/Futures.html#allAsList(java.lang.Iterable)
\ No newline at end of file
+[Futures.allAsList]: https://google.github.io/guava/releases/19.0/api/docs/com/google/common/util/concurrent/Futures.html#allAsList(java.lang.Iterable)
+[DynaTrace]: https://www.dynatrace.com/
+[packaging data]: https://logback.qos.ch/reasonsToSwitch.html#packagingData
+[Using the shaded JAR]: ../manual/shaded_jar
diff --git a/faq/osgi/README.md b/faq/osgi/README.md
index 82de85ac591..82b4b788947 100644
--- a/faq/osgi/README.md
+++ b/faq/osgi/README.md
@@ -1,6 +1,25 @@
+
+
 ## Frequently Asked Questions - OSGi

-### How to use the Java driver in an OSGi environment?
+### How to use the Java Driver in an OSGi environment?

 We have complete examples demonstrating usage of the driver in an [OSGi]
 environment; please refer to our [OSGi examples repository].
@@ -9,7 +28,7 @@ environment; please refer to our [OSGi examples repository].

 ### How to override Guava's version?

 The driver is compatible and tested with all versions of Guava in the range
-`[16.0.1,20)`.
+`[16.0.1,26.0-jre)`.

 If using Maven, you can force a more specific version by re-declaring the Guava
 dependency in your project, e.g.:
@@ -29,7 +48,7 @@ of Guava's packages, e.g. for 19.0:

 ### How to enable compression?

 First, read our [manual page on compression](../../manual/compression/)
-to understand how to enable compression for the Java driver.
+to understand how to enable compression for the Java Driver.

 OSGi projects can use either Snappy or LZ4 compression algorithms.
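+
+As a purely illustrative sketch (not taken from the linked manual page), enabling LZ4 compression on a 3.x
+`Cluster` typically looks like the following; the contact point is a placeholder, and the LZ4 (or Snappy)
+library must also be available on the classpath, as an OSGi bundle in an OSGi container:
+
+```java
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ProtocolOptions;
+
+public class CompressionExample {
+  public static void main(String[] args) {
+    // Negotiates LZ4 compression for the native protocol; use Compression.SNAPPY for Snappy.
+    try (Cluster cluster =
+        Cluster.builder()
+            .addContactPoint("127.0.0.1")
+            .withCompression(ProtocolOptions.Compression.LZ4)
+            .build()) {
+      cluster.connect();
+    }
+  }
+}
+```
+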
@@ -156,8 +175,8 @@ it is also normal to see the following log lines when starting the driver: [JAVA-1127]:https://datastax-oss.atlassian.net/browse/JAVA-1127 [BND]:http://bnd.bndtools.org/ [Maven bundle plugin]:https://cwiki.apache.org/confluence/display/FELIX/Apache+Felix+Maven+Bundle+Plugin+%28BND%29 -[OSGi examples repository]:https://github.com/datastax/java-driver-examples-osgi -[without metrics]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- +[OSGi examples repository]:https://github.com/apache/cassandra-java-driver-examples-osgi +[without metrics]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- [SLF4J]:http://www.slf4j.org/ [Logback]:http://logback.qos.ch/ [Tycho]:https://eclipse.org/tycho/ diff --git a/licenses/HdrHistogram.txt b/licenses/HdrHistogram.txt new file mode 100644 index 00000000000..401ccfb0ec5 --- /dev/null +++ b/licenses/HdrHistogram.txt @@ -0,0 +1,41 @@ +The code in this repository code was Written by Gil Tene, Michael Barker, +and Matt Warren, and released to the public domain, as explained at +http://creativecommons.org/publicdomain/zero/1.0/ + +For users of this code who wish to consume it under the "BSD" license +rather than under the public domain or CC0 contribution text mentioned +above, the code found under this directory is *also* provided under the +following license (commonly referred to as the BSD 2-Clause License). This +license does not detract from the above stated release of the code into +the public domain, and simply represents an additional license granted by +the Author. + +----------------------------------------------------------------------------- +** Beginning of "BSD 2-Clause License" text. ** + + Copyright (c) 2012, 2013, 2014, 2015, 2016 Gil Tene + Copyright (c) 2014 Michael Barker + Copyright (c) 2014 Matt Warren + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/asm.txt b/licenses/asm.txt new file mode 100644 index 00000000000..c71bb7bac5d --- /dev/null +++ b/licenses/asm.txt @@ -0,0 +1,27 @@ +ASM: a very small and fast Java bytecode manipulation framework +Copyright (c) 2000-2011 INRIA, France Telecom +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/jnr-posix.txt b/licenses/jnr-posix.txt new file mode 100644 index 00000000000..4dc4217a306 --- /dev/null +++ b/licenses/jnr-posix.txt @@ -0,0 +1,1076 @@ +jnr-posix is released under a tri EPL/GPL/LGPL license. You can use it, +redistribute it and/or modify it under the terms of the: + + Eclipse Public License version 2.0 + OR + GNU General Public License version 2 + OR + GNU Lesser General Public License version 2.1 + +The complete text of the Eclipse Public License is as follows: + + Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + + 1. DEFINITIONS + + "Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + + "Contributor" means any person or entity that Distributes the Program. + + "Licensed Patents" mean patent claims licensable by a Contributor which + are necessarily infringed by the use or sale of its Contribution alone + or when combined with the Program. + + "Program" means the Contributions Distributed in accordance with this + Agreement. + + "Recipient" means anyone who receives the Program under this Agreement + or any Secondary License (as applicable), including Contributors. 
+ + "Derivative Works" shall mean any work, whether in Source Code or other + form, that is based on (or derived from) the Program and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. + + "Modified Works" shall mean any work in Source Code or other form that + results from an addition to, deletion from, or modification of the + contents of the Program, including, for purposes of clarity any new file + in Source Code form that contains any contents of the Program. Modified + Works shall not include works that contain only declarations, + interfaces, types, classes, structures, or files of the Program solely + in each case in order to link to, bind by name, or subclass the Program + or Modified Works thereof. + + "Distribute" means the acts of a) distributing or b) making available + in any manner that enables the transfer of a copy. + + "Source Code" means the form of a Program preferred for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + "Secondary License" means either the GNU General Public License, + Version 2.0, or any later versions of that license, including any + exceptions or additional permissions as identified by the initial + Contributor. + + 2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. 
+ + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + + 3. REQUIREMENTS + + 3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + + 3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. + + 3.3 Contributors may not remove or alter any copyright, patent, + trademark, attribution notices, disclaimers of warranty, or limitations + of liability ("notices") contained within the Program from any copy of + the Program which they Distribute, provided that Contributors may add + their own appropriate notices. + + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities + with respect to end users, business partners and the like. While this + license is intended to facilitate the commercial use of the Program, + the Contributor who includes the Program in a commercial product + offering should do so in a manner which does not create potential + liability for other Contributors. Therefore, if a Contributor includes + the Program in a commercial product offering, such Contributor + ("Commercial Contributor") hereby agrees to defend and indemnify every + other Contributor ("Indemnified Contributor") against any losses, + damages and costs (collectively "Losses") arising from claims, lawsuits + and other legal actions brought by a third party against the Indemnified + Contributor to the extent caused by the acts or omissions of such + Commercial Contributor in connection with its distribution of the Program + in a commercial product offering. 
The obligations in this section do not + apply to any claims or Losses relating to any actual or alleged + intellectual property infringement. In order to qualify, an Indemnified + Contributor must: a) promptly notify the Commercial Contributor in + writing of such claim, and b) allow the Commercial Contributor to control, + and cooperate with the Commercial Contributor in, the defense and any + related settlement negotiations. The Indemnified Contributor may + participate in any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial + product offering, Product X. That Contributor is then a Commercial + Contributor. If that Commercial Contributor then makes performance + claims, or offers warranties related to Product X, those performance + claims and warranties are such Commercial Contributor's responsibility + alone. Under this section, the Commercial Contributor would have to + defend claims against the other Contributors related to those performance + claims and warranties, and if a court requires any other Contributor to + pay any damages as a result, the Commercial Contributor must pay + those damages. + + 5. NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT + PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" + BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR + IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF + TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR + PURPOSE. Each Recipient is solely responsible for determining the + appropriateness of using and distributing the Program and assumes all + risks associated with its exercise of rights under this Agreement, + including but not limited to the risks and costs of program errors, + compliance with applicable laws, damage to or loss of data, programs + or equipment, and unavailability or interruption of operations. + + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT + PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS + SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST + PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE + EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. + + 7. GENERAL + + If any provision of this Agreement is invalid or unenforceable under + applicable law, it shall not affect the validity or enforceability of + the remainder of the terms of this Agreement, and without further + action by the parties hereto, such provision shall be reformed to the + minimum extent necessary to make such provision valid and enforceable. + + If Recipient institutes patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that the + Program itself (excluding combinations of the Program with other software + or hardware) infringes such Recipient's patent(s), then such Recipient's + rights granted under Section 2(b) shall terminate as of the date such + litigation is filed. 
+ + All Recipient's rights under this Agreement shall terminate if it + fails to comply with any of the material terms or conditions of this + Agreement and does not cure such failure in a reasonable period of + time after becoming aware of such noncompliance. If all Recipient's + rights under this Agreement terminate, Recipient agrees to cease use + and distribution of the Program as soon as reasonably practicable. + However, Recipient's obligations under this Agreement and any licenses + granted by Recipient relating to the Program shall continue and survive. + + Everyone is permitted to copy and distribute copies of this Agreement, + but in order to avoid inconsistency the Agreement is copyrighted and + may only be modified in the following manner. The Agreement Steward + reserves the right to publish new versions (including revisions) of + this Agreement from time to time. No one other than the Agreement + Steward has the right to modify this Agreement. The Eclipse Foundation + is the initial Agreement Steward. The Eclipse Foundation may assign the + responsibility to serve as the Agreement Steward to a suitable separate + entity. Each new version of the Agreement will be given a distinguishing + version number. The Program (including Contributions) may always be + Distributed subject to the version of the Agreement under which it was + received. In addition, after a new version of the Agreement is published, + Contributor may elect to Distribute the Program (including its + Contributions) under the new version. + + Except as expressly stated in Sections 2(a) and 2(b) above, Recipient + receives no rights or licenses to the intellectual property of any + Contributor under this Agreement, whether expressly, by implication, + estoppel or otherwise. All rights in the Program not expressly granted + under this Agreement are reserved. Nothing in this Agreement is intended + to be enforceable by any entity that is not a Contributor or Recipient. + No third-party beneficiary rights are created under this Agreement. + + Exhibit A - Form of Secondary Licenses Notice + + "This Source Code may also be made available under the following + Secondary Licenses when the conditions for such availability set forth + in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), + version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. + +The complete text of the GNU General Public License v2 is as follows: + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + License is intended to guarantee your freedom to share and change free + software--to make sure the software is free for all its users. 
This + General Public License applies to most of the Free Software + Foundation's software and to any other program whose authors commit to + using it. (Some other Free Software Foundation software is covered by + the GNU Library General Public License instead.) You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + this service if you wish), that you receive source code or can get it + if you want it, that you can change the software or use pieces of it + in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid + anyone to deny you these rights or to ask you to surrender the rights. + These restrictions translate to certain responsibilities for you if you + distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must give the recipients all the rights that + you have. You must make sure that they, too, receive or can get the + source code. And you must show them these terms so they know their + rights. + + We protect your rights with two steps: (1) copyright the software, and + (2) offer you this license which gives you legal permission to copy, + distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain + that everyone understands that there is no warranty for this free + software. If the software is modified by someone else and passed on, we + want its recipients to know that what they have is not the original, so + that any problems introduced by others will not reflect on the original + authors' reputations. + + Finally, any free program is threatened constantly by software + patents. We wish to avoid the danger that redistributors of a free + program will individually obtain patent licenses, in effect making the + program proprietary. To prevent this, we have made it clear that any + patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and + modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains + a notice placed by the copyright holder saying it may be distributed + under the terms of this General Public License. The "Program", below, + refers to any such program or work, and a "work based on the Program" + means either the Program or any derivative work under copyright law: + that is to say, a work containing the Program or a portion of it, + either verbatim or with modifications and/or translated into another + language. (Hereinafter, translation is included without limitation in + the term "modification".) Each licensee is addressed as "you". + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running the Program is not restricted, and the output from the Program + is covered only if its contents constitute a work based on the + Program (independent of having been made by running the Program). + Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's + source code as you receive it, in any medium, provided that you + conspicuously and appropriately publish on each copy an appropriate + copyright notice and disclaimer of warranty; keep intact all the + notices that refer to this License and to the absence of any warranty; + and give any other recipients of the Program a copy of this License + along with the Program. + + You may charge a fee for the physical act of transferring a copy, and + you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion + of it, thus forming a work based on the Program, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Program, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Program, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Program. + + In addition, mere aggregation of another work not based on the Program + with the Program (or with a work based on the Program) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, + under Section 2) in object code or executable form under the terms of + Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + + The source code for a work means the preferred form of the work for + making modifications to it. For an executable work, complete source + code means all the source code for all modules it contains, plus any + associated interface definition files, plus the scripts used to + control compilation and installation of the executable. However, as a + special exception, the source code distributed need not include + anything that is normally distributed (in either source or binary + form) with the major components (compiler, kernel, and so on) of the + operating system on which the executable runs, unless that component + itself accompanies the executable. + + If distribution of executable or object code is made by offering + access to copy from a designated place, then offering equivalent + access to copy the source code from the same place counts as + distribution of the source code, even though third parties are not + compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense or distribute the Program is + void, and will automatically terminate your rights under this License. + However, parties who have received copies, or rights, from you under + this License will not have their licenses terminated so long as such + parties remain in full compliance. + + 5. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Program or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Program (or any work based on the + Program), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the + Program), the recipient automatically receives a license from the + original licensor to copy, distribute or modify the Program subject to + these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties to + this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Program at all. For example, if a patent + license would not permit royalty-free redistribution of the Program by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Program. + + If any portion of this section is held invalid or unenforceable under + any particular circumstance, the balance of the section is intended to + apply and the section as a whole is intended to apply in other + circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system, which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Program under this License + may add an explicit geographical distribution limitation excluding + those countries, so that distribution is permitted only in or among + countries not thus excluded. In such case, this License incorporates + the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions + of the General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the Program + specifies a version number of this License which applies to it and "any + later version", you have the option of following the terms and conditions + either of that version or of any later version published by the Free + Software Foundation. If the Program does not specify a version number of + this License, you may choose any version ever published by the Free Software + Foundation. + + 10. If you wish to incorporate parts of the Program into other free + programs whose distribution conditions are different, write to the author + to ask for permission. For software which is copyrighted by the Free + Software Foundation, write to the Free Software Foundation; we sometimes + make exceptions for this. Our decision will be guided by the two goals + of preserving the free status of all derivatives of our free software and + of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY + FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN + OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES + PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED + OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS + TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE + PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, + REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR + REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, + INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING + OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED + TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY + YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER + PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + +The complete text of the GNU Lesser General Public License 2.1 is as follows: + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + [This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + Licenses are intended to guarantee your freedom to share and change + free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some + specially designated software packages--typically libraries--of the + Free Software Foundation and other authors who decide to use it. You + can use it too, but we suggest you first think carefully about whether + this license or the ordinary General Public License is the better + strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, + not price. Our General Public Licenses are designed to make sure that + you have the freedom to distribute copies of free software (and charge + for this service if you wish); that you receive source code or can get + it if you want it; that you can change the software and use pieces of + it in new free programs; and that you are informed that you can do + these things. + + To protect your rights, we need to make restrictions that forbid + distributors to deny you these rights or to ask you to surrender these + rights. These restrictions translate to certain responsibilities for + you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis + or for a fee, you must give the recipients all the rights that we gave + you. You must make sure that they, too, receive or can get the source + code. 
If you link other code with the library, you must provide + complete object files to the recipients, so that they can relink them + with the library after making changes to the library and recompiling + it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the + library, and (2) we offer you this license, which gives you legal + permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that + there is no warranty for the free library. Also, if the library is + modified by someone else and passed on, the recipients should know + that what they have is not the original version, so that the original + author's reputation will not be affected by problems that might be + introduced by others. + + Finally, software patents pose a constant threat to the existence of + any free program. We wish to make sure that a company cannot + effectively restrict the users of a free program by obtaining a + restrictive license from a patent holder. Therefore, we insist that + any patent license obtained for a version of the library must be + consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the + ordinary GNU General Public License. This license, the GNU Lesser + General Public License, applies to certain designated libraries, and + is quite different from the ordinary General Public License. We use + this license for certain libraries in order to permit linking those + libraries into non-free programs. + + When a program is linked with a library, whether statically or using + a shared library, the combination of the two is legally speaking a + combined work, a derivative of the original library. The ordinary + General Public License therefore permits such linking only if the + entire combination fits its criteria of freedom. The Lesser General + Public License permits more lax criteria for linking other code with + the library. + + We call this license the "Lesser" General Public License because it + does Less to protect the user's freedom than the ordinary General + Public License. It also provides other free software developers Less + of an advantage over competing non-free programs. These disadvantages + are the reason we use the ordinary General Public License for many + libraries. However, the Lesser license provides advantages in certain + special circumstances. + + For example, on rare occasions, there may be a special need to + encourage the widest possible use of a certain library, so that it becomes + a de-facto standard. To achieve this, non-free programs must be + allowed to use the library. A more frequent case is that a free + library does the same job as widely used non-free libraries. In this + case, there is little to gain by limiting the free library to free + software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free + programs enables a greater number of people to use a large body of + free software. For example, permission to use the GNU C Library in + non-free programs enables many more people to use the whole GNU + operating system, as well as its variant, the GNU/Linux operating + system. 
+ + Although the Lesser General Public License is Less protective of the + users' freedom, it does ensure that the user of a program that is + linked with the Library has the freedom and the wherewithal to run + that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and + modification follow. Pay close attention to the difference between a + "work based on the library" and a "work that uses the library". The + former contains code derived from the library, whereas the latter must + be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other + program which contains a notice placed by the copyright holder or + other authorized party saying it may be distributed under the terms of + this Lesser General Public License (also called "this License"). + Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data + prepared so as to be conveniently linked with application programs + (which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work + which has been distributed under these terms. A "work based on the + Library" means either the Library or any derivative work under + copyright law: that is to say, a work containing the Library or a + portion of it, either verbatim or with modifications and/or translated + straightforwardly into another language. (Hereinafter, translation is + included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for + making modifications to it. For a library, complete source code means + all the source code for all modules it contains, plus any associated + interface definition files, plus the scripts used to control compilation + and installation of the library. + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running a program using the Library is not restricted, and output from + such a program is covered only if its contents constitute a work based + on the Library (independent of the use of the Library in a tool for + writing it). Whether that is true depends on what the Library does + and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's + complete source code as you receive it, in any medium, provided that + you conspicuously and appropriately publish on each copy an + appropriate copyright notice and disclaimer of warranty; keep intact + all the notices that refer to this License and to the absence of any + warranty; and distribute a copy of this License along with the + Library. + + You may charge a fee for the physical act of transferring a copy, + and you may at your option offer warranty protection in exchange for a + fee. + + 2. You may modify your copy or copies of the Library or any portion + of it, thus forming a work based on the Library, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Library, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Library, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote + it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Library. + + In addition, mere aggregation of another work not based on the Library + with the Library (or with a work based on the Library) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public + License instead of this License to a given copy of the Library. To do + this, you must alter all the notices that refer to this License, so + that they refer to the ordinary GNU General Public License, version 2, + instead of to this License. (If a newer version than version 2 of the + ordinary GNU General Public License has appeared, then you can specify + that version instead if you wish.) Do not make any other change in + these notices. + + Once this change is made in a given copy, it is irreversible for + that copy, so the ordinary GNU General Public License applies to all + subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of + the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or + derivative of it, under Section 2) in object code or executable form + under the terms of Sections 1 and 2 above provided that you accompany + it with the complete corresponding machine-readable source code, which + must be distributed under the terms of Sections 1 and 2 above on a + medium customarily used for software interchange. 
+ + If distribution of object code is made by offering access to copy + from a designated place, then offering equivalent access to copy the + source code from the same place satisfies the requirement to + distribute the source code, even though third parties are not + compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the + Library, but is designed to work with the Library by being compiled or + linked with it, is called a "work that uses the Library". Such a + work, in isolation, is not a derivative work of the Library, and + therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library + creates an executable that is a derivative of the Library (because it + contains portions of the Library), rather than a "work that uses the + library". The executable is therefore covered by this License. + Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file + that is part of the Library, the object code for the work may be a + derivative work of the Library even though the source code is not. + Whether this is true is especially significant if the work can be + linked without the Library, or if the work is itself a library. The + threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data + structure layouts and accessors, and small macros and small inline + functions (ten lines or less in length), then the use of the object + file is unrestricted, regardless of whether it is legally a derivative + work. (Executables containing this object code plus portions of the + Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may + distribute the object code for the work under the terms of Section 6. + Any executables containing that work also fall under Section 6, + whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or + link a "work that uses the Library" with the Library to produce a + work containing portions of the Library, and distribute that work + under terms of your choice, provided that the terms permit + modification of the work for the customer's own use and reverse + engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the + Library is used in it and that the Library and its use are covered by + this License. You must supply a copy of this License. If the work + during execution displays copyright notices, you must include the + copyright notice for the Library among them, as well as a reference + directing the user to the copy of this License. Also, you must do one + of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the + Library" must include any data and utility programs needed for + reproducing the executable from it. However, as a special exception, + the materials to be distributed need not include anything that is + normally distributed (in either source or binary form) with the major + components (compiler, kernel, and so on) of the operating system on + which the executable runs, unless that component itself accompanies + the executable. + + It may happen that this requirement contradicts the license + restrictions of other proprietary libraries that do not normally + accompany the operating system. Such a contradiction means you cannot + use both them and the Library together in an executable that you + distribute. + + 7. You may place library facilities that are a work based on the + Library side-by-side in a single library together with other library + facilities not covered by this License, and distribute such a combined + library, provided that the separate distribution of the work based on + the Library and of the other library facilities is otherwise + permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute + the Library except as expressly provided under this License. Any + attempt otherwise to copy, modify, sublicense, link with, or + distribute the Library is void, and will automatically terminate your + rights under this License. However, parties who have received copies, + or rights, from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Library or its derivative works. These actions are + prohibited by law if you do not accept this License. 
Therefore, by + modifying or distributing the Library (or any work based on the + Library), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the + Library), the recipient automatically receives a license from the + original licensor to copy, distribute, link with or modify the Library + subject to these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties with + this License. + + 11. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Library at all. For example, if a patent + license would not permit royalty-free redistribution of the Library by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Library. + + If any portion of this section is held invalid or unenforceable under any + particular circumstance, the balance of the section is intended to apply, + and the section as a whole is intended to apply in other circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Library under this License may add + an explicit geographical distribution limitation excluding those countries, + so that distribution is permitted only in or among countries not thus + excluded. In such case, this License incorporates the limitation as if + written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new + versions of the Lesser General Public License from time to time. + Such new versions will be similar in spirit to the present version, + but may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the Library + specifies a version number of this License which applies to it and + "any later version", you have the option of following the terms and + conditions either of that version or of any later version published by + the Free Software Foundation. If the Library does not specify a + license version number, you may choose any version ever published by + the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free + programs whose distribution conditions are incompatible with these, + write to the author to ask for permission. For software which is + copyrighted by the Free Software Foundation, write to the Free + Software Foundation; we sometimes make exceptions for this. Our + decision will be guided by the two goals of preserving the free status + of all derivatives of our free software and of promoting the sharing + and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. + EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR + OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY + KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE + LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME + THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN + WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY + AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU + FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR + CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE + LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING + RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A + FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF + SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest + possible use to the public, we recommend making it free software that + everyone can redistribute and change. You can do so by permitting + redistribution under these terms (or, alternatively, under the terms of the + ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is + safest to attach them to the start of each source file to most effectively + convey the exclusion of warranty; and each file should have at least the + "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Also add information on how to contact you by electronic and paper mail. + + You should also get your employer (if you work as a programmer) or your + school, if any, to sign a "copyright disclaimer" for the library, if + necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + + That's all there is to it! diff --git a/licenses/jnr-x86asm.txt b/licenses/jnr-x86asm.txt new file mode 100644 index 00000000000..c9583db05fd --- /dev/null +++ b/licenses/jnr-x86asm.txt @@ -0,0 +1,24 @@ + + Copyright (C) 2010 Wayne Meissner + Copyright (c) 2008-2009, Petr Kobalicek + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/slf4j-api.txt b/licenses/slf4j-api.txt new file mode 100644 index 00000000000..bb09a9ad4ec --- /dev/null +++ b/licenses/slf4j-api.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2023 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/manual/README.md b/manual/README.md index 019d9941290..e2498c402a2 100644 --- a/manual/README.md +++ b/manual/README.md @@ -1,3 +1,22 @@ + + ## Manual ### Quick start @@ -209,7 +228,7 @@ String firstName = row.getString("first_name"); blob getBytes java.nio.ByteBuffer boolean getBool boolean counter getLong long - date getDate LocalDate + date getDate LocalDate decimal getDecimal java.math.BigDecimal double getDouble double float getFloat float @@ -277,26 +296,31 @@ for (ColumnDefinitions.Definition definition : row.getColumnDefinitions()) { } ``` +### Object mapping + +Besides explicit work with queries and rows, you can also use +[Object Mapper](object_mapper/) to simplify retrieval & store of your data. + ### More information If you're reading this from the [generated HTML documentation on github.io](http://datastax.github.io/java-driver/), use the "Contents" menu on the left hand side to navigate sub-sections. If you're [browsing the source files on -github.com](https://github.com/datastax/java-driver/tree/3.x/manual), +github.com](https://github.com/apache/cassandra-java-driver/tree/3.x/manual), simply navigate to each sub-directory. -[Cluster]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.html -[Cluster.Builder]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.Builder.html -[Initializer]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.Initializer.html -[Session]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html -[ResultSet]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSet.html -[Row]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Row.html -[NettyOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/NettyOptions.html -[QueryOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryOptions.html -[SocketOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html -[Host.StateListener]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Host.StateListener.html -[LatencyTracker]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/LatencyTracker.html -[SchemaChangeListener]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SchemaChangeListener.html -[NoHostAvailableException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[LocalDate]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/LocalDate.html \ No newline at end of file +[Cluster]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html +[Cluster.Builder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html +[Initializer]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Initializer.html +[Session]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html +[Row]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html +[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html +[QueryOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html +[SocketOptions]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html +[Host.StateListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Host.StateListener.html +[LatencyTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LatencyTracker.html +[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SchemaChangeListener.html +[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[LocalDate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LocalDate.html diff --git a/manual/address_resolution/README.md b/manual/address_resolution/README.md index 619fdf574d3..412b3866c71 100644 --- a/manual/address_resolution/README.md +++ b/manual/address_resolution/README.md @@ -1,3 +1,22 @@ + + ## Address resolution Each node in the Cassandra cluster is uniquely identified by an IP address that the driver will use to establish @@ -102,8 +121,8 @@ private/public switch automatically based on location). -[AddressTranslator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/AddressTranslator.html -[EC2MultiRegionAddressTranslator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.html +[AddressTranslator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/AddressTranslator.html +[EC2MultiRegionAddressTranslator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.html [cassandra.yaml]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html [rpc_address]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__rpc_address diff --git a/manual/async/README.md b/manual/async/README.md index abc93059f21..da7df365331 100644 --- a/manual/async/README.md +++ b/manual/async/README.md @@ -1,3 +1,22 @@ + + ## Asynchronous programming The driver exposes an asynchronous API that allows you to write programs @@ -51,8 +70,8 @@ to the current page, and [fetchMoreResults] to get a future to the next page (see also the section on [paging](../paging/)). Here is a full example: -[getAvailableWithoutFetching]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSet.html#getAvailableWithoutFetching-- -[fetchMoreResults]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSet.html#fetchMoreResults-- +[getAvailableWithoutFetching]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#getAvailableWithoutFetching-- +[fetchMoreResults]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#fetchMoreResults-- ```java Statement statement = new SimpleStatement("select * from foo").setFetchSize(20); @@ -134,5 +153,5 @@ There are still a few places where the driver will block internally hasn't been fetched already. 
[ListenableFuture]: https://github.com/google/guava/wiki/ListenableFutureExplained -[init]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.html#init-- -[query trace]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryTrace.html +[init]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#init-- +[query trace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryTrace.html diff --git a/manual/auth/README.md b/manual/auth/README.md index 430e776fa4b..7b315fa3642 100644 --- a/manual/auth/README.md +++ b/manual/auth/README.md @@ -1,5 +1,53 @@ + + ## Authentication -*Coming soon... In the meantime, see the javadoc for [AuthProvider].* +Cassandra’s binary protocol supports [SASL]-based authentication. To enable it, use +[Cluster.Builder.withCredentials] when building your `Cluster` instance to provide the credentials +you wish to authenticate with: + +```java +Cluster.builder() + .withCredentials("bob", "mypassword") + .build(); +``` + +This is a shortcut for using [PlainTextAuthProvider] for simple username/password authentication +(intended to work with the server-side `PasswordAuthenticator`). This may alternatively be +provided using the [Cluster.Builder.withAuthProvider] method: + + +```java +Cluster.builder() + .withAuthProvider(new PlainTextAuthProvider("bob", "mypassword")) + .build(); +``` + +Authentication must be configured before opening a session, it cannot be changed at runtime. + +You can also write your own provider; it must implement [AuthProvider]. + + +[SASL]: https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer -[AuthProvider]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/AuthProvider.html \ No newline at end of file +[Cluster.Builder.withCredentials]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withCredentials-java.lang.String-java.lang.String- +[AuthProvider]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/AuthProvider.html +[Cluster.Builder.withAuthProvider]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withAuthProvider-com.datastax.driver.core.AuthProvider- +[PlainTextAuthProvider]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PlainTextAuthProvider.html diff --git a/manual/cloud/README.md b/manual/cloud/README.md new file mode 100644 index 00000000000..3ff7e63558c --- /dev/null +++ b/manual/cloud/README.md @@ -0,0 +1,120 @@ + + +## Connecting to Astra (Cloud) + +Using the Java Driver to connect to a DataStax Astra database is almost identical to using +the driver to connect to any normal Apache Cassandra® database. The only differences are in how the +driver is configured in an application and that you will need to obtain a `secure connect bundle`. + +The following is a Quick Start guide to writing a simple application that can connect to an Astra +database. + +### Prerequisites + +1. [Download][Download Maven] and [install][Install Maven] Maven. +1. Create an Astra database on [GCP/AWS/Azure][Create an Astra database - GCP/AWS/Azure]; alternatively, + have a team member provide access to their Astra database (instructions for + [GCP/AWS/Azure][Access an Astra database - GCP/AWS/Azure]) to obtain database connection details. +1. 
Download the secure connect bundle (instructions for + [GCP/AWS/Azure][Download the secure connect bundle - GCP/AWS/Azure]) to obtain connection credentials for your + database. +1. Ensure you are using Java 8 or higher. The cloud connect api does not support java 6 or 7. + +### Procedure + +1. Include the driver artifacts in your `pom.xml` file according to this [pom.xml dependency]. + +1. Initialize the Java Driver. + + a. Create a `ConnectDatabase.java` file in the `/src/main/java` directory for your Java project. + + ```sh + $ cd javaProject/src/main/java + ``` + ```sh + $ touch ConnectDatabase.java + ``` + + b. Copy the following code for your DataStax Driver into the `ConnectDatabase.java` file. + The following example implements a `ConnectDatabase` class to connect to your Astra database, + runs a CQL query, and prints the output to the console. + + **Note:** With the `Cluster.builder()` object, make sure to set the path to the secure + connect bundle for your Astra database (**"/path/to/secure-connect-database_name.zip"**) in + the `withCloudSecureConnectBundle()` method as shown in the following example. + * Java Driver for Apache Cassandra 3.x + + ```java + import com.datastax.driver.core.Cluster; + import com.datastax.driver.core.ResultSet; + import com.datastax.driver.core.Row; + import com.datastax.driver.core.Session; + import java.io.File; + + public class ConnectDatabase { + + public static void main(String[] args) { + // Create the Cluster object: + Cluster cluster = null; + try { + cluster = Cluster.builder() + // make sure you change the path to the secure connect bundle below + .withCloudSecureConnectBundle(new File("/path/to/secure-connect-database_name.zip")) + .withCredentials("user_name", "password") + .build(); + Session session = cluster.connect(); + // Select the release_version from the system.local table: + ResultSet rs = session.execute("select release_version from system.local"); + Row row = rs.one(); + //Print the results of the CQL query to the console: + if (row != null) { + System.out.println(row.getString("release_version")); + } else { + System.out.println("An error occurred."); + } + } finally { + if (cluster != null) cluster.close(); + } + } + } + ``` + + c. Save and close the ConnectDatabase.java file. + +### Astra Differences + +In most circumstances, the client code for interacting with an Astra cluster will be the same as +interacting with any other Cassandra cluster. The exceptions being: + + * An SSL connection will be established automatically. Manual SSL configuration is not necessary. + + * A Cluster’s contact points attribute should not be used. The cloud config contains all of the + necessary contact information (i.e. don't use any of the `addContactPoint()` or + `addContactPoints()` methods on the Builder) + + * If a consistency level is not specified for an execution profile or query, then + `ConsistencyLevel.LOCAL_QUORUM` will be used as the default. 
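To make the last point concrete, the usual per-statement override still works against Astra. The snippet below is only an illustrative sketch (it reuses the `session` from the example above, the query is the same `system.local` read, and `LOCAL_ONE` is an arbitrary choice):

```java
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

// LOCAL_QUORUM is the implicit default; override it per statement when needed.
Statement statement =
    new SimpleStatement("select release_version from system.local")
        .setConsistencyLevel(ConsistencyLevel.LOCAL_ONE);
ResultSet rs = session.execute(statement);
```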
+ +[Download Maven]: https://maven.apache.org/download.cgi +[Install Maven]: https://maven.apache.org/install.html +[Create an Astra database - GCP/AWS/Azure]: https://docs.astra.datastax.com/docs/creating-your-astra-database#dscloudCreateCluster +[Access an Astra database - GCP/AWS/Azure]: https://docs.astra.datastax.com/docs/obtaining-database-credentials#sharing-your-secure-connect-bundle +[Download the secure connect bundle - GCP/AWS/Azure]: https://docs.astra.datastax.com/docs/obtaining-database-credentials +[pom.xml dependency]: ../../#getting-the-driver diff --git a/manual/compression/README.md b/manual/compression/README.md index dad0e5e5b94..6b25b5e5c69 100644 --- a/manual/compression/README.md +++ b/manual/compression/README.md @@ -1,3 +1,22 @@ + + ## Compression Cassandra's binary protocol supports optional compression of @@ -12,12 +31,15 @@ will likely be beneficial when you have larger payloads. Two algorithms are available: [LZ4](https://github.com/jpountz/lz4-java) and -[Snappy](https://code.google.com/p/snappy/). +[Snappy](https://code.google.com/p/snappy/). The LZ4 implementation is a good +first choice; it offers fallback implementations in case native libraries fail +to load and +[benchmarks](http://java-performance.info/performance-general-compression/) +suggest that it offers better performance and compression ratios over Snappy. Both rely on third-party libraries, declared by the driver as *optional* -dependencies. So If you use a build tool like Maven, you'll need to -declare an explicit dependency to pull the appropriate library in your -application's classpath. Then you configure compression at driver -startup. +dependencies. So if you use a build tool like Maven, you'll need to declare an +explicit dependency to pull the appropriate library in your application's +classpath. Then you configure compression at driver startup. ### LZ4 @@ -25,9 +47,9 @@ Maven dependency: ```xml <dependency> - <groupId>net.jpountz.lz4</groupId> - <artifactId>lz4</artifactId> - <version>1.3.0</version> + <groupId>org.lz4</groupId> + <artifactId>lz4-java</artifactId> + <version>1.4.1</version> </dependency> ``` @@ -53,11 +75,11 @@ LZ4-java has three internal implementations (from fastest to slowest): It will pick the best implementation depending on what's possible on your platform. To find out which one was chosen, [enable INFO logs](../logging/) on the category -`com.datastax.driver.core.FrameCompressor` and look for a log similar to +`com.datastax.driver.core.LZ4Compressor` and look for a log similar to this: ``` -INFO com.datastax.driver.core.FrameCompressor - Using LZ4Factory:JNI +INFO com.datastax.driver.core.LZ4Compressor - Using LZ4Factory:JNI ``` ### Snappy @@ -85,4 +107,4 @@ cluster = Cluster.builder() .build(); ``` -[pom]: https://repo1.maven.org/maven2/com/datastax/cassandra/cassandra-driver-parent/3.3.0/cassandra-driver-parent-3.3.0.pom +[pom]: https://repo1.maven.org/maven2/com/datastax/cassandra/cassandra-driver-parent/3.12.1/cassandra-driver-parent-3.12.1.pom diff --git a/manual/control_connection/README.md b/manual/control_connection/README.md index eccef57e029..d2cde319bbf 100644 --- a/manual/control_connection/README.md +++ b/manual/control_connection/README.md @@ -1,10 +1,40 @@ + + ## Control connection -*Coming soon...* +The control connection is a dedicated connection used for administrative tasks: - \ No newline at end of file +* querying system tables to learn about the cluster's topology and + [schema](../metadata/#schema-metadata); +* checking [schema agreement](../metadata/#schema-agreement); +* reacting to server events, which are used to notify the driver of external topology or schema + changes.
+ +When the driver starts, the control connection is established to the first contacted node. If that +node goes down, a [reconnection](../reconnection/) is started to find another node; it is governed +by the same policy as regular connections and tries the nodes according to a query plan from the +[load balancing policy](../load_balancing/). + +The control connection is managed independently from [regular pooled connections](../pooling/), and +used exclusively for administrative requests. It is included in [Session.State.getOpenConnections], +as well as the `open-connections` [metric](../metrics); for example, if you've configured a pool +size of 2, the control node will have 3 connections. + +[Session.State.getOpenConnections]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.State.html#getOpenConnections-com.datastax.driver.core.Host- diff --git a/manual/custom_codecs/README.md b/manual/custom_codecs/README.md index e30d9534ccf..6737d923dbd 100644 --- a/manual/custom_codecs/README.md +++ b/manual/custom_codecs/README.md @@ -1,3 +1,22 @@ + + ## Custom Codecs Custom codecs support transparent, user-configurable mapping of CQL types to arbitrary Java objects. @@ -296,12 +315,13 @@ public class AddressCodec extends TypeCodec
    { @Override public Address parse(String value) throws InvalidTypeException { - return value == null || value.isEmpty() || value.equals(NULL) ? null : toAddress(innerCodec.parse(value)); + return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") ? + null : toAddress(innerCodec.parse(value)); } @Override public String format(Address value) throws InvalidTypeException { - return value == null ? null : innerCodec.format(toUDTValue(value)); + return value == null ? "NULL" : innerCodec.format(toUDTValue(value)); } protected Address toAddress(UDTValue value) { @@ -446,26 +466,26 @@ Beware that in these cases, the lookup performs in average 10x worse. If perform consider using prepared statements all the time. [JAVA-721]: https://datastax-oss.atlassian.net/browse/JAVA-721 -[TypeCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html -[LocalDate]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/LocalDate.html +[TypeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html +[LocalDate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LocalDate.html [ByteBuffer]: http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html -[serialize]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html#serialize-T-com.datastax.driver.core.ProtocolVersion- -[deserialize]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html#deserialize-java.nio.ByteBuffer-com.datastax.driver.core.ProtocolVersion- -[TypeCodec.format]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html#format-T- -[TypeCodec.parse]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html#parse-java.lang.String- -[accepts]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TypeCodec.html#accepts-com.datastax.driver.core.DataType- -[CodecRegistry]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/CodecRegistry.html -[CodecNotFoundException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/CodecNotFoundException.html +[serialize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#serialize-T-com.datastax.driver.core.ProtocolVersion- +[deserialize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#deserialize-java.nio.ByteBuffer-com.datastax.driver.core.ProtocolVersion- +[TypeCodec.format]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#format-T- +[TypeCodec.parse]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#parse-java.lang.String- +[accepts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#accepts-com.datastax.driver.core.DataType- +[CodecRegistry]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/CodecRegistry.html +[CodecNotFoundException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/CodecNotFoundException.html [Jackson]: https://github.com/FasterXML/jackson [AbstractType]: https://github.com/apache/cassandra/blob/trunk/src/java/org/apache/cassandra/db/marshal/AbstractType.java -[UserType]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/UserType.html -[UDTValue]: 
http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/UDTValue.html -[TupleType]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleType.html -[TupleValue]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleValue.html -[CustomType]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/DataType.CustomType.html -[TypeToken]: http://google.github.io/guava/releases/21.0/api/docs/com/google/common/reflect/TypeToken.html -[SimpleStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SimpleStatement.html -[BuiltStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/querybuilder/BuiltStatement.html -[setList]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SettableByIndexData.html#setList-int-java.util.List- -[setSet]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SettableByIndexData.html#setSet-int-java.util.Set- -[setMap]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SettableByIndexData.html#setMap-int-java.util.Map- +[UserType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UserType.html +[UDTValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UDTValue.html +[TupleType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html +[TupleValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleValue.html +[CustomType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/DataType.CustomType.html +[TypeToken]: https://google.github.io/guava/releases/19.0/api/docs/com/google/common/reflect/TypeToken.html +[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SimpleStatement.html +[BuiltStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/BuiltStatement.html +[setList]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setList-int-java.util.List- +[setSet]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setSet-int-java.util.Set- +[setMap]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setMap-int-java.util.Map- diff --git a/manual/custom_codecs/extras/README.md b/manual/custom_codecs/extras/README.md index 0441cd82d60..3198135878a 100644 --- a/manual/custom_codecs/extras/README.md +++ b/manual/custom_codecs/extras/README.md @@ -1,3 +1,22 @@ + + ## Optional codecs The driver's "extras" module provides additional [codec](../) implementations: these codecs are not required by core @@ -8,9 +27,9 @@ The module is published as a separate Maven artifact: ```xml <dependency> - <groupId>com.datastax.cassandra</groupId> + <groupId>org.apache.cassandra</groupId> <artifactId>cassandra-driver-extras</artifactId> - <version>3.3.0</version> + <version>3.12.1</version> </dependency> ``` @@ -41,6 +60,7 @@ session.execute("INSERT INTO example (id, t) VALUES (1, ?)", Similarly: * [LocalDateCodec] maps [LocalDate] to `date`; +* [LocalDateTimeCodec] maps [LocalDateTime] to `timestamp`; * [LocalTimeCodec] maps [LocalTime] to `time`. One problem with `timestamp` is that it does not store time zones.
[ZonedDateTimeCodec] addresses that, by mapping a @@ -58,14 +78,32 @@ session.execute("INSERT INTO example (id, t) VALUES (1, ?)", ZonedDateTime.parse("2010-06-30T01:20:47.999+01:00")); ``` -[InstantCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/jdk8/InstantCodec.html -[LocalDateCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/jdk8/LocalDateCodec.html -[LocalTimeCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/jdk8/LocalTimeCodec.html -[ZonedDateTimeCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/jdk8/ZonedDateTimeCodec.html +[ZoneIdCodec] maps [ZoneId] to CQL's `varchar`: + +```java +import com.datastax.driver.extras.codecs.jdk8.ZoneIdCodec; +import java.time.ZoneId; + +cluster.getConfiguration().getCodecRegistry() + .register(ZoneIdCodec.instance); + +// schema: CREATE TABLE example(id int PRIMARY KEY, t varchar) +session.execute("INSERT INTO example (id, t) VALUES (1, ?)", + ZoneId.of("GMT+07:00")); +``` + +[InstantCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/InstantCodec.html +[LocalDateCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalDateCodec.html +[LocalDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalDateTimeCodec.html +[LocalTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalTimeCodec.html +[ZonedDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/ZonedDateTimeCodec.html +[ZoneIdCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/ZoneIdCodec.html [Instant]: https://docs.oracle.com/javase/8/docs/api/java/time/Instant.html [LocalDate]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDate.html +[LocalDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDateTime.html [LocalTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalTime.html [ZonedDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/ZonedDateTime.html +[ZoneId]: https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html #### Joda time @@ -111,10 +149,10 @@ session.execute("INSERT INTO example (id, t) VALUES (1, ?)", DateTime.parse("2010-06-30T01:20:47.999+01:00")); ``` -[InstantCodec_joda]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/joda/InstantCodec.html -[LocalDateCodec_joda]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/joda/LocalDateCodec.html -[LocalTimeCodec_joda]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/joda/LocalTimeCodec.html -[DateTimeCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/joda/DateTimeCodec.html +[InstantCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/InstantCodec.html +[LocalDateCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/LocalDateCodec.html +[LocalTimeCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/LocalTimeCodec.html +[DateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/DateTimeCodec.html [DateTime]: 
http://www.joda.org/joda-time/apidocs/org/joda/time/DateTime.html [Instant_joda]: http://www.joda.org/joda-time/apidocs/org/joda/time/Instant.html [LocalDate_joda]: http://www.joda.org/joda-time/apidocs/org/joda/time/LocalDate.html @@ -132,8 +170,8 @@ Time can also be expressed as simple durations: There is no extra codec for `time`, because by default the driver already maps that type to a `long` representing the number of nanoseconds since midnight. -[SimpleTimestampCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/date/SimpleTimestampCodec.html -[SimpleDateCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/date/SimpleDateCodec.html +[SimpleTimestampCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/date/SimpleTimestampCodec.html +[SimpleDateCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/date/SimpleDateCodec.html ### Enums @@ -171,8 +209,8 @@ Note that if you registered an `EnumNameCodec` and an `EnumOrdinalCodec` _for th In practice, this is unlikely to happen, because you'll probably stick to a single CQL type for a given enum type; however, if you ever run into that issue, the workaround is to use [prepared statements](../../statements/prepared/), for which the driver knows the CQL type and can pick the exact codec. -[EnumNameCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/enums/EnumNameCodec.html -[EnumOrdinalCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/enums/EnumOrdinalCodec.html +[EnumNameCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/enums/EnumNameCodec.html +[EnumOrdinalCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/enums/EnumOrdinalCodec.html [name]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#name-- [ordinal]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#ordinal-- @@ -217,7 +255,7 @@ session.execute("insert into example (id, owner) values (1, ?)", // owner saved as '{"id":1,"name":"root"}' ``` -[JacksonJsonCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/json/JacksonJsonCodec.html +[JacksonJsonCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/json/JacksonJsonCodec.html [Jackson]: https://github.com/FasterXML/jackson @@ -256,7 +294,7 @@ session.execute("insert into example (id, owner) values (1, ?)", ``` -[Jsr353JsonCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/json/Jsr353JsonCodec.html +[Jsr353JsonCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/json/Jsr353JsonCodec.html [JsonStructure]: https://docs.oracle.com/javaee/7/tutorial/jsonp002.htm @@ -309,9 +347,9 @@ For the same reason, we need to give a type hint when setting "v", in the form o anonymous inner class; we recommend storing these tokens as constants in a utility class, to avoid creating them too often. 
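As an illustration of that recommendation, a small holder class for reusable type tokens could look like the sketch below. The class name `TypeTokens` and the `Optional<Integer>` token are hypothetical, chosen only to match the kind of type hint discussed above:

```java
import com.google.common.reflect.TypeToken;
import java.util.Optional;

// Illustrative holder for type tokens that would otherwise be re-created on every call.
public final class TypeTokens {
  public static final TypeToken<Optional<Integer>> OPTIONAL_OF_INT =
      new TypeToken<Optional<Integer>>() {};

  private TypeTokens() {}
}
```

The constant can then be passed wherever a type hint is required, for example `row.get("v", TypeTokens.OPTIONAL_OF_INT)` (assuming the `OptionalCodec` is registered and `v` is an `int` column).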
-[OptionalCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/jdk8/OptionalCodec.html +[OptionalCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/OptionalCodec.html [Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html -[TypeToken]: http://google.github.io/guava/releases/21.0/api/docs/com/google/common/reflect/TypeToken.html +[TypeToken]: http://google.github.io/guava/releases/19.0/api/docs/com/google/common/reflect/TypeToken.html #### Guava @@ -353,8 +391,8 @@ session.execute(pst.bind() See the JDK8 Optional section above for explanations about [TypeToken]. -[OptionalCodec_guava]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/guava/OptionalCodec.html -[Optional_guava]: http://google.github.io/guava/releases/21.0/api/docs/com/google/common/base/Optional.html +[OptionalCodec_guava]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/guava/OptionalCodec.html +[Optional_guava]: http://google.github.io/guava/releases/19.0/api/docs/com/google/common/base/Optional.html ### Arrays @@ -378,9 +416,9 @@ session.execute("insert into example (i, l) values (1, ?)", Package [com.datastax.driver.extras.codecs.arrays][arrays] contains similar codecs for all primitive types, and [ObjectArrayCodec] to map arrays of objects. -[IntArrayCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/arrays/IntArrayCodec.html -[ObjectArrayCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/arrays/ObjectArrayCodec.html -[arrays]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/arrays/package-summary.html +[IntArrayCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/IntArrayCodec.html +[ObjectArrayCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/ObjectArrayCodec.html +[arrays]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/package-summary.html ### Abstract utilities @@ -410,5 +448,5 @@ These two classes are convenient, but since they perform conversions in two step optimal approach. If performance is paramount, it's better to start from scratch and convert your objects to `ByteBuffer` directly. -[MappingCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/MappingCodec.html -[ParsingCodec]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/extras/codecs/ParsingCodec.html +[MappingCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/MappingCodec.html +[ParsingCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/ParsingCodec.html diff --git a/manual/custom_payloads/README.md b/manual/custom_payloads/README.md index d0b68a677ed..778dbd72ea3 100644 --- a/manual/custom_payloads/README.md +++ b/manual/custom_payloads/README.md @@ -1,3 +1,22 @@ + + ## Custom Payloads The [native protocol](../native_protocol/) version 4 introduces a new feature called [Custom Payloads][CASSANDRA-8553]. @@ -42,7 +61,7 @@ payloads sent by the driver could get lost. ### Implementation Notes -Payloads in the Java driver are represented as `Map` instances. +Payloads in the Java Driver are represented as `Map` instances. 
It is safe to use any `Map` implementation, including unsynchronized implementations such as `java.util.HashMap`; the driver will create defensive, thread-safe copies of user-supplied maps. However, `ByteBuffer` instances are inherently mutable, @@ -51,11 +70,11 @@ to the driver as it could lead to unexpected results. #### Null values -Note that, for thread safety reasons, the Java driver does not permit `null` keys nor `null` values in a payload map; +Note that, for thread safety reasons, the Java Driver does not permit `null` keys nor `null` values in a payload map; including a `null` in your payload will result in a `NullPointerException` being immediately thrown. However, the protocol specification *does* allow `null` values. If you need to include -a `null` value in your payload map, this can be achieved with the Java driver +a `null` value in your payload map, this can be achieved with the Java Driver by using the special value `Statement.NULL_PAYLOAD_VALUE`. ##### Payload length limitations @@ -241,8 +260,8 @@ The log message contains a pretty-printed version of the payload itself, and its [CASSANDRA-8553]: https://issues.apache.org/jira/browse/CASSANDRA-8553 [v4spec]: https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec [qh]: https://issues.apache.org/jira/browse/CASSANDRA-6659 -[nhae]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[nhae]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html [chm]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html [immutablemap]: http://docs.guava-libraries.googlecode.com/git/javadoc/com/google/common/collect/ImmutableMap.html -[ufe]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/UnsupportedFeatureException.html +[ufe]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnsupportedFeatureException.html diff --git a/manual/idempotence/README.md b/manual/idempotence/README.md index 4d2b0816965..24c4ac95c40 100644 --- a/manual/idempotence/README.md +++ b/manual/idempotence/README.md @@ -1,3 +1,22 @@ + + ## Query idempotence A query is *idempotent* if it can be applied multiple times without changing the result of the initial application. For @@ -125,8 +144,8 @@ broke linearizability by doing a transparent retry at step 6. If linearizability is important for you, you should ensure that lightweight transactions are appropriately flagged as not idempotent. 
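To make the last point concrete, here is a minimal sketch; the contact point, keyspace, table and values are placeholders, and it simply combines the two settings discussed above, a cluster-wide default plus an explicit per-statement flag for a lightweight transaction:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

// Treat statements as idempotent by default...
Cluster cluster = Cluster.builder()
    .addContactPoint("127.0.0.1")
    .withQueryOptions(new QueryOptions().setDefaultIdempotence(true))
    .build();
Session session = cluster.connect("test");

// ...but explicitly flag lightweight transactions (IF ...) as NOT idempotent,
// so that the driver never retries them transparently.
Statement lwt =
    new SimpleStatement("INSERT INTO users (id, name) VALUES (42, 'bob') IF NOT EXISTS")
        .setIdempotent(false);
session.execute(lwt);
```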
-[isIdempotent]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Statement.html#isIdempotent-- -[setDefaultIdempotence]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryOptions.html#setDefaultIdempotence-boolean- -[QueryBuilder]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/querybuilder/QueryBuilder.html +[isIdempotent]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#isIdempotent-- +[setDefaultIdempotence]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setDefaultIdempotence-boolean- +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html -[linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability \ No newline at end of file +[linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability diff --git a/manual/load_balancing/README.md b/manual/load_balancing/README.md index f6b180ccbf3..8fd3fb06f2d 100644 --- a/manual/load_balancing/README.md +++ b/manual/load_balancing/README.md @@ -1,3 +1,22 @@ + + ## Load balancing A Cassandra cluster is typically composed of multiple hosts; the [LoadBalancingPolicy] \(sometimes abbreviated LBP) is a @@ -101,14 +120,11 @@ Cluster cluster = Cluster.builder() .withLoadBalancingPolicy( DCAwareRoundRobinPolicy.builder() .withLocalDc("myLocalDC") - .withUsedHostsPerRemoteDc(2) - .allowRemoteDCsForLocalConsistencyLevel() .build() ).build(); ``` -This policy queries nodes of the local data-center in a round-robin fashion; optionally, it can also try a configurable -number of hosts in remote data centers if all local hosts failed. +This policy queries nodes of the local data-center in a round-robin fashion. Call `withLocalDc` to specify the name of your local datacenter. You can also leave it out, and the driver will use the datacenter of the first contact point that was reached [at initialization](../#cluster-initialization). However, @@ -118,21 +134,6 @@ local datacenter. In general, providing the datacenter name explicitly is a safe Hosts belonging to the local datacenter are at distance `LOCAL`, and appear first in query plans (in a round-robin fashion). -If you call `withUsedHostsPerRemoteDc`, the policy will pick that number of hosts for each remote DC, and add them at -the end of query plans. To illustrate this, let's assume that the value is 2, there are 3 datacenters and 3 hosts in the -local datacenter. Query plans would look like this: - -* query 1: localHost1, localHost2, localHost3, host1InRemoteDc1, host2InRemoteDc1, host1InRemoteDc2, host2InRemoteDc2 -* query 2: localHost2, localHost3, localHost1, host1InRemoteDc1, host2InRemoteDc1, host1InRemoteDc2, host2InRemoteDc2 -* query 3: localHost3, localHost1, localHost2, host1InRemoteDc1, host2InRemoteDc1, host1InRemoteDc2, host2InRemoteDc2 - -Hosts selected by this option are at distance `REMOTE`. Note that they always appear in the same order. - -Finally, `allowRemoteDCsForLocalConsistencyLevel` controls whether remote hosts included by the previous option are -included when the consistency level of the query is `LOCAL_ONE` or `LOCAL_QUORUM`. By default, it is off (remote hosts -are not included for local CLs). 
- - ### [TokenAwarePolicy] ```java @@ -295,11 +296,11 @@ For any host, the distance returned by the policy is always the same as its chil Query plans are based on the child policy's, except that hosts that are currently excluded for being too slow are moved to the end of the plan. -[withExclusionThreshold]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withExclusionThreshold-double- -[withMininumMeasurements]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withMininumMeasurements-int- -[withRetryPeriod]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withRetryPeriod-long-java.util.concurrent.TimeUnit- -[withScale]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withScale-long-java.util.concurrent.TimeUnit- -[withUpdateRate]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withUpdateRate-long-java.util.concurrent.TimeUnit- +[withExclusionThreshold]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withExclusionThreshold-double- +[withMininumMeasurements]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withMininumMeasurements-int- +[withRetryPeriod]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withRetryPeriod-long-java.util.concurrent.TimeUnit- +[withScale]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withScale-long-java.util.concurrent.TimeUnit- +[withUpdateRate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withUpdateRate-long-java.util.concurrent.TimeUnit- ### Filtering policies @@ -317,15 +318,15 @@ studying the existing implementations first: `RoundRobinPolicy` is a good place complex ones like `DCAwareRoundRobinPolicy`. 
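Before writing a policy from scratch, it can also help to see how the built-in policies compose, since a custom implementation usually slots into the same chain. The following is only a sketch: the datacenter name and contact point are placeholders, and it merely wraps the DC-aware policy shown earlier with token awareness:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;

// Token awareness layered on top of DC awareness; a custom policy could replace
// either layer of this chain.
LoadBalancingPolicy policy =
    new TokenAwarePolicy(
        DCAwareRoundRobinPolicy.builder().withLocalDc("myLocalDC").build());

Cluster cluster = Cluster.builder()
    .addContactPoint("127.0.0.1")
    .withLoadBalancingPolicy(policy)
    .build();
```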
-[LoadBalancingPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[RoundRobinPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RoundRobinPolicy.html -[DCAwareRoundRobinPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.html -[TokenAwarePolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/TokenAwarePolicy.html -[LatencyAwarePolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LatencyAwarePolicy.html -[HostFilterPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/HostFilterPolicy.html -[WhiteListPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/WhiteListPolicy.html -[HostDistance]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/HostDistance.html -[refreshConnectedHosts]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#refreshConnectedHosts-- -[setMetadataEnabled]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryOptions.html#setMetadataEnabled-boolean- -[Statement#getKeyspace]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Statement.html#getKeyspace-- -[Statement#getRoutingKey]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Statement.html#getRoutingKey-- +[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[RoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RoundRobinPolicy.html +[DCAwareRoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.html +[TokenAwarePolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/TokenAwarePolicy.html +[LatencyAwarePolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.html +[HostFilterPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/HostFilterPolicy.html +[WhiteListPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/WhiteListPolicy.html +[HostDistance]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/HostDistance.html +[refreshConnectedHosts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#refreshConnectedHosts-- +[setMetadataEnabled]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setMetadataEnabled-boolean- +[Statement#getKeyspace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#getKeyspace-- +[Statement#getRoutingKey]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#getRoutingKey-- diff --git a/manual/logging/README.md b/manual/logging/README.md index ca200f6d09f..109a4d0bb83 100644 --- a/manual/logging/README.md +++ b/manual/logging/README.md @@ -1,8 +1,27 @@ + + ## Logging ### Setup -DataStax Java driver uses the popular [SLF4J](http://www.slf4j.org) library to emit log messages; +Java Driver uses the popular [SLF4J](http://www.slf4j.org) library to emit log messages; SLF4J has the advantage of providing a logging API that is entirely decoupled from concrete 
implementations, letting client applications free to seamlessly connect SLF4J to their preferred logging backend. @@ -49,7 +68,7 @@ You can also find some configuration examples below. ### Useful loggers -When debugging the Java driver, the following loggers could be particularly useful +When debugging the Java Driver, the following loggers could be particularly useful and provide hints about what's going wrong. * `com.datastax.driver.core.Cluster` @@ -192,6 +211,47 @@ that can significantly boost latencies when writing log messages. without stopping the application. This usually involves JMX and is available for [Logback](http://logback.qos.ch/manual/jmxConfig.html); Log4J provides a `configureAndWatch()` method but it is not recommended to use it inside J2EE containers (see [FAQ](https://logging.apache.org/log4j/1.2/faq.html#a3.6)). +### Server Side Warnings + +When using the driver to execute queries, it is possible that the server will generate warnings and +return them along with the results. Consider the following query: + +```sql +SELECT count(*) FROM cycling.cyclist_name; +``` + +Executing this query would generate a warning in Cassandra: + +``` +Aggregation query used without partition key +``` + +These +[query warnings](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) +are available programmatically from the +[ExecutionInfo](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html) +via +[ResultSet](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html)'s +[getExecutionInfo()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PagingIterable.html#getExecutionInfo--) +method. They are also logged by the driver: + +``` +WARN com.datastax.driver.core.RequestHandler - Query 'SELECT count(*) FROM cycling.cyclist_name' generated server side warning(s): Aggregation query used without partition key +``` + +Sometimes, it is not desirable for the driver to log server-side warnings. In such cases, logging +these warnings can be disabled in the driver by setting the system property `com.datastax.driver.DISABLE_QUERY_WARNING_LOGS` +to "true". This can be done at application startup (`-Dcom.datastax.driver.DISABLE_QUERY_WARNING_LOGS=true`) +or it can be toggled programmatically in application code: + +```java +// disable driver logging of server-side warnings +System.setProperty("com.datastax.driver.DISABLE_QUERY_WARNING_LOGS", "true"); +.... +// enable driver logging of server-side warnings +System.setProperty("com.datastax.driver.DISABLE_QUERY_WARNING_LOGS", "false"); +``` + ### Logback Example Here is a typical example configuration for Logback. *Please adapt it to your specific needs before using it!* @@ -299,4 +359,4 @@ It also turns on slow query tracing as described above. ``` -[query_logger]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryLogger.html +[query_logger]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryLogger.html diff --git a/manual/metadata/README.md b/manual/metadata/README.md index 4e4fd708ffd..817587f947d 100644 --- a/manual/metadata/README.md +++ b/manual/metadata/README.md @@ -1,10 +1,29 @@ + + ## Metadata The driver maintains global information about the Cassandra cluster it is connected to. It is available via [Cluster#getMetadata()][getMetadata]. 
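As a quick illustration (assuming `cluster` is an already-built `Cluster` instance), the metadata can be used to inspect the cluster topology:

```java
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;

Metadata metadata = cluster.getMetadata();
System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
for (Host host : metadata.getAllHosts()) {
  System.out.printf("Datacenter: %s, Host: %s, Rack: %s%n",
      host.getDatacenter(), host.getAddress(), host.getRack());
}
```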
-[getMetadata]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.html#getMetadata-- +[getMetadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#getMetadata-- ### Schema metadata @@ -12,8 +31,8 @@ Use [getKeyspace(String)][getKeyspace] or [getKeyspaces()][getKeyspaces] to get keyspace-level metadata. From there you can access the keyspace's objects (tables, and UDTs and UDFs if relevant). -[getKeyspace]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#getKeyspace-java.lang.String- -[getKeyspaces]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#getKeyspaces-- +[getKeyspace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getKeyspace-java.lang.String- +[getKeyspaces]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getKeyspaces-- #### Refreshes @@ -47,8 +66,8 @@ Note that it is preferable to register a listener only *after* the cluster is fu otherwise the listener could be notified with a great deal of "Added" events as the driver builds the schema metadata from scratch for the first time. -[SchemaChangeListener]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SchemaChangeListener.html -[registerListener]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.html#register-com.datastax.driver.core.SchemaChangeListener- +[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SchemaChangeListener.html +[registerListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#register-com.datastax.driver.core.SchemaChangeListener- #### Schema agreement @@ -135,9 +154,9 @@ custom executor). Check out the API docs for the features in this section: -* [withMaxSchemaAgreementWaitSeconds(int)](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Cluster.Builder.html#withMaxSchemaAgreementWaitSeconds-int-) -* [isSchemaInAgreement()](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ExecutionInfo.html#isSchemaInAgreement--) -* [checkSchemaAgreement()](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#checkSchemaAgreement--) +* [withMaxSchemaAgreementWaitSeconds(int)](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withMaxSchemaAgreementWaitSeconds-int-) +* [isSchemaInAgreement()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#isSchemaInAgreement--) +* [checkSchemaAgreement()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#checkSchemaAgreement--) ### Token metadata @@ -181,14 +200,14 @@ Starting with Cassandra 2.1.5, this information is available in a system table (see [CASSANDRA-7688](https://issues.apache.org/jira/browse/CASSANDRA-7688)). 
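To recap the token metadata APIs referenced below, here is a minimal sketch (the keyspace name is a placeholder and `cluster` an already-connected instance) of listing token ranges and their replicas:

```java
import java.util.Set;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.TokenRange;

Metadata metadata = cluster.getMetadata();
for (TokenRange range : metadata.getTokenRanges()) {
  // Hosts holding a replica of this range for the given keyspace.
  Set<Host> replicas = metadata.getReplicas("my_keyspace", range);
  System.out.printf("%s is replicated on %s%n", range, replicas);
}
```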
-[metadata]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html -[getTokenRanges]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#getTokenRanges-- -[getTokenRanges2]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#getTokenRanges-java.lang.String-com.datastax.driver.core.Host- -[getReplicas]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#getReplicas-java.lang.String-com.datastax.driver.core.TokenRange- -[newToken]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#newToken-java.lang.String- -[newTokenRange]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metadata.html#newTokenRange-com.datastax.driver.core.Token-com.datastax.driver.core.Token- -[TokenRange]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TokenRange.html -[getTokens]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Host.html#getTokens-- -[setToken]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/BoundStatement.html#setToken-int-com.datastax.driver.core.Token- -[getToken]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Row.html#getToken-int- -[getPKToken]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Row.html#getPartitionKeyToken-- +[metadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html +[getTokenRanges]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getTokenRanges-- +[getTokenRanges2]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getTokenRanges-java.lang.String-com.datastax.driver.core.Host- +[getReplicas]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getReplicas-java.lang.String-com.datastax.driver.core.TokenRange- +[newToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#newToken-java.lang.String- +[newTokenRange]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#newTokenRange-com.datastax.driver.core.Token-com.datastax.driver.core.Token- +[TokenRange]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TokenRange.html +[getTokens]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Host.html#getTokens-- +[setToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BoundStatement.html#setToken-int-com.datastax.driver.core.Token- +[getToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html#getToken-int- +[getPKToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html#getPartitionKeyToken-- diff --git a/manual/metrics/README.md b/manual/metrics/README.md index 7dc741bc4c9..7af7a050232 100644 --- a/manual/metrics/README.md +++ b/manual/metrics/README.md @@ -1,12 +1,172 @@ -## Metrics + -[Metrics]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metrics.html \ No newline at end of file +## Metrics + +The driver exposes measurements of its internal behavior through the popular [Dropwizard Metrics] +library. Developers can access these metrics and choose to export them to a monitoring tool. + +The driver depends on Metrics 3.2.x, but is compatible with newer versions of Dropwizard Metrics. 
+For using Metrics 4.x with the driver, see [Metrics 4 Compatibility](#metrics-4-compatibility). + +### Structure + +Metric names are path-like, dot-separated strings. Metrics are measured at the `Cluster`-level, +thus the driver prefixes them with the name of the `Cluster` they are associated with (see [withClusterName] +for how to configure this), suffixed by `-metrics`. For example: + +``` +cluster1-metrics.connected-to +cluster1-metrics.connection-errors +... +``` + +### Configuration + +By default, metrics are enabled and exposed via JMX as [MXBeans]. + +Some users may find that they don't want the driver to record and expose metrics. To disable +metrics collection, use the [withoutMetrics] builder method, i.e.: + +```java +Cluster cluster = Cluster.builder() + .withoutMetrics() + .build(); +``` + +Note that if you decide to disable metrics, you may also consider excluding metrics as a dependency. +To do this in a maven project: + +```xml + + org.apache.cassandra + cassandra-driver-core + 3.12.1 + + + io.dropwizard.metrics + metrics-core + + + +``` + +Alternatively, one may not want to expose metrics using JMX. Disabling JMX reporting is simple +as using the [withoutJMXReporting] builder method, i.e.: + +```java +Cluster cluster = Cluster.builder() + .withoutJMXReporting() + .build(); +``` + +### Accessing Cluster Metrics + +`Cluster` metrics may be accessed via the [getMetrics] method. The [Metrics] class offers +direct access to all metrics recorded for the `Cluster` via getter methods. Refer to +the [Metrics javadoc][Metrics] for more details about the metrics offered. + +It is very common for applications to record their own metrics. You can add all metrics +recorded for a `Cluster` to your applications' [MetricRegistry] in the following manner: + +```java +MetricRegistry myRegistery = new MetricRegistry(); +myRegistry.registerAll(cluster.getMetrics().getRegistry()); +``` + +### Registering a Custom Reporter + +Dropwizard Metrics offers a variety of [Reporters] for exporting metrics. To enable reporting, +access the `Cluster`'s metrics via the [getMetrics] method. For example, to enable CSV reporting +every 30 seconds: + +```java +import com.codahale.metrics.*; + +import java.io.File; +import java.util.concurrent.TimeUnit; + +Metrics metrics = cluster.getMetrics(); + +CsvReporter csvReporter = CsvReporter.forRegistry(metrics.getRegistry()) + .convertDurationsTo(TimeUnit.MILLISECONDS) + .convertRatesTo(TimeUnit.SECONDS) + .build(new File(".")); + +csvReporter.start(30, TimeUnit.SECONDS); +``` + +### Metrics 4 Compatibility + +While the driver depends on Metrics 3.2.x, it also works with Metrics 4, with some caveats. + +In Metrics 4, JMX reporting was moved to a separate module, `metrics-jmx`. Because of this you are +likely to encounter the following exception at runtime when initializing a `Cluster`: + +``` +Exception in thread "main" java.lang.NoClassDefFoundError: com/codahale/metrics/JmxReporter + at com.datastax.driver.core.Metrics.(Metrics.java:103) + at com.datastax.driver.core.Cluster$Manager.init(Cluster.java:1402) + at com.datastax.driver.core.Cluster.init(Cluster.java:159) + at com.datastax.driver.core.Cluster.connectAsync(Cluster.java:330) + at com.datastax.driver.core.Cluster.connectAsync(Cluster.java:305) + at com.datastax.durationtest.core.DurationTest.createSessions(DurationTest.java:360) + .... 
+Caused by: java.lang.ClassNotFoundException: com.codahale.metrics.JmxReporter + at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:582) + at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:185) + at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:496) + ... 8 more +``` + +To fix this, use [withoutJMXReporting] when constructing your `Cluster`. If you still desire JMX +reporting, add `metrics-jmx` as a dependency: + +```xml + + io.dropwizard.metrics + metrics-jmx + 4.0.2 + +``` + +Then create your `Cluster` and `JmxReporter` in the following manner: + +```java +Cluster cluster = Cluster.builder() + .withoutJMXReporting() + .build(); + +JmxReporter reporter = + JmxReporter.forRegistry(cluster.getMetrics().getRegistry()) + .inDomain(cluster.getClusterName() + "-metrics") + .build(); + +reporter.start(); +``` + +[Dropwizard Metrics]: http://metrics.dropwizard.io/3.2.2/manual/index.html +[Reporters]: http://metrics.dropwizard.io/3.2.2/manual/core.html#reporters +[MetricRegistry]: http://metrics.dropwizard.io/3.2.2/apidocs/com/codahale/metrics/MetricRegistry.html +[MXBeans]: https://docs.oracle.com/javase/tutorial/jmx/mbeans/mxbeans.html +[withClusterName]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withClusterName-java.lang.String- +[withoutMetrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- +[withoutJMXReporting]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutJMXReporting-- +[getMetrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#getMetrics-- +[Metrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.html diff --git a/manual/native_protocol/README.md b/manual/native_protocol/README.md index be945e75664..7aab925ea9f 100644 --- a/manual/native_protocol/README.md +++ b/manual/native_protocol/README.md @@ -1,3 +1,22 @@ + + ## Native protocol The native protocol defines the format of the binary messages exchanged @@ -16,16 +35,23 @@ Cassandra when the first connection is established. Both sides are backward-compatible with older versions: - - - - - + + + + + + + + + + +
-<tr><td></td><td>Cassandra: 1.2.x<br/>(DSE 3.2)</td><td>2.0.x<br/>(DSE 4.0 to 4.6)</td><td>2.1.x<br/>(DSE 4.7)</td><td>2.2.x</td><td>3.0.x</td></tr>
-<tr><td>Driver: 1.0.x</td><td>v1</td><td>v1</td><td>v1</td><td>v1</td><td>Unsupported (1)</td></tr>
-<tr><td>2.0.x to 2.1.1</td><td>v1</td><td>v2</td><td>v2</td><td>v2</td><td>Unsupported (1)</td></tr>
-<tr><td>2.1.2 to 2.1.x</td><td>v1</td><td>v2</td><td>v3</td><td>v3</td><td>Unsupported (2)</td></tr>
-<tr><td>3.x</td><td>v1</td><td>v2</td><td>v3</td><td>v4</td><td>v4</td></tr>
+<tr><td>Driver Version</td><td>Cassandra 1.2.x<br/>(DSE 3.2)</td><td>Cassandra 2.0.x<br/>(DSE 4.0 to 4.6)</td><td>Cassandra 2.1.x<br/>(DSE 4.7)</td><td>Cassandra 2.2.x</td><td>Cassandra 3.0.x &amp; 3.x<br/>(DSE 5.0+)</td><td>Cassandra 4.0+</td></tr>
+<tr><td>1.0.x</td><td>v1</td><td>v1</td><td>v1</td><td>v1</td><td>Unsupported (1)</td><td>Unsupported (1)</td></tr>
+<tr><td>2.0.x to 2.1.1</td><td>v1</td><td>v2</td><td>v2</td><td>v2</td><td>Unsupported (1)</td><td>Unsupported (1)</td></tr>
+<tr><td>2.1.2 to 2.1.x</td><td>v1</td><td>v2</td><td>v3</td><td>v3</td><td>Unsupported (2)</td><td>Unsupported (2)</td></tr>
+<tr><td>3.x</td><td>v1</td><td>v2</td><td>v3</td><td>v4</td><td>v4</td><td>v5</td></tr>
    -*(1) Cassandra 3.0 does not support protocol versions v1 and v2* +*(1) Cassandra 3.0+ does not support protocol versions v1 and v2* -*(2) There is a matching protocol version (v3), but the driver 2.1.x can't read the new system table format of Cassandra 3.0* +*(2) There is a matching protocol version (v3), but the driver 2.1.x can't read the new system table +format of Cassandra 3.0+* For example, if you use version 2.1.5 of the driver to connect to Cassandra 2.0.9, the maximum version you can use (and the one you'll get @@ -63,7 +89,7 @@ All host(s) tried for query failed [/127.0.0.1:9042] Host /127.0.0.1:9042 does not support protocol version V3 but V2)) ``` -[gpv]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- +[gpv]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- #### Protocol version with mixed clusters @@ -94,19 +120,19 @@ To avoid this issue, you can use one the following workarounds: #### v1 to v2 * bound variables in simple statements - ([Session#execute(String, Object...)](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#execute-java.lang.String-java.lang.Object...-)) -* [batch statements](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/BatchStatement.html) + ([Session#execute(String, Object...)](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-java.lang.String-java.lang.Object...-)) +* [batch statements](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html) * [query paging](../paging/) #### v2 to v3 * the number of stream ids per connection goes from 128 to 32768 (see [Connection pooling](../pooling/)) -* [serial consistency on batch statements](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/BatchStatement.html#setSerialConsistencyLevel-com.datastax.driver.core.ConsistencyLevel-) +* [serial consistency on batch statements](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html#setSerialConsistencyLevel-com.datastax.driver.core.ConsistencyLevel-) * [client-side timestamps](../query_timestamps/) #### v3 to v4 -* [query warnings](http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) +* [query warnings](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) * allowed unset values in bound statements * [Custom payloads](../custom_payloads/) diff --git a/manual/object_mapper/README.md b/manual/object_mapper/README.md index 08e5f18797a..3c3cc021822 100644 --- a/manual/object_mapper/README.md +++ b/manual/object_mapper/README.md @@ -1,3 +1,22 @@ + + # Object Mapper The driver provides a simple object mapper, which @@ -9,9 +28,9 @@ The mapper is published as a separate Maven artifact: ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-mapping - 3.3.0 + 3.12.1 ``` diff --git a/manual/object_mapper/creating/README.md b/manual/object_mapper/creating/README.md index cbb14bf2244..200fccf571e 100644 --- a/manual/object_mapper/creating/README.md +++ b/manual/object_mapper/creating/README.md @@ -1,3 +1,22 @@ + + ## Definition of mapped classes The object mapper is configured by annotations on the mapped classes. 
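As a rough sketch (keyspace, table, and column names are placeholders), such an annotated entity might look like this:

```java
import java.util.UUID;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;

@Table(keyspace = "ks", name = "users")
public class User {
  @PartitionKey private UUID id;

  @Column(name = "user_name")
  private String userName;

  // A no-arg constructor and getters/setters are expected by default.
  public UUID getId() { return id; }
  public void setId(UUID id) { this.id = id; }
  public String getUserName() { return userName; }
  public void setUserName(String userName) { this.userName = userName; }
}
```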
@@ -149,9 +168,9 @@ User user = new User() .setName("John Doe"); ``` -[table]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Table.html +[table]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Table.html [case-sensitive]:http://docs.datastax.com/en/cql/3.3/cql/cql_reference/ucase-lcase_r.html -[consistency level]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ConsistencyLevel.html +[consistency level]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ConsistencyLevel.html [java-beans]:https://docs.oracle.com/javase/tutorial/javabeans/writing/properties.html [set-accessible]:https://docs.oracle.com/javase/8/docs/api/java/lang/reflect/AccessibleObject.html#setAccessible-boolean- @@ -189,7 +208,7 @@ CREATE TABLE users(id uuid PRIMARY KEY, "userName" text); private String userName; ``` -[column]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Column.html +[column]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Column.html #### Primary key fields @@ -213,15 +232,15 @@ private String areaCode; The order of the indices must match that of the columns in the table declaration. -[pk]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/PartitionKey.html -[cc]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/ClusteringColumn.html +[pk]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/PartitionKey.html +[cc]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/ClusteringColumn.html [pks]:http://thelastpickle.com/blog/2013/01/11/primary-keys-in-cql.html #### Computed fields [@Computed][computed] can be used on properties that are the result of a computation on the Cassandra side, typically a function call. Native -functions in Cassandra like `writetime()` or [User Defined Functions] are +functions in Cassandra like `writetime()` or [User-Defined Functions] are supported. ```java @@ -250,7 +269,7 @@ version (see [JAVA-832](https://datastax-oss.atlassian.net/browse/JAVA-832)). [User Defined Functions]:http://www.planetcassandra.org/blog/user-defined-functions-in-cassandra-3-0/ -[computed]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Computed.html +[computed]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Computed.html #### Transient properties @@ -259,11 +278,11 @@ to table columns. [@Transient][transient] can be used to prevent a field or a Java bean property from being mapped. Like other column-level annotations, it should be placed on either the field declaration or the property getter method. -[transient]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Transient.html +[transient]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Transient.html ### Mapping User Types -[User Defined Types] can also be mapped by using [@UDT][udt]: +[User-Defined Types] can also be mapped by using [@UDT][udt]: ``` CREATE TYPE address (street text, zip_code int); @@ -322,8 +341,8 @@ This also works with UDTs inside collections or other UDTs, with any arbitrary nesting level. 
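For instance, here is a sketch of a UDT nested inside a collection (the entity and keyspace names are placeholders, and `Address` is assumed to be a `@UDT`-annotated class as described above):

```java
import java.util.List;
import com.datastax.driver.mapping.annotations.FrozenValue;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;

@Table(keyspace = "ks", name = "companies")
public class Company {
  @PartitionKey private String name;

  // Matches a CQL column declared as list<frozen<address>>.
  @FrozenValue private List<Address> offices;

  // getters and setters omitted for brevity
}
```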
[User Defined Types]: ../../udts/ -[udt]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/UDT.html -[field]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Field.html +[udt]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/UDT.html +[field]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Field.html ### Mapping collections @@ -359,10 +378,53 @@ private Map> frozenKeyValueMap; private Map> frozenValueMap; ``` -[frozen]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Frozen.html -[frozenkey]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/FrozenKey.html -[frozenvalue]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/FrozenValue.html +With regards to tuples, these can be represented as `TupleValue` fields, i.e.: + +```java +@Frozen +private TupleValue myTupleValue; +``` + +Please note however that tuples are not a good fit for the mapper since it is up to the user to +resolve the associated `TupleType` when creating and accessing `TupleValue`s and properly use the +right types since java type information is not known. + +Also note that `@UDT`-annotated classes are not implicitly registered with `TupleValue` like they +otherwise are because the mapper is not able to identify the cql type information at the time +entities are constructed. + +To work around this, one may use [udtCodec] to register a `TypeCodec` that the mapper can use +to figure out how to appropriately handle UDT conversion, i.e.: + +```java +mappingManager.udtCodec(Address.class); +``` + +[frozen]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Frozen.html +[frozenkey]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/FrozenKey.html +[frozenvalue]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/FrozenValue.html +[udtCodec]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingManager.html#udtCodec-java.lang.Class- + +#### Prefer Frozen Collections + +If `Mapper.save` is used to create and update entities, it is recommended to +use frozen collections over non-frozen collections. + +Frozen collections in Cassandra are serialized as a single cell value where +non-frozen collections serialize each individual element in a collection as a +cell. + +Since `Mapper.save` provides the entire collection for an entity field value on +each invocation, it is more efficient to use frozen collections as the entire +collection is serialized as one cell. + +Also, when using non-frozen collections, on INSERT Cassandra must +create a tombstone to invalidate all existing collection elements, even if +there are none. When using frozen collections, no such tombstone is needed. + +See [Freezing collection types] for more information about the frozen keyword. 
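As a small illustration (the column name is a placeholder), a fully frozen collection column and its mapped field could be declared as:

```java
// Mapped to a CQL column declared as: emails frozen<set<text>>
@Frozen
private Set<String> emails;
```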
+[Freezing collection types]: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/refCollectionType.html ### Polymorphism support diff --git a/manual/object_mapper/custom_codecs/README.md b/manual/object_mapper/custom_codecs/README.md index 148164ad115..d3b5414fbe8 100644 --- a/manual/object_mapper/custom_codecs/README.md +++ b/manual/object_mapper/custom_codecs/README.md @@ -1,3 +1,22 @@ + + # Using custom codecs The mapper can take advantage of [custom codecs](../../custom_codecs/) @@ -98,9 +117,9 @@ instance (one per column) and cache it for future use. This also works with [@Field][field] and [@Param][param] annotations. -[column]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Column.html -[field]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Field.html -[param]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Param.html +[column]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Column.html +[field]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Field.html +[param]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Param.html ## Implicit UDT codecs diff --git a/manual/object_mapper/using/README.md b/manual/object_mapper/using/README.md index d802378a28a..35fdc54e088 100644 --- a/manual/object_mapper/using/README.md +++ b/manual/object_mapper/using/README.md @@ -1,3 +1,22 @@ + + ## Using the mapper First, create a [MappingManager]. It wraps an existing [Session] @@ -28,9 +47,9 @@ Mapper mapper = manager.mapper(User.class); calling `manager#mapper` more than once for the same class will return the previously generated mapper. -[Mapper]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/Mapper.html -[MappingManager]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/MappingManager.html -[Session]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html +[Mapper]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/Mapper.html +[MappingManager]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingManager.html +[Session]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html #### Basic CRUD operations @@ -51,8 +70,8 @@ UUID userId = ...; User u = mapper.get(userId); ``` -`get`'s arguments must match the partition key components (number of -arguments and their types). +`get`'s arguments must match the primary key components (number of +arguments, their types, and order). -------------- @@ -179,7 +198,7 @@ It provides methods `one()`, `all()`, `iterator()`, `getExecutionInfo()` and `isExhausted()`. Note that iterating the `Result` will consume the `ResultSet`, and vice-versa. 
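For example, with a hypothetical entity whose primary key is `(user_id, message_id)` (partition key plus clustering column), both components must be passed, in declaration order:

```java
UUID userId = ...;
UUID messageId = ...;
// Partition key first, then clustering columns, matching the entity's annotations.
UserMessage message = mapper.get(userId, messageId);
```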
-[Result]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/Result.html +[Result]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/Result.html ### Accessors @@ -202,7 +221,7 @@ implementation for it: ```java UserAccessor userAccessor = manager.createAccessor(UserAccessor.class); -User user = userAccessor.getOne(uuid); +Result users = userAccessor.getAll(); ``` Like mappers, accessors are cached at the manager level and thus, are @@ -229,7 +248,7 @@ corresponds to which marker: ResultSet insert(@Param("u") UUID userId, @Param("n") String name); ``` -[param]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/Param.html +[param]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Param.html If a method argument is a Java enumeration, it must be annotated with `@Enumerated` to indicate how to convert it to a CQL type (the rules are @@ -275,6 +294,10 @@ executed: ListenableFuture<Result<T>> T must be a mapped class.
    Asynchronous execution, returns a list of mapped objects. + + Statement + Object mapper doesn't execute query, but returns an instance of BoundStatement that could be executed via Session object. + Example: @@ -297,7 +320,7 @@ query with the annotation [@QueryParameters]. Then, options like public ListenableFuture> getAllAsync(); ``` -[@QueryParameters]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/annotations/QueryParameters.html +[@QueryParameters]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/QueryParameters.html ### Mapping configuration @@ -341,6 +364,6 @@ PropertyMapper propertyMapper = new DefaultPropertyMapper() There is more to `DefaultPropertyMapper`; see the Javadocs and implementation for details. -[MappingConfiguration]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/MappingConfiguration.html -[PropertyMapper]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/PropertyMapper.html -[DefaultPropertyMapper]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/mapping/DefaultPropertyMapper.html +[MappingConfiguration]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingConfiguration.html +[PropertyMapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/PropertyMapper.html +[DefaultPropertyMapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/DefaultPropertyMapper.html diff --git a/manual/osgi/README.md b/manual/osgi/README.md index 36a91b7155d..eb0c83e70da 100644 --- a/manual/osgi/README.md +++ b/manual/osgi/README.md @@ -1,3 +1,22 @@ + + # OSGi The driver is available as an [OSGi] bundle. More specifically, @@ -12,7 +31,7 @@ the following Maven artifacts are actually valid OSGi bundles: We have complete examples demonstrating usage of the driver in an OSGi environment; please refer to our [OSGi examples repository]. -[OSGi examples repository]:https://github.com/datastax/java-driver-examples-osgi +[OSGi examples repository]:https://github.com/apache/cassandra-java-driver-examples-osgi ## Troubleshooting OSGi applications diff --git a/manual/paging/README.md b/manual/paging/README.md index 915600d8099..34ca70e8672 100644 --- a/manual/paging/README.md +++ b/manual/paging/README.md @@ -1,3 +1,22 @@ + + ## Paging When a query returns many rows, it would be inefficient to return them @@ -176,8 +195,8 @@ if (nextPage != null) { } ``` -[result_set]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSet.html -[paging_state]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PagingState.html +[result_set]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html +[paging_state]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PagingState.html Due to internal implementation details, `PagingState` instances are not @@ -196,6 +215,26 @@ could become a problem in the following scenario: If this is not acceptable for you, you might want to consider the unsafe API described in the next section. +Note: because of [CASSANDRA-10880], paging states are also incompatible +between Cassandra 2.2 and 3.0, even if they're both using native protocol v4. +This will manifest as the following error: + +``` +com.datastax.driver.core.exceptions.ProtocolError: An unexpected protocol error occurred on host xxx. 
+This is a bug in this library, please report: Invalid value for the paging state +``` + +The [Cassandra documentation](https://github.com/apache/cassandra/blob/cassandra-3.0/NEWS.txt#L334-L336) +recommends staying on protocol v3 during an upgrade between these two versions: + +``` +Clients must use the native protocol version 3 when upgrading from 2.2.X as +the native protocol version 4 is not compatible between 2.2.X and 3.Y. See +https://www.mail-archive.com/user@cassandra.apache.org/msg45381.html for details. +``` + +[CASSANDRA-10880]: https://issues.apache.org/jira/browse/CASSANDRA-10880 + #### Unsafe API As an alternative to the standard API, there are two methods that @@ -219,8 +258,8 @@ There are two situations where you might want to use the unsafe API: implementing your own validation logic (for example, signing the raw state with a private key). -[gpsu]: http://www.datastax.com/drivers/java/3.2/com/datastax/driver/core/ExecutionInfo.html#getPagingStateUnsafe-- -[spsu]: http://www.datastax.com/drivers/java/3.2/com/datastax/driver/core/Statement.html#setPagingStateUnsafe-byte:A- +[gpsu]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getPagingStateUnsafe-- +[spsu]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#setPagingStateUnsafe-byte:A- ### Offset queries diff --git a/manual/pooling/README.md b/manual/pooling/README.md index f8dbd94bd27..9bad1a995a4 100644 --- a/manual/pooling/README.md +++ b/manual/pooling/README.md @@ -1,3 +1,22 @@ + + ## Connection pooling ### Basics @@ -37,6 +56,8 @@ described in the next section). The number of stream ids depends on the +-------+ +-------+ +----+ +----------+ +-------+ ``` +If there are several connections in pool, driver evenly spreads new requests between connections. + ### Configuring the connection pool Connections pools are configured with a [PoolingOptions][pooling_options] object, which @@ -283,16 +304,16 @@ either: [newConnectionThreshold][nct] so that enough connections are added by the time you reach the bottleneck. 
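For instance, here is a sketch of raising those limits through [PoolingOptions][pooling_options] (the values are purely illustrative, not recommendations):

```java
PoolingOptions poolingOptions = new PoolingOptions()
    .setConnectionsPerHost(HostDistance.LOCAL, 4, 10)
    .setMaxRequestsPerConnection(HostDistance.LOCAL, 2048)
    .setMaxQueueSize(512);

Cluster cluster = Cluster.builder()
    .addContactPoint("127.0.0.1")
    .withPoolingOptions(poolingOptions)
    .build();
```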
-[result_set_future]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ResultSetFuture.html -[pooling_options]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html -[lbp]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[nct]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#setNewConnectionThreshold-com.datastax.driver.core.HostDistance-int- -[mrpc]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#setMaxRequestsPerConnection-com.datastax.driver.core.HostDistance-int- -[sits]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#setIdleTimeoutSeconds-int- -[rtm]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#getReadTimeoutMillis-- -[smqs]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#setMaxQueueSize-int- -[sptm]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PoolingOptions.html#setPoolTimeoutMillis-int- -[nhae]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[getErrors]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- -[get_state]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#getState-- -[BusyPoolException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/BusyPoolException.html +[result_set_future]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSetFuture.html +[pooling_options]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html +[lbp]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[nct]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setNewConnectionThreshold-com.datastax.driver.core.HostDistance-int- +[mrpc]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setMaxRequestsPerConnection-com.datastax.driver.core.HostDistance-int- +[sits]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setIdleTimeoutSeconds-int- +[rtm]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#getReadTimeoutMillis-- +[smqs]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setMaxQueueSize-int- +[sptm]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setPoolTimeoutMillis-int- +[nhae]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[getErrors]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- +[get_state]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#getState-- +[BusyPoolException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/BusyPoolException.html diff --git a/manual/query_timestamps/README.md b/manual/query_timestamps/README.md index cb3f4dce9cf..27d60bdf20a 100644 --- a/manual/query_timestamps/README.md +++ b/manual/query_timestamps/README.md @@ -1,3 +1,22 @@ + 
+ ## Query timestamps In Cassandra, each mutation has a microsecond-precision timestamp, which @@ -140,10 +159,10 @@ following: Steps 2 and 3 only apply if native protocol v3 or above is in use. -[TimestampGenerator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TimestampGenerator.html -[AtomicMonotonicTimestampGenerator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.html -[ThreadLocalMonotonicTimestampGenerator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.html -[ServerSideTimestampGenerator]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ServerSideTimestampGenerator.html +[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TimestampGenerator.html +[AtomicMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.html +[ThreadLocalMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.html +[ServerSideTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ServerSideTimestampGenerator.html [gettimeofday]: http://man7.org/linux/man-pages/man2/settimeofday.2.html [JNR]: https://github.com/jnr/jnr-ffi diff --git a/manual/reconnection/README.md b/manual/reconnection/README.md index ccd4598a309..797e8fc0ea4 100644 --- a/manual/reconnection/README.md +++ b/manual/reconnection/README.md @@ -1,11 +1,54 @@ -## Reconnection + -[ReconnectionPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/ReconnectionPolicy.html \ No newline at end of file +## Reconnection + +If the driver loses a connection to a node, it tries to re-establish it according to a configurable +policy. This is used in two places: + +* [connection pools](../pooling/): for each node, a session has a fixed-size pool of connections to + execute user requests. If a node is detected as down, a reconnection is started. +* [control connection](../control_connection/): a session uses a single connection to an arbitrary + node for administrative requests. If that connection goes down, a reconnection gets started; each + attempt iterates through all active nodes until one of them accepts a connection. This goes on + until we have a control node again. + +[ReconnectionPolicy] controls the interval between each attempt. The policy to use may be +provided using [Cluster.Builder.withReconnectionPolicy]. For example, the following configures +an [ExponentialReconnectionPolicy] with a base delay of 1 second, and a max delay of 10 minutes +(this is the default behavior). + +```java +Cluster.builder() + .withReconnectionPolicy(new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000)) + .build(); +``` + +[ConstantReconnectionPolicy] uses the same delay every time, regardless of the +previous number of attempts. + +You can also write your own policy; it must implement [ReconnectionPolicy]. + +For best results, use reasonable values: very low values (for example a constant delay of 10 +milliseconds) will quickly saturate your system. 
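For example, here is a sketch of switching to [ConstantReconnectionPolicy] with a moderate delay (the 5-second value is arbitrary):

```java
Cluster.builder()
    .withReconnectionPolicy(new ConstantReconnectionPolicy(5000)) // retry every 5 seconds
    .build();
```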
+ +[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ReconnectionPolicy.html +[Cluster.Builder.withReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withReconnectionPolicy-com.datastax.driver.core.policies.ReconnectionPolicy- +[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.html +[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ConstantReconnectionPolicy.html diff --git a/manual/retries/README.md b/manual/retries/README.md index a11adb93828..27091ad1526 100644 --- a/manual/retries/README.md +++ b/manual/retries/README.md @@ -1,3 +1,22 @@ + + ## Retries When a query fails, it sometimes makes sense to retry it: the error might be temporary, or the query might work on a @@ -146,37 +165,37 @@ implementations to handle idempotence (the new behavior is equivalent to what yo `IdempotenceAwareRetryPolicy` before). -[RetryDecision]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html -[retry()]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#retry-com.datastax.driver.core.ConsistencyLevel- -[tryNextHost()]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#tryNextHost-com.datastax.driver.core.ConsistencyLevel- -[rethrow()]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#rethrow-- -[ignore()]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#ignore-- -[NoHostAvailableException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[getErrors()]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- -[RetryPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.html -[DefaultRetryPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DefaultRetryPolicy.html -[onReadTimeout]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- -[onWriteTimeout]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onWriteTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.WriteType-int-int-int- -[onUnavailable]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onUnavailable-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-int- -[onRequestError]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- -[UnavailableException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/UnavailableException.html -[ReadTimeoutException]: 
http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/ReadTimeoutException.html -[WriteTimeoutException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/WriteTimeoutException.html -[OverloadedException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/OverloadedException.html -[ServerError]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/ServerError.html -[OperationTimedOutException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/OperationTimedOutException.html -[ConnectionException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/ConnectionException.html -[QueryValidationException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/QueryValidationException.html -[InvalidQueryException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/InvalidQueryException.html -[InvalidConfigurationInQueryException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.html -[UnauthorizedException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/UnauthorizedException.html -[SyntaxError]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/SyntaxError.html -[AlreadyExistsException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/AlreadyExistsException.html -[TruncateException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/TruncateException.html +[RetryDecision]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html +[retry()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#retry-com.datastax.driver.core.ConsistencyLevel- +[tryNextHost()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#tryNextHost-com.datastax.driver.core.ConsistencyLevel- +[rethrow()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#rethrow-- +[ignore()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#ignore-- +[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[getErrors()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- +[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html +[DefaultRetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html +[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- +[onWriteTimeout]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onWriteTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.WriteType-int-int-int- +[onUnavailable]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onUnavailable-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-int- +[onRequestError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- +[UnavailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnavailableException.html +[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ReadTimeoutException.html +[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/WriteTimeoutException.html +[OverloadedException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OverloadedException.html +[ServerError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ServerError.html +[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OperationTimedOutException.html +[ConnectionException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ConnectionException.html +[QueryValidationException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/QueryValidationException.html +[InvalidQueryException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/InvalidQueryException.html +[InvalidConfigurationInQueryException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.html +[UnauthorizedException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnauthorizedException.html +[SyntaxError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/SyntaxError.html +[AlreadyExistsException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/AlreadyExistsException.html +[TruncateException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/TruncateException.html [query plan]: ../load_balancing/#query-plan [connection pool]: ../pooling/ [prepared]: ../statements/prepared/#preparing-on-multiple-nodes [driver read timeout]: ../socket_options/#driver-read-timeout [hinted handoffs]: https://docs.datastax.com/en/cassandra/2.1/cassandra/dml/dml_about_hh_c.html?scroll=concept_ds_ifg_jqx_zj__performance -[idempotence]: ../idempotence/ \ No newline at end of file +[idempotence]: ../idempotence/ diff --git a/manual/shaded_jar/README.md b/manual/shaded_jar/README.md index 6319c1a10fb..9bfe8e59564 100644 --- a/manual/shaded_jar/README.md +++ b/manual/shaded_jar/README.md @@ -1,3 +1,22 @@ + + ## Using the shaded JAR The default driver JAR depends on [Netty](http://netty.io/), which is @@ -10,9 +29,9 @@ package name: ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core - 3.3.0 + 3.12.1 shaded @@ -21,6 +40,10 @@ package name: io.netty * + + io.dropwizard.metrics + metrics-core + ``` @@ -30,24 +53,28 @@ non-shaded JAR: ```xml - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core - 3.3.0 + 3.12.1 shaded io.netty * + + io.dropwizard.metrics + metrics-core + - com.datastax.cassandra + 
org.apache.cassandra cassandra-driver-mapping - 3.3.0 + 3.12.1 - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core @@ -74,5 +101,5 @@ detects that shaded Netty classes are being used: Detected shaded Netty classes in the classpath; native epoll transport will not work properly, defaulting to NIO. -[NettyOptions]:http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/NettyOptions.html +[NettyOptions]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html [Netty native transports]:http://netty.io/wiki/native-transports.html diff --git a/manual/socket_options/README.md b/manual/socket_options/README.md index 8a0defd87e7..47021853d81 100644 --- a/manual/socket_options/README.md +++ b/manual/socket_options/README.md @@ -1,3 +1,22 @@ + + ## Socket options [SocketOptions] controls various low-level parameters related to TCP connections between the driver and Cassandra. @@ -117,15 +136,15 @@ To clarify: We might rename `SocketOptions.setReadTimeoutMillis` in a future version to clear up any confusion. -[SocketOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html -[setReadTimeoutMillis]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setReadTimeoutMillis-int- -[setConnectTimeoutMillis]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setConnectTimeoutMillis-int- -[setKeepAlive]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setKeepAlive-boolean- -[setReceiveBufferSize]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setReceiveBufferSize-int- -[setReuseAddress]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setReuseAddress-boolean- -[setSendBufferSize]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setSendBufferSize-int- -[setSoLinger]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setSoLinger-int- -[setTcpNoDelay]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SocketOptions.html#setTcpNoDelay-boolean- -[onReadTimeout]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- -[onRequestError]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- -[OperationTimedOutException]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/exceptions/OperationTimedOutException.html \ No newline at end of file +[SocketOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html +[setReadTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReadTimeoutMillis-int- +[setConnectTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setConnectTimeoutMillis-int- +[setKeepAlive]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setKeepAlive-boolean- +[setReceiveBufferSize]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReceiveBufferSize-int- +[setReuseAddress]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReuseAddress-boolean- +[setSendBufferSize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setSendBufferSize-int- +[setSoLinger]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setSoLinger-int- +[setTcpNoDelay]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setTcpNoDelay-boolean- +[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- +[onRequestError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- +[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OperationTimedOutException.html diff --git a/manual/speculative_execution/README.md b/manual/speculative_execution/README.md index ceecc2a0f3b..90915bb2b8c 100644 --- a/manual/speculative_execution/README.md +++ b/manual/speculative_execution/README.md @@ -1,3 +1,22 @@ + + ## Speculative query execution Sometimes a Cassandra node might be experiencing difficulties (ex: long @@ -73,7 +92,7 @@ Speculative executions are controlled by an instance of `Cluster`. This policy defines the threshold after which a new speculative execution will be triggered. -[SpeculativeExecutionPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html +[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html Two implementations are provided with the driver: @@ -101,7 +120,7 @@ way: * if no response has been received at t0 + 1000 milliseconds, start another speculative execution on a third node. -[ConstantSpeculativeExecutionPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.html +[ConstantSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.html #### [PercentileSpeculativeExecutionPolicy] @@ -117,7 +136,7 @@ explicitly depend on it: org.hdrhistogram HdrHistogram - 2.1.9 + 2.1.10 ``` @@ -160,10 +179,10 @@ Note that `PercentileTracker` may also be used with a slow query logger (see the [Logging](../logging/) section). In that case, you would create a single tracker object and share it with both components. 
-[PercentileSpeculativeExecutionPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.html -[PercentileTracker]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PercentileTracker.html -[ClusterWidePercentileTracker]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ClusterWidePercentileTracker.html -[PerHostPercentileTracker]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PerHostPercentileTracker.html +[PercentileSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.html +[PercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PercentileTracker.html +[ClusterWidePercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ClusterWidePercentileTracker.html +[PerHostPercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PerHostPercentileTracker.html [hdr]: http://hdrhistogram.github.io/HdrHistogram/ #### Using your own @@ -210,7 +229,7 @@ client driver exec1 exec2 The only impact is that all executions of the same query always share the same query plan, so each host will be used by at most one execution. -[retry_policy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.html +[retry_policy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html ### Tuning and practical details @@ -225,8 +244,8 @@ You can monitor how many speculative executions were triggered with the It should only be a few percents of the total number of requests ([cluster.getMetrics().getRequestsTimer().getCount()][request_metric]). -[se_metric]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metrics.Errors.html#getSpeculativeExecutions-- -[request_metric]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Metrics.html#getRequestsTimer-- +[se_metric]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.Errors.html#getSpeculativeExecutions-- +[request_metric]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.html#getRequestsTimer-- #### Stream id exhaustion @@ -255,8 +274,8 @@ sustained. If you're unsure of which native protocol version you're using, you can check with [cluster.getConfiguration().getProtocolOptions().getProtocolVersion()][protocol_version]. -[session_state]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.State.html -[protocol_version]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- +[session_state]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.State.html +[protocol_version]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- #### Request ordering and client timestamps diff --git a/manual/ssl/README.md b/manual/ssl/README.md index 59249b586c2..e9d56d69d46 100644 --- a/manual/ssl/README.md +++ b/manual/ssl/README.md @@ -1,3 +1,22 @@ + + ## SSL You can secure traffic between the driver and Cassandra with SSL. 
There @@ -118,7 +137,29 @@ Cluster cluster = Cluster.builder() Note that you can also extend the class and override [newSSLEngine(SocketChannel,InetSocketAddress)][newSSLEngine] if you need specific -configuration on the `SSLEngine` (for example hostname verification). +configuration on the `SSLEngine`. For example, to enable hostname verification: + +```java +SSLContext sslContext = ... // create and configure SSL context + +RemoteEndpointAwareJdkSSLOptions sslOptions = new RemoteEndpointAwareJdkSSLOptions(sslContext, null) { + protected SSLEngine newSSLEngine(SocketChannel channel, InetSocketAddress remoteEndpoint) { + SSLEngine engine = super.newSSLEngine(channel, remoteEndpoint); + SSLParameters parameters = engine.getSSLParameters(); + // HTTPS endpoint identification includes hostname verification against certificate's common name. + // This API is only available for JDK7+. + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + engine.setSSLParameters(parameters); + return engine; + } +}; + +Cluster cluster = Cluster.builder() + .addContactPoint("127.0.0.1") + .withSSL(sslOptions) + .build(); +``` + #### Netty @@ -153,7 +194,7 @@ add it to your dependencies. There are known runtime incompatibilities between newer versions of netty-tcnative and the version of netty that the driver uses. For best -results, use version 2.0.1.Final. +results, use version 2.0.7.Final. Using netty-tcnative requires JDK 1.7 or above and requires the presence of OpenSSL on the system. It will not fall back to the JDK implementation. @@ -185,8 +226,8 @@ Cluster cluster = Cluster.builder() .build(); ``` -[RemoteEndpointAwareSSLOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.html -[RemoteEndpointAwareJdkSSLOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html -[newSSLEngine]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html#newSSLEngine-io.netty.channel.socket.SocketChannel-java.net.InetSocketAddress- -[RemoteEndpointAwareNettySSLOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.html -[NettyOptions]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/NettyOptions.html +[RemoteEndpointAwareSSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.html +[RemoteEndpointAwareJdkSSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html +[newSSLEngine]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html#newSSLEngine-io.netty.channel.socket.SocketChannel-java.net.InetSocketAddress- +[RemoteEndpointAwareNettySSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.html +[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html diff --git a/manual/statements/README.md b/manual/statements/README.md index 6efd14ce104..a078eec4446 100644 --- a/manual/statements/README.md +++ b/manual/statements/README.md @@ -1,3 +1,22 @@ + + ## Statements To execute a query, you create a [Statement] instance and pass it to [Session#execute()][execute] or @@ -32,11 +51,11 @@ If you use custom policies ([RetryPolicy], [LoadBalancingPolicy], properties that influence statement execution. 
To achieve this, you can wrap your statements in a custom [StatementWrapper] implementation. -[Statement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Statement.html -[QueryBuilder]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/querybuilder/QueryBuilder.html -[StatementWrapper]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/StatementWrapper.html -[RetryPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/RetryPolicy.html -[LoadBalancingPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[SpeculativeExecutionPolicy]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html -[execute]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- -[executeAsync]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- +[Statement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html +[StatementWrapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/StatementWrapper.html +[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html +[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html +[execute]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- +[executeAsync]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- diff --git a/manual/statements/batch/README.md b/manual/statements/batch/README.md index 5f3b7930c32..776afb56227 100644 --- a/manual/statements/batch/README.md +++ b/manual/statements/batch/README.md @@ -1,5 +1,61 @@ + + ## Batch statements -*Coming soon... In the meantime, see the javadoc for [BatchStatement].* +Use [BatchStatement] to execute a set of queries as a single operation (refer to +[Batching inserts, updates and deletes][batch_dse] to understand how to use batching effectively): + +```java +PreparedStatement preparedInsertExpense = + session.prepare( + "INSERT INTO cyclist_expenses (cyclist_name, expense_id, amount, description, paid) " + + "VALUES (:name, :id, :amount, :description, :paid)"); +SimpleStatement simpleInsertBalance = + new SimpleStatement("INSERT INTO cyclist_expenses (cyclist_name, balance) VALUES (?, 0) IF NOT EXISTS", + "Vera ADRIAN"); + +BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED) + .add(simpleInsertBalance) + .add(preparedInsertExpense.bind("Vera ADRIAN", 1, 7.95f, "Breakfast", false)); + +session.execute(batch); +``` + +As shown in the examples above, batches can contain any combination of simple statements and bound +statements. A given batch can contain at most 65536 statements. Past this limit, addition methods +throw an `IllegalStateException`. + +By default, batches are configured as [LOGGED]. 
This ensures that if any statement in the batch +succeeds, all will eventually succeed. Ensuring all queries in a batch succeed has a +performance cost. Consider using [UNLOGGED] as shown above if you do not need this capability. + +Please note that the size of a batch is subject to the [batch_size_fail_threshold] configuration +option on the server. + +In addition, simple statements with named parameters are currently not supported in batches (this is +due to a [protocol limitation][CASSANDRA-10246] that will be fixed in a future version). If you try +to execute such a batch, an `IllegalArgumentException` is thrown. -[BatchStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/BatchStatement.html +[BatchStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html +[batch_dse]: http://docs.datastax.com/en/dse/5.1/cql/cql/cql_using/useBatch.html +[LOGGED]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.Type.html#LOGGED +[UNLOGGED]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.Type.html#UNLOGGED +[batch_size_fail_threshold]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html#configCassandra_yaml__batch_size_fail_threshold_in_kb +[CASSANDRA-10246]: https://issues.apache.org/jira/browse/CASSANDRA-10246 diff --git a/manual/statements/built/README.md b/manual/statements/built/README.md index 405aa9d2161..ac0ba28b1e6 100644 --- a/manual/statements/built/README.md +++ b/manual/statements/built/README.md @@ -1,5 +1,287 @@ + + ## Built statements -*Coming soon... In the meantime, see the javadoc for [QueryBuilder].* +Built statements are generated via [QueryBuilder]'s fluent API, which makes complex queries +easier to build than hardcoding query strings. + +Note: The provided builders perform very little validation of the built query. There is +no guarantee that a built query is valid, and it is definitely possible to create +invalid queries. + +Queries built with `QueryBuilder` are executed the same way as other queries--via +`execute` or `executeAsync`. A query built with inlined values doesn't differ much from a +statement specified as a string, but it's also possible to build the query with bind markers +inside it, and then convert it into a [prepared statement](../prepared/). + +### Basics + +Building a `BuiltStatement` is easy: start by calling one of the +[QueryBuilder] methods that represent a CQL "verb": `select`, `update`, `delete`, +`insertInto`, or `truncate`, provide the required parameters, and then call "verb"-specific +methods (such as `where`, `from`, etc.) to form a complete CQL statement. The statement's +target table can be specified as a simple table name (if a default keyspace has been set +when creating the `Session` object), as a combination of keyspace name and table name, or as +a [TableMetadata] object. + +Note: `QueryBuilder` doesn't support the full set of CQL. For +most DDL operations (`CREATE TABLE`, etc.) you can use the [SchemaBuilder] (see the sketch below). To perform other +operations, for example role management, you still need to use [simple statements](../simple/). 
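As a brief illustration of [SchemaBuilder], a minimal sketch (the keyspace, table, and column definitions are hypothetical, and an existing `session` is assumed):

```java
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;

// CREATE TABLE IF NOT EXISTS ks.test (id int PRIMARY KEY, t text)
session.execute(
    SchemaBuilder.createTable("ks", "test")
        .ifNotExists()
        .addPartitionKey("id", DataType.cint())
        .addColumn("t", DataType.text()));
```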
+ +### Selecting data + +Selecting data is quite simple: at a minimum you need to provide a list of columns to +select, and then specify from which table to select them (you can also optionally +specify a condition, as described in the next section): + +```java +BuiltStatement selectAll1 = QueryBuilder.select("id", "t").from("test", "test"); +ResultSet rs = session.execute(selectAll1); +for (Row row: rs) { + System.out.println(row); +} +``` + +Note: The call `select("column1", "column2")` is really a shortcut for a chain of calls +`select().column("column1").column("column2")`. + +Please note that you can't pass `*` as a column name to select all columns--if you do, +you'll get an exception about an unknown column. To select all columns, either use +`select` in combination with the `all` function, or simply don't specify a list of columns: + +```java +BuiltStatement selectAll2 = QueryBuilder.select().all().from("test", "test"); +BuiltStatement selectAll3 = QueryBuilder.select().from("test", "test"); +``` + +Besides selecting specific columns, it's also possible to call an arbitrary CQL +function by using the `fcall` method (this is just an example, don't do this on real data): + +```java +BuiltStatement sum = QueryBuilder.select().fcall("sum", column("id")).as("sum_id") + .from("test", "test"); +``` + +Note: When using functions, Cassandra will generate aliases for you, but you can provide +explicit aliases by using `as` right after a given selector. + +For frequently used functions, there are shortcuts such as `countAll`, `ttl`, `writeTime`, +`uuid`, `now`, `toJson`, etc.: + +```java +BuiltStatement count = QueryBuilder.select().countAll() + .from("test", "test"); + +BuiltStatement ttlAndWriteTime = QueryBuilder.select().column("id").column("t") + .ttl("t").as("id_ttl").writeTime("t") + .from("test", "test"); +``` + +You can also cast the value of a given column to another type by using the `cast` function, +[specifying](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html#cast-java.lang.Object-com.datastax.driver.core.DataType-) +the column to cast and the type to cast it to. + +#### Specifying conditions + +Selecting data rarely happens on the whole table--in most cases, you are interested +in particular rows, located in one or several partitions. Conditions are specified by +using the `where` call, like this: + +```java +BuiltStatement selectOne = QueryBuilder.select().from("test", "test") + .where(QueryBuilder.eq("id", 1)); +``` + +The `where` function accepts a `Clause` object that is generated by calling +`QueryBuilder`'s +[functions](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html#eq-java.lang.Iterable-java.lang.Iterable-), +such as `eq`, `ne`, `lt`, `in`, `contains`, `notNull`, etc. In most cases, these +functions take 2 arguments: the name of the column, and the value to compare against, but there +are also variants that take 2 iterables of columns and values respectively. + +Note: as queries become more and more complex, repeating `QueryBuilder` everywhere +makes the code less readable. To simplify the code, you can statically import all +(or only the required) functions of the `QueryBuilder` class (this is the same example +as above): + +```java +import static com.datastax.driver.core.querybuilder.QueryBuilder.*; + +//... 
+ +BuiltStatement selectOne = QueryBuilder.select().from("test", "test") + .where(eq("id", 1)); +``` + +If you need to specify more complex conditions, you can chain additional clauses +together with the `and` operator, which accepts the same clauses as `where`: + +```java +BuiltStatement select = QueryBuilder.select().from("test", "test") + .where(eq("id", "1")).and(eq("txt", "test")); +``` + +#### Other selection options + +For `SELECT` statements you can also specify a number of options: + - `allowFiltering` generates the corresponding `ALLOW FILTERING` clause in the query (***only use it if you know what you're doing!***); + - `limit` and `perPartitionLimit` let you limit the amount of data to fetch; + - `groupBy` groups the returned data; + - `orderBy` specifies the sorting direction for the given clustering columns. + +This rather artificial example shows how some of them are used: + +```java +BuiltStatement selectOne = QueryBuilder.select().from("test") + .where(QueryBuilder.eq("id", 1)).limit(1).allowFiltering() + .perPartitionLimit(1).orderBy(desc("id")); +``` + +### Inserting data + +Inserting data is straightforward--you specify the target table in a call to +`insertInto`, and then provide the values to insert either by chaining several calls to the +`value` function, or by using the `values` function and passing lists or arrays of column +names and their corresponding values. The following 2 examples are equivalent: + +```java +QueryBuilder.insertInto("test").value("id", 4).value("t", "test 4"); +QueryBuilder.insertInto("test").values(Arrays.asList("id", "t"), Arrays.asList(4, "test 4")); +``` + +You can also insert JSON-formatted data by calling the `json` function and passing the data: + +```java +QueryBuilder.insertInto("test").json("{\"id\":4, \"t\":\"test 4\"}"); +``` + +`QueryBuilder` can also generate statements that use lightweight +transactions (LWT) to check that the inserted data doesn't exist yet. You just need to add +a call to `ifNotExists` to the statement: + +```java +QueryBuilder.insertInto("test").value("id", 4).ifNotExists(); +``` + +It is also possible to specify additional metadata for the inserted data, such as a TTL (time to live) or +a timestamp. This is achieved with the `using` method, which takes a `Using` +object generated by either the `ttl` or the `timestamp` function of the `QueryBuilder` class. If +you want to specify both, you need to chain them together with the `and` operator: + +```java +QueryBuilder.insertInto("test").value("id", 4).using(ttl(10)).and(timestamp(1000)); +``` + +Besides this, for newer versions of Cassandra it's possible to specify additional +parameters, such as `DEFAULT UNSET` and `DEFAULT NULL` in `INSERT INTO ... JSON` +statements, by using `defaultUnset` and `defaultNull` respectively. 
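As a side note on the conditional insert shown above, here is a minimal sketch of checking whether the lightweight transaction was actually applied (assuming an existing `session` and the `test` table used in these examples):

```java
ResultSet rs = session.execute(
    QueryBuilder.insertInto("test").value("id", 4).value("t", "test 4").ifNotExists());

// For LWT statements the server returns an [applied] column; wasApplied() reads it.
if (!rs.wasApplied()) {
  System.out.println("A row with id 4 already exists");
}
```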
+ +### Update statements + +Updating data is also relatively straightforward: you specify the data to update, a +condition, and additional options if necessary: + +```java +BuiltStatement updateStatement = QueryBuilder.update("test").with(set("test", 1)) + .where(eq("id", 1)); +``` + +The first update operation is passed as an argument to the `with` function, and additional +operations can be chained via `and` calls: + +```java +BuiltStatement updateStatement = QueryBuilder.update("test").with(set("t", "test 1")) + .and(set("x", 10)).where(eq("id", 1)); +``` + +Besides the most commonly used `set` operation, there are many operations for working with +all collection types (lists, maps and sets): `add`, `discard`, `prepend`, `put`, +`remove`, `setIdx`, etc. For the full list of operations, see the [QueryBuilder] documentation. + +To update counters, use the `incr` and `decr` functions, which take the column name +and the value by which the column will be increased or decreased: + +```java +BuiltStatement query = QueryBuilder.update("counters") + .with(incr("counter", 1)).where(eq("id", 1)); +``` + +As with insert statements, it's also possible to perform conditional updates, either by +calling `ifExists` (to perform the update only if the entry exists), or by calling +`onlyIf` with a `Clause` object--in this case the row will be updated only if the clause +evaluates to true: + +```java +Statement updateStatement = QueryBuilder.update("test").with(set("t", "test 1")) + .where(eq("id", 1)).ifExists(); +``` + +Setting the TTL and write timestamp is done the same way as for insert statements. + +### Deleting data + +You can either delete the whole row matching your condition: + +```java +BuiltStatement deleteStmt = QueryBuilder.delete().from("test") + .where(eq("id", "1")).and(eq("txt", "test")); +``` + +or specify a list of columns to delete: + +```java +BuiltStatement deleteStmt = QueryBuilder.delete("col1", "col2").from("test") + .where(eq("id", "1")).and(eq("txt", "test")); +``` + +Conditions are specified as for the other operations described above, including +conditional deletes with `ifExists` and `onlyIf`. + +### Prepared statements + +If you repeat the same operation very often, it is more efficient to +create a [prepared statement](../prepared/) from the `BuiltStatement`. To do this, instead +of the real values, use bind markers created either by calling `bindMarker()` (which generates +a positional placeholder), or by calling `bindMarker("name")` (which creates a named +placeholder). After the statement is built, just prepare it as usual, then bind and +execute: + +```java +BuiltStatement selectOne2 = QueryBuilder.select().from("test", "test") + .where(eq("id", bindMarker())); +PreparedStatement preparedStatement = session.prepare(selectOne2); +ResultSet rs = session.execute(preparedStatement.bind(1)); +``` + +### Setting additional options + +As with regular statements, you can also set options on built statements, +such as the consistency level (with `setConsistencyLevel`), tracing +(with `enableTracing`/`disableTracing`), the retry policy (with `setRetryPolicy`), etc. +A brief sketch follows below. + +Note: calling these functions changes the declared type from `BuiltStatement` to +`Statement` or `RegularStatement`, so you won't be able to use `BuiltStatement`-specific +methods afterwards without an explicit cast. 
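A minimal sketch of setting options on a built statement (assuming static imports of `QueryBuilder` methods and an existing `session`, as in the examples above):

```java
// The option setters are inherited from Statement, so the declared type here is Statement.
Statement select =
    QueryBuilder.select().from("test", "test")
        .where(eq("id", 1))
        .setConsistencyLevel(ConsistencyLevel.QUORUM)
        .enableTracing();

session.execute(select);
```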
+ + -[QueryBuilder]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/querybuilder/QueryBuilder.html \ No newline at end of file +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html +[TableMetadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TableMetadata.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/schemabuilder/SchemaBuilder.html diff --git a/manual/statements/prepared/README.md b/manual/statements/prepared/README.md index 2132db700ae..ccb0a33167a 100644 --- a/manual/statements/prepared/README.md +++ b/manual/statements/prepared/README.md @@ -1,3 +1,22 @@ + + ## Prepared statements Use [PreparedStatement] for queries that are executed multiple times in your application: @@ -122,10 +141,10 @@ BoundStatement bound = ps1.bind() // Using the unset method to unset previously set value. // Positional setter: -bound.unset("description"); +bound.unset(1); // Named setter: -bound.unset(1); +bound.unset("description"); ``` A bound statement also has getters to retrieve the values. Note that @@ -249,19 +268,18 @@ is currently no mechanism for Cassandra to invalidate the existing metadata. Be the driver is not able to properly react to these changes and will improperly read rows after a schema change is made. -Therefore it is currently recommended to not create prepared statements -for 'SELECT *' queries if you plan on making schema changes involving -adding or dropping columns. Alternatively you should list all columns of interest -in your statement, i.e.: `SELECT a, b, c FROM tbl`. +Therefore it is currently recommended to list all columns of interest in +your prepared statements (i.e. `SELECT a, b, c FROM table`), instead of +relying on `SELECT *`. This will be addressed in a future release of both Cassandra and the driver. Follow [CASSANDRA-10786] and [JAVA-1196] for more information. 
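To illustrate the recommendation above, a minimal sketch (the table and column names are hypothetical, and an existing `session` is assumed):

```java
// Prefer an explicit column list over SELECT * when preparing statements:
PreparedStatement ps = session.prepare("SELECT a, b, c FROM ks.tbl WHERE id = :id");
Row row = session.execute(ps.bind().setInt("id", 1)).one();
```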
-[PreparedStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/PreparedStatement.html -[BoundStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/BoundStatement.html -[setPrepareOnAllHosts]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryOptions.html#setPrepareOnAllHosts-boolean- -[setReprepareOnUp]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/QueryOptions.html#setReprepareOnUp-boolean- -[execute]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- -[executeAsync]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- +[PreparedStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PreparedStatement.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BoundStatement.html +[setPrepareOnAllHosts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setPrepareOnAllHosts-boolean- +[setReprepareOnUp]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setReprepareOnUp-boolean- +[execute]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- +[executeAsync]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- [CASSANDRA-10786]: https://issues.apache.org/jira/browse/CASSANDRA-10786 -[JAVA-1196]: https://datastax-oss.atlassian.net/browse/JAVA-1196 \ No newline at end of file +[JAVA-1196]: https://datastax-oss.atlassian.net/browse/JAVA-1196 diff --git a/manual/statements/simple/README.md b/manual/statements/simple/README.md index 9967b0e4bf3..35ba0e4c9ab 100644 --- a/manual/statements/simple/README.md +++ b/manual/statements/simple/README.md @@ -1,3 +1,22 @@ + + ## Simple statements Use [SimpleStatement] for queries that will be executed only once (or a few times) in your application: @@ -128,4 +147,4 @@ session.execute( 1, bytes); ``` -[SimpleStatement]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/SimpleStatement.html +[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SimpleStatement.html diff --git a/manual/tuples/README.md b/manual/tuples/README.md index 345bcc517ff..6e7f844718f 100644 --- a/manual/tuples/README.md +++ b/manual/tuples/README.md @@ -1,4 +1,23 @@ -## Using Tuples with the Java driver + + +## Using Tuples with the Java Driver Cassandra allows to use `tuple` data types [in tables and user-defined types](https://docs.datastax.com/en/cql/3.1/cql/cql_reference/tupleType.html): @@ -14,7 +33,7 @@ CREATE TABLE ks.collect_things ( ### Fetching Tuples from Rows results -The DataStax Java driver exposes a special [`TupleValue`][TupleValue] class to handle such columns. +The Java Driver exposes a special [`TupleValue`][TupleValue] class to handle such columns. 
[`TupleValue`][TupleValue] exposes getters allowing to extract from the tuple all the data types supported by Cassandra: @@ -83,7 +102,7 @@ session.execute(bs); More generally, the `IN` keyword in a `SELECT` query will be used to define a *list* of desired values of the filtered clustering keys, those would simply be bound as a list of -[`TupleValue`][TupleValue] with the Java driver: +[`TupleValue`][TupleValue] with the Java Driver: ```java TupleType oneTimeUsageTuple = cluster.getMetadata().newTupleType(DataType.text(), DataType.text()); @@ -96,7 +115,7 @@ bs.setList("l", Arrays.asList(oneTimeUsageTuple.newValue("1", "1"), oneTimeUsage session.execute(bs); ``` -[TupleType]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleType.html -[TupleValue]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleValue.html -[newValueVararg]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleType.html#newValue-java.lang.Object...- -[newValue]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/TupleType.html#newValue-- +[TupleType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html +[TupleValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleValue.html +[newValueVararg]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html#newValue-java.lang.Object...- +[newValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html#newValue-- diff --git a/manual/udts/README.md b/manual/udts/README.md index 760b87e9321..f8d15885b0b 100644 --- a/manual/udts/README.md +++ b/manual/udts/README.md @@ -1,5 +1,118 @@ + + ## User-defined types -*Coming soon... In the meantime, see the javadoc for [UserType].* +[CQL user-defined types][cql_doc] are ordered sets of named, typed fields. They must be defined in a +keyspace: + +``` +CREATE TYPE ks.type1 ( + a int, + b text, + c float); +``` + +And can then be used as a column type in tables, or a field type in other user-defined types in that +keyspace: + +``` +CREATE TABLE ks.collect_things ( + pk int, + ck1 text, + ck2 text, + v frozen, + PRIMARY KEY (pk, ck1, ck2) +); + +CREATE TYPE ks.type2 (v frozen); +``` + +### Fetching UDTs from results + +The driver maps UDT columns to the [UDTValue] class, which exposes getters and setters to access +individual fields by index or name: + +```java +Row row = session.execute("SELECT v FROM ks.collect_things WHERE pk = 1").one(); + +UDTValue udtValue = row.getUDTValue("v"); +int a = udtValue.getInt(0); +String b = udtValue.getString("b"); +Float c = udtValue.getFloat(2); +``` + +### Using UDTs as parameters + +Statements may contain UDTs as bound values: + +```java +PreparedStatement ps = + session.prepare( + "INSERT INTO ks.collect_things (pk, ck1, ck2, v) VALUES (:pk, :ck1, :ck2, :v)"); +``` + +To create a new UDT value, you must first have a reference to its [UserType]. There are +various ways to get it: + +* from the statement's metadata + + ```java + UserType udt = (UserType) ps.getVariables().getType("v"); + ``` + +* from the driver's [schema metadata](../metadata/#schema-metadata): + + ```java + UserType udt = session.getCluster().getMetadata().getKeyspace("ks").getUserType("type1"); + ``` + +* from another UDT value: + + ```java + UserType udt = udtValue.getType(); + ``` + +Note that the driver's official API does not expose a way to build [UserType] instances manually. 
+This is because the type's internal definition must precisely match the database schema; +if it doesn't (for example if the fields are not in the same order), you run the risk of inserting +corrupt data, that you won't be able to read back. + +Once you have the type, call `newValue()` and set the fields: + +```java +UDTValue udtValue = udt.newValue().setInt(0, 1).setString(1, "hello").setFloat(2, 2.3f); +``` + +And bind your UDT value like any other type: + +```java +BoundStatement bs = + ps.bind() + .setInt("pk", 1) + .setString("ck1", "1") + .setString("ck2", "1") + .setUDTValue("v", udtValue); +session.execute(bs); +``` + +[cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/cqlRefUDType.html -[UserType]: http://docs.datastax.com/en/drivers/java/3.2/com/datastax/driver/core/UserType.html \ No newline at end of file +[UDTValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UDTValue.html +[UserType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UserType.html diff --git a/pom.xml b/pom.xml index 897867bfa5a..d179f45d04f 100644 --- a/pom.xml +++ b/pom.xml @@ -1,12 +1,14 @@ - 2.7.9.1 - 2.9.1 + 2.7.9.7 + 2.9.9 1.0 1.0.4 - 2.0.7 - 3.0.27 + + 2.1.7 + 3.0.44 2.4.7 2.0.1 2.23.1 @@ -75,6 +85,7 @@ 6.8.8 1.7.0 1.10.8 + 2.25.0 1.3 1.1.2 1.2.3 @@ -91,26 +102,33 @@ - com.datastax.cassandra + org.apache.cassandra + cassandra-driver-core + ${project.parent.version} + + + + org.apache.cassandra cassandra-driver-core ${project.parent.version} + shaded - com.datastax.cassandra + org.apache.cassandra cassandra-driver-core ${project.parent.version} test-jar - com.datastax.cassandra + org.apache.cassandra cassandra-driver-mapping ${project.parent.version} - com.datastax.cassandra + org.apache.cassandra cassandra-driver-extras ${project.parent.version} @@ -158,8 +176,8 @@ - net.jpountz.lz4 - lz4 + org.lz4 + lz4-java ${lz4.version} @@ -313,6 +331,12 @@ ${mockito.version} + + com.github.tomakehurst + wiremock + ${wiremock.version} + + org.scassandra java-client @@ -333,8 +357,8 @@ io.netty - netty-tcnative - 2.0.1.Final + netty-tcnative-boringssl-static + ${netty-tcnative.version} ${os.detected.classifier} @@ -479,77 +503,14 @@ jar-no-fork - - - - - - maven-javadoc-plugin - 2.10.4 - true - - true - false - ${javadoc.opts} - - https://docs.oracle.com/javase/8/docs/api/ - https://google.github.io/guava/releases/19.0/api/docs/ - http://netty.io/4.0/api/ - http://www.joda.org/joda-time/apidocs/ - http://fasterxml.github.io/jackson-core/javadoc/2.8/ - http://fasterxml.github.io/jackson-databind/javadoc/2.7/ - https://javaee-spec.java.net/nonav/javadocs/ - - - - - org.xerial.snappy - snappy-java - ${snappy.version} - - - net.jpountz.lz4 - lz4 - ${lz4.version} - - - org.hdrhistogram - HdrHistogram - ${hdr.version} - - - com.fasterxml.jackson.core - jackson-core - ${jackson.version} - - - com.fasterxml.jackson.core - jackson-annotations - ${jackson.version} - - - com.fasterxml.jackson.core - jackson-databind - ${jackson-databind.version} - - - joda-time - joda-time - ${joda.version} - - - javax.json - javax.json-api - ${jsr353-api.version} - - - - - - attach-javadocs - - jar - + + + NOTICE.txt + + + NOTICE_binary.txt + + @@ -624,7 +585,7 @@ - 3.2.0 + 3.11.5 ../clirr-ignores.xml com/datastax/shaded/** @@ -645,6 +606,12 @@ + + com.coveo + fmt-maven-plugin + 2.9 + + com.mycila license-maven-plugin @@ -652,13 +619,15 @@ org.codehaus.mojo animal-sniffer-maven-plugin - 1.15 + 1.16 check-jdk6 @@ -709,8 +678,7 @@ limitations under the License. 
1.0 - com.datastax.driver.extras.codecs.jdk8.IgnoreJDK6Requirement - + com.datastax.driver.core.IgnoreJDK6Requirement @@ -737,6 +705,7 @@ limitations under the License. ${test.groups} false + -Djdk.attach.allowAttachSelf=true ${cassandra.version} ${ipprefix} @@ -836,6 +805,7 @@ limitations under the License. + org.apache.maven.plugins maven-jar-plugin [2.2,) @@ -846,10 +816,44 @@ limitations under the License. + + + org.codehaus.mojo + clirr-maven-plugin + [2.7,) + + check + + + + + + + + + org.codehaus.gmaven + gmaven-plugin + [1.5,) + + testCompile + + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + 1.7.0 + + true + + @@ -968,17 +972,14 @@ limitations under the License. jar-no-fork - - - - - maven-javadoc-plugin - - - attach-javadocs - - jar - + + + NOTICE.txt + + + NOTICE_binary.txt + + @@ -1014,9 +1015,31 @@ limitations under the License. [1.8,) - - -Xdoclint:none - + + + + + com.coveo + fmt-maven-plugin + + + check-format + initialize + + check + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + none + + + + @@ -1047,14 +1070,34 @@ limitations under the License. - + + jdk10 + + 10.0 + + + + true + + - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - + + apple-silicon-dev + + + mac os x + mac + aarch64 + + + + + 2.2.10 + 1.1.10.1 + + + + @@ -1068,7 +1111,7 @@ limitations under the License. scm:git:git@github.com:datastax/java-driver.git scm:git:git@github.com:datastax/java-driver.git - https://github.com/datastax/java-driver + https://github.com/apache/cassandra-java-driver HEAD diff --git a/testing/README.md b/testing/README.md index 4dfbb525351..d1d97a6620b 100644 --- a/testing/README.md +++ b/testing/README.md @@ -1,3 +1,22 @@ + + ## Testing Prerequisites ### Install CCM diff --git a/testing/bin/coverage b/testing/bin/coverage old mode 100755 new mode 100644 index c920e9fa6d1..82702a91f4b --- a/testing/bin/coverage +++ b/testing/bin/coverage @@ -1,4 +1,20 @@ #!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. import argparse import ConfigParser diff --git a/upgrade_guide/README.md b/upgrade_guide/README.md index 3fe68aedc74..acec88fefdd 100644 --- a/upgrade_guide/README.md +++ b/upgrade_guide/README.md @@ -1,7 +1,111 @@ + + ## Upgrade guide The purpose of this guide is to detail changes made by successive -versions of the Java driver. +versions of the Java Driver. + +### 3.6.0 + +1. `ConsistencyLevel.LOCAL_SERIAL.isDCLocal()` now returns true. In driver + code, `isDCLocal()` is only used when evaluating a Statement's + ConsistencyLevel (which does not include Serial CLs), but as a matter of + correctness this was updated. + +2. `ReadFailureException` and `WriteFailureException` are now surfaced to + `RetryPolicy.onRequestError`. 
Consider updating custom `RetryPolicy` + implementations to account for this. In the general case, we recommend + using `RetryDecision.rethrow()`, see [JAVA-1944]. + +[JAVA-1944]: https://datastax-oss.atlassian.net/browse/JAVA-1944 + + +### 3.5.0 + +1. The `DowngradingConsistencyRetryPolicy` is now deprecated, see [JAVA-1752]. + It will also be removed in the next major release of the driver (4.0.0), + see [JAVA-1376]. + + The main motivation is the agreement that this policy's behavior should be + the application's concern, not the driver's. + + We recognize that there are use cases where downgrading is good – + for instance, a dashboard application would present the latest information + by reading at QUORUM, but it's acceptable for it to display stale information + by reading at ONE sometimes. + + But APIs provided by the driver should instead encourage idiomatic use of + a distributed system like Apache Cassandra, and a downgrading policy works + against this. It suggests that an anti-pattern such as "try to read at QUORUM, + but fall back to ONE if that fails" is a good idea in general use cases, + when in reality it provides no better consistency guarantees than working + directly at ONE, but with higher latencies. + + We therefore urge users to carefully choose upfront the consistency level that + works best for their use cases, and should they decide that the downgrading + behavior of `DowngradingConsistencyRetryPolicy` remains a good fit for certain + use cases, they will now have to implement this logic themselves, either + at application level, or alternatively at driver level, by rolling out their + own downgrading retry policy. + + To help users migrate existing applications that rely on + `DowngradingConsistencyRetryPolicy`, see this [online example] that illustrates + how to implement a downgrading logic at application level. + +[JAVA-1752]:https://datastax-oss.atlassian.net/browse/JAVA-1752 +[JAVA-1376]:https://datastax-oss.atlassian.net/browse/JAVA-1376 +[online example]:https://github.com/apache/cassandra-java-driver/blob/3.x/driver-examples/src/main/java/com/datastax/driver/examples/retry/DowngradingRetry.java + +2. The `TokenAwarePolicy` now has a new constructor that takes a `ReplicaOrdering` + argument, see [JAVA-1448]. + + One of the advantages of this feature is the new `NEUTRAL` + ordering strategy, which honors its child policy's ordering, i.e., replicas + are returned in the same relative order as in the child policy's query plan. + + For example, if the child policy returns the plan [A, B, C, D], and the replicas + for the query being routed are [D, A, B], then the token aware policy would return + the plan [A, B, D, C]. + + As a consequence, the constructor taking a boolean parameter `shuffleReplicas` + is now deprecated and will be removed in the next major release. + +[JAVA-1448]:https://datastax-oss.atlassian.net/browse/JAVA-1448 + + +### 3.4.0 + +`QueryBuilder` methods `in`, `lt`, `lte`, `eq`, `gt`, and `gte` now accept +`Iterable` as input rather than just `List`. This should have no impact unless +you were accessing these methods using reflection in which case you need to +account for these new parameter types. + + +### 3.3.1 + +Speculative executions can now be scheduled without delay: if +`SpeculativeExecutionPlan.nextExecution()` returns 0, the next execution will be fired immediately. +This allows aggressive policies that hit multiple replicas right away, in order to get the fastest +response possible. 
Note that this may break existing policies that used 0 to mean "no execution"; +make sure you use a negative value instead. + ### 3.2.0 diff --git a/upgrade_guide/migrating_from_astyanax/README.md b/upgrade_guide/migrating_from_astyanax/README.md index 0b518322a28..72bd82f9c58 100644 --- a/upgrade_guide/migrating_from_astyanax/README.md +++ b/upgrade_guide/migrating_from_astyanax/README.md @@ -1,10 +1,29 @@ + + # Migrating from Astyanax This section is a guide for users previously using *Astyanax* and looking for -migrating to the *DataStax Java driver*. +migrating to the *Java Driver*. See the child pages for more information: * [Changes at the language level](language_level_changes/) -* [Migrating Astyanax configurations to DataStax Java driver configurations](configuration/) +* [Migrating Astyanax configurations to Java Driver configurations](configuration/) * [Querying and retrieving results comparisons.](queries_and_results/) diff --git a/upgrade_guide/migrating_from_astyanax/configuration/README.md b/upgrade_guide/migrating_from_astyanax/configuration/README.md index c6629113d7f..6e94149dc47 100644 --- a/upgrade_guide/migrating_from_astyanax/configuration/README.md +++ b/upgrade_guide/migrating_from_astyanax/configuration/README.md @@ -1,22 +1,41 @@ + + # Configuration -## How Configuring the Java driver works +## How Configuring the Java Driver works -The two basic components in the Java driver are the `Cluster` and the `Session`. +The two basic components in the Java Driver are the `Cluster` and the `Session`. The `Cluster` is the object to create first, and on to which all global configurations apply. Connecting to the `Cluster` creates a `Session`. Queries are executed through the `Session`. The `Cluster` object then is to be viewed as the equivalent of the `AstyanaxContext` object. "Starting" an `AstyanaxContext` object typically returns a `Keyspace` -object, the `Keyspace` object is the equivalent of the *Java driver*’s `Session`. +object, the `Keyspace` object is the equivalent of the *Java Driver*’s `Session`. Configuring a `Cluster` works with the *Builder* pattern. The `Builder` takes all the configurations into account before building the `Cluster`. Following are some examples of the most important configurations that were -possible with *Astyanax* and how to translate them into *DataStax Java driver* -configurations. Please note that the Java driver has been optimized to handle most use +possible with *Astyanax* and how to translate them into *Java Driver* +configurations. Please note that the Java Driver has been optimized to handle most use cases at best and even though the following sections show how to tune some various options, the driver should provide the best performances with the default configurations and these options should not be changed unless there is a good reason to. @@ -25,11 +44,11 @@ and these options should not be changed unless there is a good reason to. Configuration of connection pools in *Astyanax* are made through the `ConnectionPoolConfigurationImpl`. This object gathers important configurations -that the *Java driver* has categorized in multiple *Option* and *Policy* kinds. +that the *Java Driver* has categorized in multiple *Option* and *Policy* kinds. 
### Connections pools internals Everything concerning the internal pools of connections to the *Cassandra nodes* -will be gathered in the Java driver in the [`PoolingOptions`](../../../manual/pooling): +will be gathered in the Java Driver in the [`PoolingOptions`](../../../manual/pooling): *Astyanax*: @@ -40,7 +59,7 @@ ConnectionPoolConfigurationImpl cpool = .setMaxConnsPerHost(3) ``` -*Java driver*: +*Java Driver*: ```java PoolingOptions poolingOptions = @@ -50,10 +69,10 @@ PoolingOptions poolingOptions = The first number is the initial number of connections, the second is the maximum number of connections the driver is allowed to create for each host. -Note that the *Java driver* allows multiple simultaneous requests on one single +Note that the *Java Driver* allows multiple simultaneous requests on one single connection, as it is based upon the [*Native protocol*](../../../manual/native_protocol), an asynchronous binary protocol that can handle up to 32768 simultaneous requests on a -single connection. The Java driver is able to manage and distribute simultaneous requests +single connection. The Java Driver is able to manage and distribute simultaneous requests by itself even under high contention, and changing the default `PoolingOptions` is not necessary most of the time except for very [specific use cases](../../../manual/pooling/#tuning-protocol-v3-for-very-high-throughputs). @@ -83,18 +102,18 @@ Changing the client timeout options might have more impacts than expected, **ple sure to properly document before changing these options.** ## Load Balancing -Both *Astyanax* and the *Java driver* connect to multiple nodes of the *Cassandra* +Both *Astyanax* and the *Java Driver* connect to multiple nodes of the *Cassandra* cluster. Distributing requests through all the nodes plays an important role in the good operation of the `Cluster` and for best performances. With *Astyanax*, requests (or “operations”) can be sent directly to replicas that have a copy of the data targeted by the *“Row key”* specified in the operation. Since the *Thrift* API is low-level, it forces the user to provide *Row keys*, known as the `TokenAware` -connection pool type. This setting is also present in the *Java driver*, however +connection pool type. This setting is also present in the *Java Driver*, however the configuration is different and provides more options to tweak. -Load balancing in the *Java driver* is a *Policy*, it is a class that will be -plugged in the *Java driver*’s code and the Driver will call its methods when it -needs to. The *Java driver* comes with a preset of specific load balancing policies. +Load balancing in the *Java Driver* is a *Policy*, it is a class that will be +plugged in the *Java Driver*’s code and the Driver will call its methods when it +needs to. The *Java Driver* comes with a preset of specific load balancing policies. Here’s an equivalent code: *Astyanax*: @@ -111,7 +130,7 @@ AstyanaxConfigurationImpl aconf = .setDiscoveryType(discType) ``` -*Java driver*: +*Java Driver*: ```java LoadBalancingPolicy lbp = new TokenAwarePolicy( @@ -121,7 +140,7 @@ LoadBalancingPolicy lbp = new TokenAwarePolicy( ); ``` -*By default* the *Java driver* will instantiate the exact Load balancing policy +*By default* the *Java Driver* will instantiate the exact Load balancing policy shown above, with the `LocalDC` being the DC of the first host the driver connects to. 
So to get the same behaviour than the *TokenAware* pool type of *Astyanax*, users shouldn’t need to specify a load balancing policy since the default one @@ -129,14 +148,14 @@ should cover it. Important: Note that since *CQL* is an abstraction of the Cassandra’s architecture, a simple query needs to have the *Row key* specified explicitly on a `Statement` in order -to benefit from the *TokenAware* routing (the *Row key* in the *Java driver* is +to benefit from the *TokenAware* routing (the *Row key* in the *Java Driver* is referenced as *Routing Key*), unlike the *Astyanax* driver. Some differences occur related to the different kinds of `Statements` the *Java driver* provides. Please see [this link](../../../manual/load_balancing/#token-aware-policy) for specific information. Custom load balancing policies can easily be implemented by users, and supplied to -the *Java driver* for specific use cases. All information necessary is available +the *Java Driver* for specific use cases. All information necessary is available in the [Load balaning policies docs](../../../manual/load_balancing). ## Consistency levels @@ -151,26 +170,26 @@ AstyanaxConfigurationImpl aconf = .setDefaultWriteConsistencyLevel(ConsistencyLevel.CL*ALL) ``` -*Java driver*: +*Java Driver*: ```java QueryOptions qo = new QueryOptions().setConsistencyLevel(ConsistencyLevel.ALL); ``` -Since the *Java driver* only executes *CQL* statements, which can be either reads +Since the *Java Driver* only executes *CQL* statements, which can be either reads or writes to *Cassandra*, it is not possible to globally configure the Consistency Level for only reads or only writes. To do so, since the Consistency Level can be set per-statement, you can either set it on every statement, or use `PreparedStatements` (if queries are to be repeated with different values): in this case, setting the CL on the `PreparedStatement`, causes the `BoundStatements` to inherit the CL from the prepared statements they were prepared from. More -informations about how `Statement`s work in the *Java driver* are detailed +informations about how `Statement`s work in the *Java Driver* are detailed in the [“Queries and Results” section](../queries_and_results/). ## Authentication -Authentication settings are managed by the `AuthProvider` class in the *Java driver*. +Authentication settings are managed by the `AuthProvider` class in the *Java Driver*. It can be highly customizable, but also comes with default simple implementations: *Astyanax*: @@ -182,7 +201,7 @@ ConnectionPoolConfigurationImpl cpool = .setAuthenticationCredentials(authCreds) ``` -*Java driver*: +*Java Driver*: ```java AuthProvider authProvider = new PlainTextAuthProvider("username", "password"); @@ -205,7 +224,7 @@ ConnectionPoolConfigurationImpl cpool = .setPort(9160) ``` -*Java driver*: +*Java Driver*: ```java Cluster cluster = Cluster.builder() @@ -213,7 +232,7 @@ Cluster cluster = Cluster.builder() .withPort(9042) ``` -The *Java driver* by default connects to port *9042*, hence you can supply only +The *Java Driver* by default connects to port *9042*, hence you can supply only host names with the `addContactPoints(String...)` method. Note that the contact points are only the entry points to the `Cluster` for the *Automatic discovery phase*. @@ -222,7 +241,7 @@ phase*. 
With all options previously presented, one may configure and create the `Cluster` object this way: -*Java driver*: +*Java Driver*: ```java Cluster cluster = Cluster.builder() @@ -240,7 +259,7 @@ Session session = cluster.connect(); A few best practices are summed up in [this blog post](http://www.datastax.com/dev/blog/4-simple-rules-when-using-the-datastax-drivers-for-cassandra). -Concerning connection pools, the Java driver’s default settings should allow +Concerning connection pools, the Java Driver’s default settings should allow most of the users to get the best out of the driver in terms of throughput, they have been thoroughly tested and tweaked to accommodate the users’ needs. If one still wishes to change those, first [Monitoring the pools](../../../manual/pooling/#monitoring-and-tuning-the-pool) is @@ -248,5 +267,5 @@ advised, then a [deep dive in the Pools management mechanism](../../../manual/po provide enough insight. A lot more options are available in the different `XxxxOption`s classes, policies are -also highly customizable since the base Java driver's implementations can easily be +also highly customizable since the base Java Driver's implementations can easily be extended and implement user-specific actions. diff --git a/upgrade_guide/migrating_from_astyanax/language_level_changes/README.md b/upgrade_guide/migrating_from_astyanax/language_level_changes/README.md index 8116e82b566..8b17c78b81d 100644 --- a/upgrade_guide/migrating_from_astyanax/language_level_changes/README.md +++ b/upgrade_guide/migrating_from_astyanax/language_level_changes/README.md @@ -1,3 +1,22 @@ + + # Language change : from Thrift to CQL The data model changes when using *CQL* (Cassandra Query Language). *CQL* is providing an abstraction of the low-level data stored in *Cassandra*, in diff --git a/upgrade_guide/migrating_from_astyanax/queries_and_results/README.md b/upgrade_guide/migrating_from_astyanax/queries_and_results/README.md index 3f14620bac7..6fee3b2705e 100644 --- a/upgrade_guide/migrating_from_astyanax/queries_and_results/README.md +++ b/upgrade_guide/migrating_from_astyanax/queries_and_results/README.md @@ -1,8 +1,27 @@ + + # Queries and Results There are many resources such as [this post][planetCCqlLink] or [this post][dsBlogCqlLink] to learn how to transform previous Thrift operations to CQL queries. -The *Java driver* executes CQL queries through the `Session`. +The *Java Driver* executes CQL queries through the `Session`. The queries can either be simple *CQL* Strings or represented in the form of `Statement`s. The driver offers 4 kinds of statements, `SimpleStatement`, `Prepared/BoundStatement`, `BuiltStatement`, and `BatchStatement`. All necessary @@ -14,7 +33,7 @@ results of a *CQL* query will be in the form of *Rows* from *Tables*, composed of fixed set of columns, each with a type and a name. The driver exposes the set of *Rows* returned from a query as a ResultSet, thus containing *Rows* on which `getXXX()` can be called. Here are simple examples of translation from -*Astyanax* to *Java driver* in querying and retrieving query results. +*Astyanax* to *Java Driver* in querying and retrieving query results. 
## Single column @@ -30,7 +49,7 @@ Column result = keyspace.prepareQuery(CF_STANDARD1) String value = result.getStringValue(); ``` -*Java driver*: +*Java Driver*: ``` Row row = session.execute("SELECT value FROM table1 WHERE key = '1' AND column1 = '3'").one(); @@ -57,7 +76,7 @@ while (!(columns = query.execute().getResult()).isEmpty()) { } ``` -*Java driver*: +*Java Driver*: ```java ResultSet rs = session.execute("SELECT value FROM table1 WHERE key = '1'"); @@ -84,7 +103,7 @@ while (it.hasNext()) { } ``` -*Java driver*: +*Java Driver*: ```java ResultSet rs = session.execute("SELECT value FROM table1 WHERE key = '1'" + @@ -96,11 +115,11 @@ for (Row row : rs) { ``` ## Async -The *Java driver* provides native support for asynchronous programming since it +The *Java Driver* provides native support for asynchronous programming since it is built on top of an [asynchronous protocol](../../../manual/native_protocol/), please see [this page](../../../manual/async/) for best practices regarding asynchronous programming -with the *Java driver*. +with the *Java Driver*. [planetCCqlLink]: http://www.planetcassandra.org/making-the-change-from-thrift-to-cql/ -[dsBlogCqlLink]: http://www.datastax.com/dev/blog/thrift-to-cql3 \ No newline at end of file +[dsBlogCqlLink]: http://www.datastax.com/dev/blog/thrift-to-cql3